[3/3] hadoop git commit: HDFS-8163. Using monotonicNow for block report scheduling causes test failures on recently restarted systems. (Arpit Agarwal)

2015-04-21 Thread arp
HDFS-8163. Using monotonicNow for block report scheduling causes test failures 
on recently restarted systems. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b17d365f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b17d365f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b17d365f

Branch: refs/heads/branch-2.7
Commit: b17d365fa1d762ed56fc2077fb0f8396383602f3
Parents: 7dd5f42
Author: Arpit Agarwal <a...@apache.org>
Authored: Tue Apr 21 10:58:05 2015 -0700
Committer: Arpit Agarwal <a...@apache.org>
Committed: Tue Apr 21 11:17:27 2015 -0700

--
 .../main/java/org/apache/hadoop/util/Time.java  |   2 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/datanode/BPOfferService.java|   2 +-
 .../hdfs/server/datanode/BPServiceActor.java| 203 ---
 .../datanode/TestBpServiceActorScheduler.java   | 163 +++
 5 files changed, 301 insertions(+), 72 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17d365f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
index 347cdc5..987e1b0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
@@ -42,6 +42,8 @@ public final class Time {
* milliseconds, and not affected by settimeofday or similar system clock
* changes.  This is appropriate to use when computing how much longer to
* wait for an interval to expire.
+   * This function can return a negative value and it must be handled correctly
+   * by callers. See the documentation of System#nanoTime for caveats.
* @return a monotonic clock that counts in milliseconds.
*/
   public static long monotonicNow() {
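
For context on the caveat added above: System#nanoTime, which backs
monotonicNow(), only guarantees that differences between two readings are
meaningful; the absolute value is arbitrary and can be negative, for example
shortly after a host restart. A minimal illustrative sketch of the safe usage
pattern (not part of this commit):

  import org.apache.hadoop.util.Time;

  public class MonotonicIntervalExample {
    public static void main(String[] args) throws InterruptedException {
      long start = Time.monotonicNow();  // absolute value is arbitrary,
                                         // possibly negative after reboot
      Thread.sleep(100);
      long elapsedMs = Time.monotonicNow() - start;  // differences are valid
      System.out.println("waited ~" + elapsedMs + " ms");
    }
  }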

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17d365f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fb7e7e7..37fa10e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -38,6 +38,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
 goes for infinite loop (vinayakumarb)
 
+HDFS-8163. Using monotonicNow for block report scheduling causes
+test failures on recently restarted systems. (Arpit Agarwal)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17d365f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 3e7c897..36a868e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -429,7 +429,7 @@ class BPOfferService {
*/
   void scheduleBlockReport(long delay) {
 for (BPServiceActor actor : bpServices) {
-  actor.scheduleBlockReport(delay);
+  actor.getScheduler().scheduleBlockReport(delay);
 }
   }
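
The bulk of this change (elided above) moves the block report scheduling state
into a scheduler object owned by BPServiceActor, exercised by the new
TestBpServiceActorScheduler. A simplified sketch of the idea; apart from
scheduleBlockReport() and getScheduler(), the names below are illustrative
rather than the exact Hadoop API:

  class Scheduler {
    volatile long nextBlockReportTime = monotonicNow();

    void scheduleBlockReport(long delayMs) {
      // The delay is relative to "now" on the monotonic clock, so a small
      // or even negative absolute reading (fresh reboot) is harmless.
      nextBlockReportTime = monotonicNow() + delayMs;
    }

    boolean isBlockReportDue() {
      // Compare by difference, never by absolute value.
      return nextBlockReportTime - monotonicNow() <= 0;
    }

    // Wrapped in an instance method so unit tests can inject a fake clock.
    long monotonicNow() {
      return org.apache.hadoop.util.Time.monotonicNow();
    }
  }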
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17d365f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 10cce45..49a1991 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import 

[1/3] hadoop git commit: HDFS-8163. Using monotonicNow for block report scheduling causes test failures on recently restarted systems. (Arpit Agarwal)

2015-04-21 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e7bb0fc92 -> 7b3acc5c9
  refs/heads/branch-2.7 7dd5f42a7 -> b17d365fa
  refs/heads/trunk 8ddbb8dd4 -> dfc1c4c30


HDFS-8163. Using monotonicNow for block report scheduling causes test failures 
on recently restarted systems. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dfc1c4c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dfc1c4c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dfc1c4c3

Branch: refs/heads/trunk
Commit: dfc1c4c303cf15afc6c3361ed9d3238562f73cbd
Parents: 8ddbb8d
Author: Arpit Agarwal <a...@apache.org>
Authored: Tue Apr 21 10:58:05 2015 -0700
Committer: Arpit Agarwal <a...@apache.org>
Committed: Tue Apr 21 10:58:05 2015 -0700

--
 .../main/java/org/apache/hadoop/util/Time.java  |   2 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/datanode/BPOfferService.java|   2 +-
 .../hdfs/server/datanode/BPServiceActor.java| 203 ---
 .../datanode/TestBpServiceActorScheduler.java   | 163 +++
 5 files changed, 299 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfc1c4c3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
index b988923..20e2965 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
@@ -47,6 +47,8 @@ public final class Time {
* milliseconds, and not affected by settimeofday or similar system clock
* changes.  This is appropriate to use when computing how much longer to
* wait for an interval to expire.
+   * This function can return a negative value and it must be handled correctly
+   * by callers. See the documentation of System#nanoTime for caveats.
* @return a monotonic clock that counts in milliseconds.
*/
   public static long monotonicNow() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfc1c4c3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6951a08..e07e45d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -566,6 +566,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
 goes for infinite loop (vinayakumarb)
 
+HDFS-8163. Using monotonicNow for block report scheduling causes
+test failures on recently restarted systems. (Arpit Agarwal)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfc1c4c3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 1b42b19..92323f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -429,7 +429,7 @@ class BPOfferService {
*/
   void scheduleBlockReport(long delay) {
 for (BPServiceActor actor : bpServices) {
-  actor.scheduleBlockReport(delay);
+  actor.getScheduler().scheduleBlockReport(delay);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfc1c4c3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index ba5..5bc505f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import 

[2/3] hadoop git commit: HDFS-8163. Using monotonicNow for block report scheduling causes test failures on recently restarted systems. (Arpit Agarwal)

2015-04-21 Thread arp
HDFS-8163. Using monotonicNow for block report scheduling causes test failures 
on recently restarted systems. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b3acc5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b3acc5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b3acc5c

Branch: refs/heads/branch-2
Commit: 7b3acc5c9dc6240fc15ff399061ffc8b5017bfd8
Parents: e7bb0fc
Author: Arpit Agarwal <a...@apache.org>
Authored: Tue Apr 21 10:58:05 2015 -0700
Committer: Arpit Agarwal <a...@apache.org>
Committed: Tue Apr 21 10:58:12 2015 -0700

--
 .../main/java/org/apache/hadoop/util/Time.java  |   2 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/datanode/BPOfferService.java|   2 +-
 .../hdfs/server/datanode/BPServiceActor.java| 203 ---
 .../datanode/TestBpServiceActorScheduler.java   | 163 +++
 5 files changed, 299 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b3acc5c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
index b988923..20e2965 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
@@ -47,6 +47,8 @@ public final class Time {
* milliseconds, and not affected by settimeofday or similar system clock
* changes.  This is appropriate to use when computing how much longer to
* wait for an interval to expire.
+   * This function can return a negative value and it must be handled correctly
+   * by callers. See the documentation of System#nanoTime for caveats.
* @return a monotonic clock that counts in milliseconds.
*/
   public static long monotonicNow() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b3acc5c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d8558e1..51168e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -248,6 +248,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
 goes for infinite loop (vinayakumarb)
 
+HDFS-8163. Using monotonicNow for block report scheduling causes
+test failures on recently restarted systems. (Arpit Agarwal)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b3acc5c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 67979f3..5097e4a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -430,7 +430,7 @@ class BPOfferService {
*/
   void scheduleBlockReport(long delay) {
 for (BPServiceActor actor : bpServices) {
-  actor.scheduleBlockReport(delay);
+  actor.getScheduler().scheduleBlockReport(delay);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b3acc5c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index ba5..5bc505f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import 

hadoop git commit: HADOOP-11704. DelegationTokenAuthenticationFilter must pass ipaddress instead of hostname to ProxyUsers#authorize (Anubhav Dhoot via asuresh)

2015-04-21 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk dfc1c4c30 -> 424a00daa


HADOOP-11704. DelegationTokenAuthenticationFilter must pass ipaddress instead 
of hostname to ProxyUsers#authorize (Anubhav Dhoot via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/424a00da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/424a00da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/424a00da

Branch: refs/heads/trunk
Commit: 424a00daa069bf2049014fd46ad152ec5fc77ac8
Parents: dfc1c4c
Author: Arun Suresh <asur...@apache.org>
Authored: Tue Apr 21 11:31:51 2015 -0700
Committer: Arun Suresh <asur...@apache.org>
Committed: Tue Apr 21 11:31:51 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../DelegationTokenAuthenticationFilter.java|  2 +-
 .../DelegationTokenAuthenticationHandler.java   |  2 +-
 .../delegation/web/TestWebDelegationToken.java  | 56 +++-
 4 files changed, 60 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/424a00da/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 230717c..5c6d44a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -519,6 +519,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11811. Fix typos in hadoop-project/pom.xml and 
TestAccessControlList.
 (Brahma Reddy Battula via ozawa)
 
+HADOOP-11704. DelegationTokenAuthenticationFilter must pass ipaddress
+instead of hostname to ProxyUsers#authorize (Anubhav Dhoot via asuresh)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/424a00da/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
index fbd1129..b6e1a76 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
@@ -239,7 +239,7 @@ public class DelegationTokenAuthenticationFilter
 if (doAsUser != null) {
   ugi = UserGroupInformation.createProxyUser(doAsUser, ugi);
   try {
-ProxyUsers.authorize(ugi, request.getRemoteHost());
+ProxyUsers.authorize(ugi, request.getRemoteAddr());
   } catch (AuthorizationException ex) {
 HttpExceptionUtils.createServletExceptionResponse(response,
 HttpServletResponse.SC_FORBIDDEN, ex);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/424a00da/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
index c498f70..3f191de 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
@@ -199,7 +199,7 @@ public abstract class DelegationTokenAuthenticationHandler
 requestUgi = UserGroupInformation.createProxyUser(
 doAsUser, requestUgi);
 try {
-  ProxyUsers.authorize(requestUgi, request.getRemoteHost());
+  ProxyUsers.authorize(requestUgi, request.getRemoteAddr());
 } catch (AuthorizationException ex) {
   HttpExceptionUtils.createServletExceptionResponse(response,
   HttpServletResponse.SC_FORBIDDEN, ex);
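
The one-line change matters because ProxyUsers#authorize matches the caller
against the hadoop.proxyuser.<superuser>.hosts ACL by address, so passing
getRemoteHost() (a resolved hostname) can reject an otherwise-authorized
proxy request. A schematic sketch of that behavior; the user names and
addresses below are hypothetical:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.hadoop.security.authorize.AuthorizationException;
  import org.apache.hadoop.security.authorize.ProxyUsers;

  public class ProxyAuthorizeExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration(false);
      // Superuser "oozie" may impersonate anyone, but only from 10.0.0.5.
      conf.set("hadoop.proxyuser.oozie.hosts", "10.0.0.5");
      conf.set("hadoop.proxyuser.oozie.users", "*");
      ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

      UserGroupInformation superUgi =
          UserGroupInformation.createRemoteUser("oozie");
      UserGroupInformation proxyUgi =
          UserGroupInformation.createProxyUser("alice", superUgi);

      ProxyUsers.authorize(proxyUgi, "10.0.0.5");  // passes: address matches
      try {
        ProxyUsers.authorize(proxyUgi, "host5.example.com");
      } catch (AuthorizationException expected) {
        // A hostname does not match the address-based ACL, which is why the
        // filter must pass getRemoteAddr() rather than getRemoteHost().
        System.out.println("rejected: " + expected.getMessage());
      }
    }
  }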

http://git-wip-us.apache.org/repos/asf/hadoop/blob/424a00da/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java

hadoop git commit: HADOOP-11704. DelegationTokenAuthenticationFilter must pass ipaddress instead of hostname to ProxyUsers#authorize (Anubhav Dhoot via asuresh)

2015-04-21 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7b3acc5c9 -> d2a9cc287


HADOOP-11704. DelegationTokenAuthenticationFilter must pass ipaddress instead 
of hostname to ProxyUsers#authorize (Anubhav Dhoot via asuresh)

(cherry picked from commit 424a00daa069bf2049014fd46ad152ec5fc77ac8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2a9cc28
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2a9cc28
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2a9cc28

Branch: refs/heads/branch-2
Commit: d2a9cc287ba4a4361a6b65c04400aa0928cf95f4
Parents: 7b3acc5
Author: Arun Suresh <asur...@apache.org>
Authored: Tue Apr 21 11:31:51 2015 -0700
Committer: Arun Suresh <asur...@apache.org>
Committed: Tue Apr 21 11:35:04 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../DelegationTokenAuthenticationFilter.java|  2 +-
 .../DelegationTokenAuthenticationHandler.java   |  2 +-
 .../delegation/web/TestWebDelegationToken.java  | 56 +++-
 4 files changed, 60 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2a9cc28/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8e45588..a146ba3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -78,6 +78,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11811. Fix typos in hadoop-project/pom.xml and 
TestAccessControlList.
 (Brahma Reddy Battula via ozawa)
 
+HADOOP-11704. DelegationTokenAuthenticationFilter must pass ipaddress
+instead of hostname to ProxyUsers#authorize (Anubhav Dhoot via asuresh)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2a9cc28/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
index fbd1129..b6e1a76 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
@@ -239,7 +239,7 @@ public class DelegationTokenAuthenticationFilter
 if (doAsUser != null) {
   ugi = UserGroupInformation.createProxyUser(doAsUser, ugi);
   try {
-ProxyUsers.authorize(ugi, request.getRemoteHost());
+ProxyUsers.authorize(ugi, request.getRemoteAddr());
   } catch (AuthorizationException ex) {
 HttpExceptionUtils.createServletExceptionResponse(response,
 HttpServletResponse.SC_FORBIDDEN, ex);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2a9cc28/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
index c498f70..3f191de 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
@@ -199,7 +199,7 @@ public abstract class DelegationTokenAuthenticationHandler
 requestUgi = UserGroupInformation.createProxyUser(
 doAsUser, requestUgi);
 try {
-  ProxyUsers.authorize(requestUgi, request.getRemoteHost());
+  ProxyUsers.authorize(requestUgi, request.getRemoteAddr());
 } catch (AuthorizationException ex) {
   HttpExceptionUtils.createServletExceptionResponse(response,
   HttpServletResponse.SC_FORBIDDEN, ex);


hadoop git commit: HDFS-8133. Improve readability of deleted block check (Daryn Sharp via Colin P. McCabe)

2015-04-21 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 424a00daa -> 997408eaa


HDFS-8133. Improve readability of deleted block check (Daryn Sharp via Colin P. 
McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/997408ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/997408ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/997408ea

Branch: refs/heads/trunk
Commit: 997408eaaceef20b053ee7344468e28cb9a1379b
Parents: 424a00d
Author: Colin Patrick Mccabe <cmcc...@cloudera.com>
Authored: Tue Apr 21 11:41:22 2015 -0700
Committer: Colin Patrick Mccabe <cmcc...@cloudera.com>
Committed: Tue Apr 21 11:43:24 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../blockmanagement/BlockInfoContiguous.java |  4 
 .../server/blockmanagement/BlockManager.java | 19 ++-
 .../hdfs/server/blockmanagement/BlocksMap.java   |  2 +-
 .../hdfs/server/namenode/FSNamesystem.java   |  5 ++---
 .../server/blockmanagement/TestBlockInfo.java| 10 ++
 6 files changed, 30 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/997408ea/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e07e45d..e162d28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -452,6 +452,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. (wheat9)
 
+HDFS-8133. Improve readability of deleted block check (Daryn Sharp via
+Colin P. McCabe)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/997408ea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 48069c1..4314ab3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -86,6 +86,10 @@ public class BlockInfoContiguous extends Block
 this.bc = bc;
   }
 
+  public boolean isDeleted() {
+return (bc == null);
+  }
+
   public DatanodeDescriptor getDatanode(int index) {
 DatanodeStorageInfo storage = getStorageInfo(index);
 return storage == null ? null : storage.getDatanodeDescriptor();
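
The new accessor simply encapsulates the existing null-check on the owning
BlockCollection, so call sites in BlockManager and FSNamesystem read as
intent rather than mechanism. A standalone sketch of the pattern, using
hypothetical types rather than the actual Hadoop classes:

  public class IsDeletedSketch {
    static class BlockInfo {
      Object blockCollection;  // becomes null once the owning file is gone
      boolean isDeleted() { return blockCollection == null; }
    }

    public static void main(String[] args) {
      BlockInfo b = new BlockInfo();
      System.out.println(b.isDeleted());  // true: not attached to any file
      b.blockCollection = new Object();
      System.out.println(b.isDeleted());  // false
    }
  }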

http://git-wip-us.apache.org/repos/asf/hadoop/blob/997408ea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 2a7b02a..1db1356 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1165,13 +1165,14 @@ public class BlockManager {
   DatanodeStorageInfo storageInfo,
   DatanodeDescriptor node) throws IOException {
 
-BlockCollection bc = b.corrupted.getBlockCollection();
-if (bc == null) {
+if (b.corrupted.isDeleted()) {
   blockLog.info("BLOCK markBlockAsCorrupt: {} cannot be marked as" +
" corrupt as it does not belong to any file", b);
   addToInvalidates(b.corrupted, node);
   return;
 } 
+short expectedReplicas =
+b.corrupted.getBlockCollection().getBlockReplication();
 
 // Add replica to the data-node if it is not already there
 if (storageInfo != null) {
@@ -1183,13 +1184,13 @@ public class BlockManager {
 b.reasonCode);
 
 NumberReplicas numberOfReplicas = countNodes(b.stored);
-boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >= bc
-.getBlockReplication();
+boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=
+expectedReplicas;
 boolean minReplicationSatisfied =
 numberOfReplicas.liveReplicas() >= minReplication;
 boolean 

hadoop git commit: HDFS-8133. Improve readability of deleted block check (Daryn Sharp via Colin P. McCabe)

2015-04-21 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d2a9cc287 -> 447f2f699


HDFS-8133. Improve readability of deleted block check (Daryn Sharp via Colin P. 
McCabe)

(cherry picked from commit 997408eaaceef20b053ee7344468e28cb9a1379b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/447f2f69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/447f2f69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/447f2f69

Branch: refs/heads/branch-2
Commit: 447f2f699e14eff7c278e18980fd11994f070f36
Parents: d2a9cc2
Author: Colin Patrick Mccabe <cmcc...@cloudera.com>
Authored: Tue Apr 21 11:41:22 2015 -0700
Committer: Colin Patrick Mccabe <cmcc...@cloudera.com>
Committed: Tue Apr 21 12:06:33 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../blockmanagement/BlockInfoContiguous.java |  4 
 .../server/blockmanagement/BlockManager.java | 19 ++-
 .../hdfs/server/blockmanagement/BlocksMap.java   |  2 +-
 .../hdfs/server/namenode/FSNamesystem.java   |  5 ++---
 .../server/blockmanagement/TestBlockInfo.java| 10 ++
 6 files changed, 30 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/447f2f69/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 51168e2..8f16534 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -134,6 +134,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. (wheat9)
 
+HDFS-8133. Improve readability of deleted block check (Daryn Sharp via
+Colin P. McCabe)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/447f2f69/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 1d3df26..1039a98 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -89,6 +89,10 @@ public class BlockInfoContiguous extends Block
 this.bc = bc;
   }
 
+  public boolean isDeleted() {
+return (bc == null);
+  }
+
   public DatanodeDescriptor getDatanode(int index) {
 DatanodeStorageInfo storage = getStorageInfo(index);
 return storage == null ? null : storage.getDatanodeDescriptor();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/447f2f69/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index c4357d2..842faac 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1165,13 +1165,14 @@ public class BlockManager {
   DatanodeStorageInfo storageInfo,
   DatanodeDescriptor node) throws IOException {
 
-BlockCollection bc = b.corrupted.getBlockCollection();
-if (bc == null) {
+if (b.corrupted.isDeleted()) {
   blockLog.info("BLOCK markBlockAsCorrupt: {} cannot be marked as" +
" corrupt as it does not belong to any file", b);
   addToInvalidates(b.corrupted, node);
   return;
 } 
+short expectedReplicas =
+b.corrupted.getBlockCollection().getBlockReplication();
 
 // Add replica to the data-node if it is not already there
 if (storageInfo != null) {
@@ -1183,13 +1184,13 @@ public class BlockManager {
 b.reasonCode);
 
 NumberReplicas numberOfReplicas = countNodes(b.stored);
-boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >= bc
-.getBlockReplication();
+boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=
+expectedReplicas;
 boolean minReplicationSatisfied =
  

hadoop git commit: HDFS-7993. Provide each Replica details in fsck (Contributed by J.Andreina)

2015-04-21 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk d52de6154 -> 8ddbb8dd4


HDFS-7993. Provide each Replica details in fsck (Contributed by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ddbb8dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ddbb8dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ddbb8dd

Branch: refs/heads/trunk
Commit: 8ddbb8dd433862509bd9b222dddafe2c3a74778a
Parents: d52de61
Author: Vinayakumar B <vinayakum...@apache.org>
Authored: Tue Apr 21 15:24:49 2015 +0530
Committer: Vinayakumar B <vinayakum...@apache.org>
Committed: Tue Apr 21 15:24:49 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../blockmanagement/DatanodeStorageInfo.java|  6 +-
 .../hdfs/server/namenode/NamenodeFsck.java  | 61 ++---
 .../org/apache/hadoop/hdfs/tools/DFSck.java |  6 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 71 
 5 files changed, 132 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ddbb8dd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1aa9ce4..6951a08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -526,6 +526,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8173. NPE thrown at DataNode shutdown when HTTP server was not able to
 create (surendra singh lilhore via vinayakumarb)
 
+HDFS-7993. Provide each Replica details in fsck (J.Andreina via 
vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ddbb8dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 8c752ac..c6c9001 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -155,7 +155,7 @@ public class DatanodeStorageInfo {
 this.blockReportCount = blockReportCount;
   }
 
-  boolean areBlockContentsStale() {
+  public boolean areBlockContentsStale() {
 return blockContentsStale;
   }
 
@@ -205,11 +205,11 @@ public class DatanodeStorageInfo {
 return getState() == State.FAILED && numBlocks != 0;
   }
 
-  String getStorageID() {
+  public String getStorageID() {
 return storageID;
   }
 
-  StorageType getStorageType() {
+  public StorageType getStorageType() {
 return storageType;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ddbb8dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index a8586dd..afaec87 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -68,9 +69,11 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import 

hadoop git commit: HDFS-7993. Provide each Replica details in fsck (Contributed by J.Andreina)

2015-04-21 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 27fc4fb99 -> e7bb0fc92


HDFS-7993. Provide each Replica details in fsck (Contributed by J.Andreina)

(cherry picked from commit 8ddbb8dd433862509bd9b222dddafe2c3a74778a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7bb0fc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7bb0fc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7bb0fc9

Branch: refs/heads/branch-2
Commit: e7bb0fc922295fe8086797b521fb721a06ac6137
Parents: 27fc4fb
Author: Vinayakumar B <vinayakum...@apache.org>
Authored: Tue Apr 21 15:24:49 2015 +0530
Committer: Vinayakumar B <vinayakum...@apache.org>
Committed: Tue Apr 21 15:27:15 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../blockmanagement/DatanodeStorageInfo.java|  6 +-
 .../hdfs/server/namenode/NamenodeFsck.java  | 61 ++---
 .../org/apache/hadoop/hdfs/tools/DFSck.java |  6 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 71 
 5 files changed, 132 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7bb0fc9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5c5e59f..d8558e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -208,6 +208,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8173. NPE thrown at DataNode shutdown when HTTP server was not able to
 create (surendra singh lilhore via vinayakumarb)
 
+HDFS-7993. Provide each Replica details in fsck (J.Andreina via 
vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7bb0fc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 8c752ac..c6c9001 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -155,7 +155,7 @@ public class DatanodeStorageInfo {
 this.blockReportCount = blockReportCount;
   }
 
-  boolean areBlockContentsStale() {
+  public boolean areBlockContentsStale() {
 return blockContentsStale;
   }
 
@@ -205,11 +205,11 @@ public class DatanodeStorageInfo {
 return getState() == State.FAILED && numBlocks != 0;
   }
 
-  String getStorageID() {
+  public String getStorageID() {
 return storageID;
   }
 
-  StorageType getStorageType() {
+  public StorageType getStorageType() {
 return storageType;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7bb0fc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index a1ad4d3..52fc105 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -68,9 +69,11 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import 

[Hadoop Wiki] Update of AmazonS3 by SteveLoughran

2015-04-21 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The AmazonS3 page has been changed by SteveLoughran:
https://wiki.apache.org/hadoop/AmazonS3?action=diff&rev1=18&rev2=19

Comment:
remove all content on configuring the S3 filesystems -point to the markdown 
docs on github instead.

  = History =
   * The S3 block filesystem was introduced in Hadoop 0.10.0 
([[http://issues.apache.org/jira/browse/HADOOP-574|HADOOP-574]]).
   * The S3 native filesystem was introduced in Hadoop 0.18.0 
([[http://issues.apache.org/jira/browse/HADOOP-930|HADOOP-930]]) and rename 
support was added in Hadoop 0.19.0 
([[https://issues.apache.org/jira/browse/HADOOP-3361|HADOOP-3361]]).
-  * The S3A filesystem was introduced in Hadoop 2.6.0. Some issues were found 
and fixed for later Hadoop 
versions[[https://issues.apache.org/jira/browse/HADOOP-11571|HADOOP-11571]], so 
Hadoop-2.6.0's support of s3a must be considered an incomplete replacement for 
the s3n FS.
+  * The S3A filesystem was introduced in Hadoop 2.6.0. Some issues were found 
and fixed for later Hadoop versions 
[[https://issues.apache.org/jira/browse/HADOOP-11571|HADOOP-11571]].
  
- = Why you cannot use S3 as a replacement for HDFS =
  
+ = Configuring and using the S3 filesystem support =
+ 
+ Consult the 
[[https://github.com/apache/hadoop/blob/trunk/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md|Latest
 Hadoop documentation]] for the specifics on using any of the S3 clients.
+ 
+ 
+ = Important: you cannot use S3 as a replacement for HDFS =
+ 
- You cannot use either of the S3 filesystem clients as a drop-in replacement 
for HDFS. Amazon S3 is an object store with
+ You cannot use any of the S3 filesystem clients as a drop-in replacement for 
HDFS. Amazon S3 is an object store with
   * eventual consistency: changes made by one application (creation, updates 
and deletions) will not be visible until some undefined time.
   * s3n and s3a: non-atomic rename and delete operations. Renaming or deleting 
large directories takes time proportional to the number of entries -and visible 
to other processes during this time, and indeed, until the eventual consistency 
has been resolved.
  
  S3 is not a filesystem. The Hadoop S3 filesystem bindings make it pretend to 
be a filesystem, but it is not. It can
  act as a source of data, and as a destination -though in the latter case, you 
must remember that the output may not be immediately visible.
- 
- == Configuring to use s3/ s3n filesystems ==
- 
- Edit your `core-site.xml` file to include your S3 keys
- 
- {{{
- 
- <property>
-   <name>fs.s3.awsAccessKeyId</name>
-   <value>ID</value>
- </property>
- 
- <property>
-   <name>fs.s3.awsSecretAccessKey</name>
-   <value>SECRET</value>
- </property>
- }}}
- 
- You can then use URLs to your bucket : ``s3n://MYBUCKET/``, or directories 
and files inside it.
- 
- {{{
- 
- s3n://BUCKET/
- s3n://BUCKET/dir
- s3n://BUCKET/dir/files.csv.tar.gz
- s3n://BUCKET/dir/*.gz
- 
- }}}
- 
- Alternatively, you can put the access key ID and the secret access key into a 
''s3n'' (or ''s3'') URI as the user info:
- 
- {{{
-   s3n://ID:SECRET@BUCKET
- }}}
- 
- Note that since the secret
- access key can contain slashes, you must remember to escape them by replacing 
each slash `/` with the string `%2F`.
- Keys specified in the URI take precedence over any specified using the 
properties `fs.s3.awsAccessKeyId` and
- `fs.s3.awsSecretAccessKey`.
- 
- This option is less secure as the URLs are likely to appear in output logs 
and error messages, so being exposed to remote users.
  
  = Security =
  


[3/3] hadoop git commit: HADOOP-11746. rewrite test-patch.sh (aw)

2015-04-21 Thread aw
HADOOP-11746. rewrite test-patch.sh (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73ddb6b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73ddb6b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73ddb6b4

Branch: refs/heads/trunk
Commit: 73ddb6b4f825be1d06fd1d2be86a4bea241e7aa0
Parents: 997408e
Author: Allen Wittenauer <a...@apache.org>
Authored: Tue Apr 21 21:29:45 2015 +0100
Committer: Allen Wittenauer <a...@apache.org>
Committed: Tue Apr 21 21:29:45 2015 +0100

--
 dev-support/shelldocs.py|   31 +-
 dev-support/test-patch.d/checkstyle.sh  |  149 +
 dev-support/test-patch.d/shellcheck.sh  |  138 +
 dev-support/test-patch.d/whitespace.sh  |   40 +
 dev-support/test-patch.sh   | 2839 +-
 hadoop-common-project/hadoop-common/CHANGES.txt |2 +
 6 files changed, 2430 insertions(+), 769 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73ddb6b4/dev-support/shelldocs.py
--
diff --git a/dev-support/shelldocs.py b/dev-support/shelldocs.py
index 2547450..fc7601a 100755
--- a/dev-support/shelldocs.py
+++ b/dev-support/shelldocs.py
@@ -17,6 +17,26 @@ import sys
 import string
 from optparse import OptionParser
 
+asflicense='''
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+'''
+
 def docstrip(key,string):
  string=re.sub("^## @%s " % key ,"",string)
   string=string.lstrip()
@@ -220,17 +240,18 @@ def main():
   funcdef.addreturn(line)
 elif line.startswith('function'):
   funcdef.setname(line)
-  if options.skipprnorep:
-if funcdef.getaudience() == "Private" and \
-   funcdef.getreplace() == "No":
+  if options.skipprnorep and \
+funcdef.getaudience() == "Private" and \
+funcdef.getreplace() == "No":
pass
-else:
-  allfuncs.append(funcdef)
+  else:
+allfuncs.append(funcdef)
   funcdef=ShellFunction()
 
   allfuncs=sorted(allfuncs)
 
  outfile=open(options.outfile, "w")
+  outfile.write(asflicense)
   for line in toc(allfuncs):
 outfile.write(line)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73ddb6b4/dev-support/test-patch.d/checkstyle.sh
--
diff --git a/dev-support/test-patch.d/checkstyle.sh 
b/dev-support/test-patch.d/checkstyle.sh
new file mode 100755
index 000..460709e
--- /dev/null
+++ b/dev-support/test-patch.d/checkstyle.sh
@@ -0,0 +1,149 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_plugin checkstyle
+
+CHECKSTYLE_TIMER=0
+
+# if it ends in an explicit .sh, then this is shell code.
+# if it doesn't have an extension, we assume it is shell code too
+function checkstyle_filefilter
+{
+  local filename=$1
+
+  if [[ ${filename} =~ \.java$ ]]; then
+add_test checkstyle
+  fi
+}
+
+function checkstyle_preapply
+{
+  verify_needed_test checkstyle
+
+  if [[ $? == 0 ]]; then
+return 0
+  fi
+
+  big_console_header "checkstyle plugin: prepatch"
+
+  start_clock
+  echo_and_redirect "${PATCH_DIR}/${PATCH_BRANCH}checkstyle.txt" "${MVN}" test 
checkstyle:checkstyle-aggregate -DskipTests 

hadoop git commit: YARN-3495. Confusing log generated by FairScheduler. Contributed by Brahma Reddy Battula.

2015-04-21 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b7251723d -> 0b87ae27a


YARN-3495. Confusing log generated by FairScheduler. Contributed by Brahma 
Reddy Battula.

(cherry picked from commit 105afd54779852c518b978101f23526143e234a5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b87ae27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b87ae27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b87ae27

Branch: refs/heads/branch-2
Commit: 0b87ae27aadff1b16f95c5b61b53d597d2377dcc
Parents: b725172
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Apr 22 05:47:59 2015 +0900
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Wed Apr 22 05:48:16 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../server/resourcemanager/scheduler/fair/FairScheduler.java| 5 +++--
 2 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b87ae27/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 446f893..06b97e4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -185,6 +185,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3136. Fixed a synchronization problem of
 AbstractYarnScheduler#getTransferredContainers. (Sunil G via jianhe)
 
+YARN-3495. Confusing log generated by FairScheduler.
+(Brahma Reddy Battula via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b87ae27/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index a6c5416..f481de5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -796,7 +796,8 @@ public class FairScheduler extends
   protected synchronized void completedContainer(RMContainer rmContainer,
   ContainerStatus containerStatus, RMContainerEventType event) {
 if (rmContainer == null) {
-  LOG.info("Null container completed...");
+  LOG.info("Container " + containerStatus.getContainerId()
+  + " completed with event " + event);
   return;
 }
 
@@ -809,7 +810,7 @@ public class FairScheduler extends
 container.getId().getApplicationAttemptId().getApplicationId();
 if (application == null) {
   LOG.info("Container " + container + " of" +
-   " unknown application attempt " + appId +
+   " finished application " + appId +
" completed with event " + event);
   return;
 }



[2/3] hadoop git commit: HADOOP-11746. rewrite test-patch.sh (aw)

2015-04-21 Thread aw
http://git-wip-us.apache.org/repos/asf/hadoop/blob/73ddb6b4/dev-support/test-patch.sh
--
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 574a4fd..6e8679e 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -1,728 +1,1740 @@
 #!/usr/bin/env bash
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
 #
-#   http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
+### BUILD_URL is set by Hudson if it is run by patch process
 
-#set -x
+this="${BASH_SOURCE-$0}"
+BINDIR=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+CWD=$(pwd)
+USER_PARAMS=("$@")
+GLOBALTIMER=$(date +%s)
+
+## @description  Setup the default global variables
+## @audience public
+## @stabilitystable
+## @replaceable  no
+function setup_defaults
+{
+  if [[ -z ${MAVEN_HOME:-} ]]; then
+MVN=mvn
+  else
+MVN=${MAVEN_HOME}/bin/mvn
+  fi
 
-### Setup some variables.  
-### BUILD_URL is set by Hudson if it is run by patch process
-### Read variables from properties file
-bindir=$(dirname $0)
-
-# Defaults
-if [ -z $MAVEN_HOME ]; then
-  MVN=mvn
-else
-  MVN=$MAVEN_HOME/bin/mvn
-fi
+  PROJECT_NAME=hadoop
+  JENKINS=false
+  PATCH_DIR=/tmp/${PROJECT_NAME}-test-patch/$$
+  BASEDIR=$(pwd)
 
-PROJECT_NAME=Hadoop
-JENKINS=false
-PATCH_DIR=/tmp
-SUPPORT_DIR=/tmp
-BASEDIR=$(pwd)
-BUILD_NATIVE=true
-PS=${PS:-ps}
-AWK=${AWK:-awk}
-WGET=${WGET:-wget}
-GIT=${GIT:-git}
-GREP=${GREP:-grep}
-PATCH=${PATCH:-patch}
-DIFF=${DIFF:-diff}
-JIRACLI=${JIRA:-jira}
-FINDBUGS_HOME=${FINDBUGS_HOME}
-FORREST_HOME=${FORREST_HOME}
-ECLIPSE_HOME=${ECLIPSE_HOME}
+  FINDBUGS_HOME=${FINDBUGS_HOME:-}
+  ECLIPSE_HOME=${ECLIPSE_HOME:-}
+  BUILD_NATIVE=${BUILD_NATIVE:-true}
+  PATCH_BRANCH=
+  CHANGED_MODULES=
+  USER_MODULE_LIST=
+  OFFLINE=false
+  CHANGED_FILES=
+  REEXECED=false
+  RESETREPO=false
+  ISSUE=
+  ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$'
+  TIMER=$(date +%s)
+
+  OSTYPE=$(uname -s)
+
+  # Solaris needs POSIX, not SVID
+  case ${OSTYPE} in
+SunOS)
+  PS=${PS:-ps}
+  AWK=${AWK:-/usr/xpg4/bin/awk}
+  SED=${SED:-/usr/xpg4/bin/sed}
+  WGET=${WGET:-wget}
+  GIT=${GIT:-git}
+  EGREP=${EGREP:-/usr/xpg4/bin/egrep}
+  GREP=${GREP:-/usr/xpg4/bin/grep}
+  PATCH=${PATCH:-patch}
+  DIFF=${DIFF:-diff}
+  JIRACLI=${JIRA:-jira}
+;;
+*)
+  PS=${PS:-ps}
+  AWK=${AWK:-awk}
+  SED=${SED:-sed}
+  WGET=${WGET:-wget}
+  GIT=${GIT:-git}
+  EGREP=${EGREP:-egrep}
+  GREP=${GREP:-grep}
+  PATCH=${PATCH:-patch}
+  DIFF=${DIFF:-diff}
+  JIRACLI=${JIRA:-jira}
+;;
+  esac
+
+  declare -a JIRA_COMMENT_TABLE
+  declare -a JIRA_FOOTER_TABLE
+  declare -a JIRA_HEADER
+  declare -a JIRA_TEST_TABLE
+
+  JFC=0
+  JTC=0
+  JTT=0
+  RESULT=0
+}
 
-###
-printUsage() {
-  echo "Usage: $0 [options] patch-file | defect-number"
+## @description  Print a message to stderr
+## @audience public
+## @stabilitystable
+## @replaceable  no
+## @paramstring
+function hadoop_error
+{
+  echo "$*" 1>&2
+}
+
+## @description  Print a message to stderr if --debug is turned on
+## @audience public
+## @stabilitystable
+## @replaceable  no
+## @paramstring
+function hadoop_debug
+{
+  if [[ -n ${HADOOP_SHELL_SCRIPT_DEBUG} ]]; then
+    echo "[$(date) DEBUG]: $*" 1>&2
+  fi
+}
+
+## @description  Activate the local timer
+## @audience public
+## @stabilitystable
+## @replaceable  no
+function start_clock
+{
+  hadoop_debug "Start clock"
+  TIMER=$(date +%s)
+}
+
+## @description  Print the elapsed time in seconds since the start of the local timer
+## @audience public
+## 

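The start_clock helper above is one half of a timing pair; the full script also defines stop_clock and offset_clock (offset_clock is used by the checkstyle plugin later in this batch). A minimal sketch of how such a pair fits together; the stop_clock body here is illustrative, not the committed code:

function stop_clock
{
  # elapsed wall-clock seconds since the last start_clock
  local -r stoptime=$(date +%s)
  echo $((stoptime - TIMER))
}

start_clock
sleep 2
echo "step took $(stop_clock) seconds"
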
[1/3] hadoop git commit: HADOOP-11746. rewrite test-patch.sh (aw)

2015-04-21 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 997408eaa -> 73ddb6b4f


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73ddb6b4/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5c6d44a..bcbffb7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -447,6 +447,8 @@ Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
+HADOOP-11746. rewrite test-patch.sh (aw)
+
   NEW FEATURES
 
 HADOOP-11226. Add a configuration to set ipc.Client's traffic class with



[3/3] hadoop git commit: HADOOP-11746. rewrite test-patch.sh (aw)

2015-04-21 Thread aw
HADOOP-11746. rewrite test-patch.sh (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b7251723
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b7251723
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b7251723

Branch: refs/heads/branch-2
Commit: b7251723d8347b764b0792e1b6bc4d9a03b71338
Parents: 447f2f6
Author: Allen Wittenauer a...@apache.org
Authored: Tue Apr 21 21:22:09 2015 +0100
Committer: Allen Wittenauer a...@apache.org
Committed: Tue Apr 21 21:22:09 2015 +0100

--
 dev-support/test-patch.d/checkstyle.sh  |  149 +
 dev-support/test-patch.d/shellcheck.sh  |  138 +
 dev-support/test-patch.d/whitespace.sh  |   40 +
 dev-support/test-patch.sh   | 2856 +-
 hadoop-common-project/hadoop-common/CHANGES.txt |2 +
 5 files changed, 2491 insertions(+), 694 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7251723/dev-support/test-patch.d/checkstyle.sh
--
diff --git a/dev-support/test-patch.d/checkstyle.sh 
b/dev-support/test-patch.d/checkstyle.sh
new file mode 100755
index 000..460709e
--- /dev/null
+++ b/dev-support/test-patch.d/checkstyle.sh
@@ -0,0 +1,149 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the License); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_plugin checkstyle
+
+CHECKSTYLE_TIMER=0
+
+# if it ends in an explicit .sh, then this is shell code.
+# if it doesn't have an extension, we assume it is shell code too
+function checkstyle_filefilter
+{
+  local filename=$1
+
+  if [[ ${filename} =~ \.java$ ]]; then
+add_test checkstyle
+  fi
+}
+
+function checkstyle_preapply
+{
+  verify_needed_test checkstyle
+
+  if [[ $? == 0 ]]; then
+return 0
+  fi
+
+  big_console_header "checkstyle plugin: prepatch"
+
+  start_clock
+  echo_and_redirect "${PATCH_DIR}/${PATCH_BRANCH}checkstyle.txt" ${MVN} test checkstyle:checkstyle-aggregate -DskipTests -D${PROJECT_NAME}PatchProcess
+  if [[ $? != 0 ]] ; then
+    echo "Pre-patch ${PATCH_BRANCH} checkstyle compilation is broken?"
+    add_jira_table -1 checkstyle "Pre-patch ${PATCH_BRANCH} checkstyle compilation may be broken."
+return 1
+  fi
+
+  cp -p ${BASEDIR}/target/checkstyle-result.xml \
+${PATCH_DIR}/checkstyle-result-${PATCH_BRANCH}.xml
+
+  # keep track of how much as elapsed for us already
+  CHECKSTYLE_TIMER=$(stop_clock)
+  return 0
+}
+
+function checkstyle_postapply
+{
+  verify_needed_test checkstyle
+
+  if [[ $? == 0 ]]; then
+return 0
+  fi
+
+  big_console_header "checkstyle plugin: postpatch"
+
+  start_clock
+
+  # add our previous elapsed to our new timer
+  # by setting the clock back
+  offset_clock ${CHECKSTYLE_TIMER}
+
+  echo_and_redirect "${PATCH_DIR}/patchcheckstyle.txt" ${MVN} test checkstyle:checkstyle-aggregate -DskipTests -D${PROJECT_NAME}PatchProcess
+  if [[ $? != 0 ]] ; then
+    echo "Post-patch checkstyle compilation is broken."
+    add_jira_table -1 checkstyle "Post-patch checkstyle compilation is broken."
+return 1
+  fi
+
+  cp -p ${BASEDIR}/target/checkstyle-result.xml \
+${PATCH_DIR}/checkstyle-result-patch.xml
+
+  checkstyle_runcomparison
+
+  # shellcheck disable=SC2016
+  CHECKSTYLE_POSTPATCH=$(wc -l "${PATCH_DIR}/checkstyle-result-diff.txt" | ${AWK} '{print $1}')
+
+  if [[ ${CHECKSTYLE_POSTPATCH} -gt 0 ]] ; then
+
+    add_jira_table -1 checkstyle "The applied patch generated "\
+      "${CHECKSTYLE_POSTPATCH}"\
+      " additional checkstyle issues."
+add_jira_footer checkstyle @@BASE@@/checkstyle-result-diff.txt
+
+return 1
+  fi
+  add_jira_table +1 checkstyle "There were no new checkstyle issues."
+  return 0
+}
+
+
+function checkstyle_runcomparison
+{
+
+  python <(cat <<EOF
+import os
+import sys
+import xml.etree.ElementTree as etree
+from collections import defaultdict
+
+if len(sys.argv) != 3 :
+  print "usage: %s checkstyle-result-master.xml checkstyle-result-patch.xml" % sys.argv[0]
+  exit(1)
+
+def path_key(x):
+  path = x.attrib['name']
+  return path[path.find('${PROJECT_NAME}-'):]
+
+def 

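The checkstyle script above doubles as a template for the new plugin interface: a plugin registers itself with add_plugin and supplies hook functions named <plugin>_filefilter, <plugin>_preapply, and <plugin>_postapply, which the driver calls at the matching phases. A minimal sketch of a hypothetical plugin built on those hooks (the plugin name, file pattern, and messages are invented for illustration):

add_plugin example

## request this test only when a Markdown file changes
function example_filefilter
{
  local filename=$1

  if [[ ${filename} =~ \.md$ ]]; then
    add_test example
  fi
}

function example_postapply
{
  verify_needed_test example
  if [[ $? == 0 ]]; then
    return 0
  fi

  big_console_header "example plugin: postpatch"
  add_jira_table +1 example "No new issues found."
  return 0
}
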
[2/3] hadoop git commit: HADOOP-11746. rewrite test-patch.sh (aw)

2015-04-21 Thread aw
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7251723/dev-support/test-patch.sh
--
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 56db544..6e8679e 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -1,846 +1,2314 @@
 #!/usr/bin/env bash
-#   Licensed under the Apache License, Version 2.0 (the License);
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the License); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
 #
-#   http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an AS IS BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
+### BUILD_URL is set by Hudson if it is run by patch process
 
-#set -x
+this="${BASH_SOURCE-$0}"
+BINDIR=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+CWD=$(pwd)
+USER_PARAMS=("$@")
+GLOBALTIMER=$(date +%s)
+
+## @description  Setup the default global variables
+## @audience public
+## @stabilitystable
+## @replaceable  no
+function setup_defaults
+{
+  if [[ -z ${MAVEN_HOME:-} ]]; then
+MVN=mvn
+  else
+MVN=${MAVEN_HOME}/bin/mvn
+  fi
 
-### Setup some variables.  
-### BUILD_URL is set by Hudson if it is run by patch process
-### Read variables from properties file
-bindir=$(dirname $0)
-
-# Defaults
-if [ -z $MAVEN_HOME ]; then
-  MVN=mvn
-else
-  MVN=$MAVEN_HOME/bin/mvn
-fi
+  PROJECT_NAME=hadoop
+  JENKINS=false
+  PATCH_DIR=/tmp/${PROJECT_NAME}-test-patch/$$
+  BASEDIR=$(pwd)
+
+  FINDBUGS_HOME=${FINDBUGS_HOME:-}
+  ECLIPSE_HOME=${ECLIPSE_HOME:-}
+  BUILD_NATIVE=${BUILD_NATIVE:-true}
+  PATCH_BRANCH=
+  CHANGED_MODULES=
+  USER_MODULE_LIST=
+  OFFLINE=false
+  CHANGED_FILES=
+  REEXECED=false
+  RESETREPO=false
+  ISSUE=
+  ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$'
+  TIMER=$(date +%s)
+
+  OSTYPE=$(uname -s)
+
+  # Solaris needs POSIX, not SVID
+  case ${OSTYPE} in
+SunOS)
+  PS=${PS:-ps}
+  AWK=${AWK:-/usr/xpg4/bin/awk}
+  SED=${SED:-/usr/xpg4/bin/sed}
+  WGET=${WGET:-wget}
+  GIT=${GIT:-git}
+  EGREP=${EGREP:-/usr/xpg4/bin/egrep}
+  GREP=${GREP:-/usr/xpg4/bin/grep}
+  PATCH=${PATCH:-patch}
+  DIFF=${DIFF:-diff}
+  JIRACLI=${JIRA:-jira}
+;;
+*)
+  PS=${PS:-ps}
+  AWK=${AWK:-awk}
+  SED=${SED:-sed}
+  WGET=${WGET:-wget}
+  GIT=${GIT:-git}
+  EGREP=${EGREP:-egrep}
+  GREP=${GREP:-grep}
+  PATCH=${PATCH:-patch}
+  DIFF=${DIFF:-diff}
+  JIRACLI=${JIRA:-jira}
+;;
+  esac
+
+  declare -a JIRA_COMMENT_TABLE
+  declare -a JIRA_FOOTER_TABLE
+  declare -a JIRA_HEADER
+  declare -a JIRA_TEST_TABLE
+
+  JFC=0
+  JTC=0
+  JTT=0
+  RESULT=0
+}
 
-PROJECT_NAME=Hadoop
-JENKINS=false
-PATCH_DIR=/tmp
-SUPPORT_DIR=/tmp
-BASEDIR=$(pwd)
-
-PS=${PS:-ps}
-AWK=${AWK:-awk}
-WGET=${WGET:-wget}
-GIT=${GIT:-git}
-GREP=${GREP:-grep}
-PATCH=${PATCH:-patch}
-JIRACLI=${JIRA:-jira}
-FINDBUGS_HOME=${FINDBUGS_HOME}
-FORREST_HOME=${FORREST_HOME}
-ECLIPSE_HOME=${ECLIPSE_HOME}
+## @description  Print a message to stderr
+## @audience public
+## @stabilitystable
+## @replaceable  no
+## @paramstring
+function hadoop_error
+{
+  echo "$*" 1>&2
+}
 
-###
-printUsage() {
-  echo "Usage: $0 [options] patch-file | defect-number"
+## @description  Print a message to stderr if --debug is turned on
+## @audience public
+## @stabilitystable
+## @replaceable  no
+## @paramstring
+function hadoop_debug
+{
+  if [[ -n ${HADOOP_SHELL_SCRIPT_DEBUG} ]]; then
+    echo "[$(date) DEBUG]: $*" 1>&2
+  fi
+}
+
+## @description  Activate the local timer
+## @audience public
+## @stabilitystable
+## @replaceable  no
+function start_clock
+{
+  hadoop_debug "Start clock"
+  TIMER=$(date +%s)
+}
+
+## @description  Print the elapsed time in seconds since the start of the local timer
+## @audience public
+## @stabilitystable
+## @replaceable  no

[1/3] hadoop git commit: HADOOP-11746. rewrite test-patch.sh (aw)

2015-04-21 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 447f2f699 -> b7251723d


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7251723/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a146ba3..b83e48c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -12,6 +12,8 @@ Release 2.8.0 - UNRELEASED
 HADOOP-9477. Add posixGroups support for LDAP groups mapping service.
 (Dapeng Sun via Yongjun Zhang)
 
+HADOOP-11746. rewrite test-patch.sh (aw)
+
   IMPROVEMENTS
 
 HADOOP-11719. [Fsshell] Remove bin/hadoop reference from



[1/3] hadoop git commit: MAPREDUCE-6293. Set job classloader on uber-job's LocalContainerLauncher event thread. (Sangjin Lee via gera)

2015-04-21 Thread gera
Repository: hadoop
Updated Branches:
  refs/heads/trunk 105afd547 -> 89ded89e8


MAPREDUCE-6293. Set job classloader on uber-job's LocalContainerLauncher event 
thread. (Sangjin Lee via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/725eb52d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/725eb52d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/725eb52d

Branch: refs/heads/trunk
Commit: 725eb52ddc647074f0bf1cc73c3029f1352f51d5
Parents: 105afd5
Author: Gera Shegalov g...@apache.org
Authored: Tue Apr 21 11:46:35 2015 -0700
Committer: Gera Shegalov g...@apache.org
Committed: Tue Apr 21 13:57:22 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../hadoop/mapred/LocalContainerLauncher.java   | 20 
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  2 +-
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  2 +-
 .../apache/hadoop/mapreduce/v2/TestMRJobs.java  |  9 +
 5 files changed, 34 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/725eb52d/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ffa01fa..0cf5c4b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -337,6 +337,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options
 which is a regression from MR1 (zxu via rkanter)
 
+MAPREDUCE-6293. Set job classloader on uber-job's LocalContainerLauncher
+event thread. (Sangjin Lee via gera)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/725eb52d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
index 218ac83..ffc5326 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -80,6 +80,7 @@ public class LocalContainerLauncher extends AbstractService 
implements
   private final HashSet<File> localizedFiles;
   private final AppContext context;
   private final TaskUmbilicalProtocol umbilical;
+  private final ClassLoader jobClassLoader;
   private ExecutorService taskRunner;
   private Thread eventHandler;
   private BlockingQueue<ContainerLauncherEvent> eventQueue =
@@ -87,6 +88,12 @@ public class LocalContainerLauncher extends AbstractService 
implements
 
   public LocalContainerLauncher(AppContext context,
 TaskUmbilicalProtocol umbilical) {
+this(context, umbilical, null);
+  }
+
+  public LocalContainerLauncher(AppContext context,
+TaskUmbilicalProtocol umbilical,
+ClassLoader jobClassLoader) {
 super(LocalContainerLauncher.class.getName());
 this.context = context;
 this.umbilical = umbilical;
@@ -94,6 +101,7 @@ public class LocalContainerLauncher extends AbstractService 
implements
 // (TODO/FIXME:  pointless to use RPC to talk to self; should create
 // LocalTaskAttemptListener or similar:  implement umbilical protocol
 // but skip RPC stuff)
+this.jobClassLoader = jobClassLoader;
 
 try {
   curFC = FileContext.getFileContext(curDir.toURI());
@@ -133,6 +141,18 @@ public class LocalContainerLauncher extends 
AbstractService implements
  setDaemon(true).setNameFormat("uber-SubtaskRunner").build());
 // create and start an event handling thread
  eventHandler = new Thread(new EventHandler(), "uber-EventHandler");
+// if the job classloader is specified, set it onto the event handler as 
the
+// thread context classloader so that it can be used by the event handler
+// as well as the subtask runner threads
+if (jobClassLoader != null) {
+      LOG.info("Setting " + jobClassLoader +
+          " as the context classloader of thread " + eventHandler.getName());
+  eventHandler.setContextClassLoader(jobClassLoader);
+} else {
+  // note the current TCCL
+  LOG.info(Context classloader 

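The heart of this change is handing an isolated classloader to a worker thread through the thread context classloader, so everything that runs on that thread resolves classes against the job's own jars. A self-contained sketch of the technique, where the empty URLClassLoader is a stand-in for the real job classloader; none of this is MRAppMaster code:

import java.net.URL;
import java.net.URLClassLoader;

public class TcclSketch {
  public static void main(String[] args) throws Exception {
    // stand-in for the isolated job classloader
    ClassLoader jobClassLoader =
        new URLClassLoader(new URL[0], TcclSketch.class.getClassLoader());
    Thread eventHandler = new Thread(() -> {
      // work on this thread now resolves classes via the job classloader
      System.out.println("TCCL: "
          + Thread.currentThread().getContextClassLoader());
    }, "uber-EventHandler");
    eventHandler.setContextClassLoader(jobClassLoader);
    eventHandler.start();
    eventHandler.join();
  }
}
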
[2/3] hadoop git commit: HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up split calculation (gera)

2015-04-21 Thread gera
HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up split 
calculation (gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d2cf9fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d2cf9fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d2cf9fb

Branch: refs/heads/trunk
Commit: 6d2cf9fbbd02482315a091ab07af26e40cc5134f
Parents: 725eb52
Author: Gera Shegalov g...@apache.org
Authored: Tue Apr 21 11:57:42 2015 -0700
Committer: Gera Shegalov g...@apache.org
Committed: Tue Apr 21 13:57:23 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../org/apache/hadoop/fs/LocatedFileStatus.java |   7 +-
 .../hadoop/fs/viewfs/ChRootedFileSystem.java|  10 +-
 .../org/apache/hadoop/fs/viewfs/InodeTree.java  |   2 +-
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |  94 ++---
 .../fs/viewfs/ViewFsLocatedFileStatus.java  | 136 +++
 .../fs/viewfs/TestChRootedFileSystem.java   |  14 ++
 .../fs/viewfs/ViewFileSystemBaseTest.java   | 108 +++
 8 files changed, 327 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d2cf9fb/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index bcbffb7..9819300 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -504,6 +504,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11785. Reduce the number of listStatus operation in distcp
 buildListing (Zoran Dimitrijevic via Colin P. McCabe)
 
+HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up
+split calculation (gera)
+
   BUG FIXES
 
 HADOOP-10027. *Compressor_deflateBytesDirect passes instance instead of

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d2cf9fb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
index 0136894..9e920c5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
@@ -32,6 +32,11 @@ import org.apache.hadoop.fs.permission.FsPermission;
 public class LocatedFileStatus extends FileStatus {
   private BlockLocation[] locations;
 
+
+  public LocatedFileStatus() {
+super();
+  }
+
   /**
* Constructor 
* @param stat a file status
@@ -43,7 +48,7 @@ public class LocatedFileStatus extends FileStatus {
 stat.getBlockSize(), stat.getModificationTime(),
 stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
 stat.getGroup(), null, stat.getPath(), locations);
-if (isSymlink()) {
+if (stat.isSymlink()) {
   setSymlink(stat.getSymlink());
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d2cf9fb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 9650a37..18e2391 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -37,8 +37,10 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -240,7 +242,13 @@ class ChRootedFileSystem extends FilterFileSystem {
   throws IOException {
 return super.listStatus(fullPath(f));
   }
-  
+
+  @Override
+  public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f)
+  throws IOException {
+return super.listLocatedStatus(fullPath(f));
+  }
+
   @Override
   public 

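The payoff of listLocatedStatus is that one listing call returns each file's status together with its block locations, so split calculation no longer needs a separate getFileBlockLocations round trip per file. A minimal caller sketch against the public FileSystem API (the path argument is arbitrary and error handling is omitted):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListLocatedSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // status and block locations arrive together from a single listing call
    RemoteIterator<LocatedFileStatus> it =
        fs.listLocatedStatus(new Path(args[0]));
    while (it.hasNext()) {
      LocatedFileStatus stat = it.next();
      System.out.println(stat.getPath() + " blocks="
          + stat.getBlockLocations().length);
    }
  }
}
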
[3/3] hadoop git commit: MAPREDUCE-6297. Task Id of the failed task in diagnostics should link to the task page. (Siqi Li via gera)

2015-04-21 Thread gera
MAPREDUCE-6297. Task Id of the failed task in diagnostics should link to the 
task page. (Siqi Li via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89ded89e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89ded89e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89ded89e

Branch: refs/heads/trunk
Commit: 89ded89e86e5d9a634d92a5d8a7c889744d97f94
Parents: 6d2cf9f
Author: Gera Shegalov g...@apache.org
Authored: Tue Apr 21 12:36:37 2015 -0700
Committer: Gera Shegalov g...@apache.org
Committed: Tue Apr 21 13:57:23 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../org/apache/hadoop/mapreduce/TaskID.java | 35 
 .../mapreduce/v2/hs/webapp/HsJobBlock.java  |  8 -
 .../mapreduce/v2/hs/webapp/TestBlocks.java  | 20 ++-
 .../v2/hs/webapp/TestHsWebServicesTasks.java| 27 ++-
 5 files changed, 60 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89ded89e/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 0cf5c4b..ccdf6d6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -340,6 +340,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6293. Set job classloader on uber-job's LocalContainerLauncher
 event thread. (Sangjin Lee via gera)
 
+MAPREDUCE-6297. Task Id of the failed task in diagnostics should link to
+the task page. (Siqi Li via gera)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89ded89e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
index 488ffcc..b9817dd 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
@@ -25,6 +25,8 @@ import java.text.NumberFormat;
 import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -58,6 +60,9 @@ import org.apache.hadoop.io.WritableUtils;
 public class TaskID extends org.apache.hadoop.mapred.ID {
   protected static final String TASK = "task";
   protected static final NumberFormat idFormat = NumberFormat.getInstance();
+  public static final String TASK_ID_REGEX = TASK + "_(\\d+)_(\\d+)_" +
+      CharTaskTypeMaps.allTaskTypes + "_(\\d+)";
+  public static final Pattern taskIdPattern = Pattern.compile(TASK_ID_REGEX);
   static {
 idFormat.setGroupingUsed(false);
 idFormat.setMinimumIntegerDigits(6);
@@ -207,29 +212,15 @@ public class TaskID extends org.apache.hadoop.mapred.ID {
 throws IllegalArgumentException {
 if(str == null)
   return null;
-    String exceptionMsg = null;
-    try {
-      String[] parts = str.split("_");
-      if(parts.length == 5) {
-        if(parts[0].equals(TASK)) {
-          String type = parts[3];
-          TaskType t = CharTaskTypeMaps.getTaskType(type.charAt(0));
-          if(t != null) {
-            return new org.apache.hadoop.mapred.TaskID(parts[1],
-                                                       Integer.parseInt(parts[2]),
-                                                       t,
-                                                       Integer.parseInt(parts[4]));
-          } else
-            exceptionMsg = "Bad TaskType identifier. TaskId string : " + str
-                + " is not properly formed.";
-        }
-      }
-    }catch (Exception ex) {//fall below
-    }
-    if (exceptionMsg == null) {
-      exceptionMsg = "TaskId string : " + str + " is not properly formed";
+    Matcher m = taskIdPattern.matcher(str);
+    if (m.matches()) {
+      return new org.apache.hadoop.mapred.TaskID(m.group(1),
+          Integer.parseInt(m.group(2)),
+          CharTaskTypeMaps.getTaskType(m.group(3).charAt(0)),
+          Integer.parseInt(m.group(4)));
     }
+    String exceptionMsg = "TaskId string : " + str + " is not properly formed"

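The rewritten fromString trades the old split-and-check parsing for a single anchored regex whose capture groups feed the TaskID constructor directly. A standalone sketch of the same idea; the ([mr]) type group is a simplification of what CharTaskTypeMaps.allTaskTypes expands to, and the sample id is made up:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TaskIdRegexSketch {
  // simplified type group; the real pattern derives it from CharTaskTypeMaps
  private static final Pattern TASK_ID =
      Pattern.compile("task_(\\d+)_(\\d+)_([mr])_(\\d+)");

  public static void main(String[] args) {
    Matcher m = TASK_ID.matcher("task_1429640223836_0001_m_000042");
    if (m.matches()) {
      System.out.println("jtId=" + m.group(1) + " job=" + m.group(2)
          + " type=" + m.group(3) + " task=" + Integer.parseInt(m.group(4)));
    }
  }
}
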
[2/3] hadoop git commit: HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up split calculation (gera)

2015-04-21 Thread gera
HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up split 
calculation (gera)

(cherry picked from commit 6d2cf9fbbd02482315a091ab07af26e40cc5134f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1544c636
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1544c636
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1544c636

Branch: refs/heads/branch-2
Commit: 1544c63602089b690e850e0e30af4589513a2371
Parents: 6f5dc9f
Author: Gera Shegalov g...@apache.org
Authored: Tue Apr 21 11:57:42 2015 -0700
Committer: Gera Shegalov g...@apache.org
Committed: Tue Apr 21 14:21:57 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../org/apache/hadoop/fs/LocatedFileStatus.java |   7 +-
 .../hadoop/fs/viewfs/ChRootedFileSystem.java|  10 +-
 .../org/apache/hadoop/fs/viewfs/InodeTree.java  |   2 +-
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |  94 ++---
 .../fs/viewfs/ViewFsLocatedFileStatus.java  | 136 +++
 .../fs/viewfs/TestChRootedFileSystem.java   |  14 ++
 .../fs/viewfs/ViewFileSystemBaseTest.java   | 108 +++
 8 files changed, 327 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1544c636/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index b83e48c..1c67dda 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -60,6 +60,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11785. Reduce the number of listStatus operation in distcp
 buildListing (Zoran Dimitrijevic via Colin P. McCabe)
 
+HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up
+split calculation (gera)
+
   BUG FIXES
 
 HADOOP-11568. Description on usage of classpath in hadoop command is

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1544c636/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
index 0136894..9e920c5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
@@ -32,6 +32,11 @@ import org.apache.hadoop.fs.permission.FsPermission;
 public class LocatedFileStatus extends FileStatus {
   private BlockLocation[] locations;
 
+
+  public LocatedFileStatus() {
+super();
+  }
+
   /**
* Constructor 
* @param stat a file status
@@ -43,7 +48,7 @@ public class LocatedFileStatus extends FileStatus {
 stat.getBlockSize(), stat.getModificationTime(),
 stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
 stat.getGroup(), null, stat.getPath(), locations);
-if (isSymlink()) {
+if (stat.isSymlink()) {
   setSymlink(stat.getSymlink());
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1544c636/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 9650a37..18e2391 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -37,8 +37,10 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -240,7 +242,13 @@ class ChRootedFileSystem extends FilterFileSystem {
   throws IOException {
 return super.listStatus(fullPath(f));
   }
-  
+
+  @Override
+  public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f)
+      throws IOException {
+    return super.listLocatedStatus(fullPath(f));

[1/3] hadoop git commit: MAPREDUCE-6293. Set job classloader on uber-job's LocalContainerLauncher event thread. (Sangjin Lee via gera)

2015-04-21 Thread gera
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0b87ae27a -> 6f06e3b7e


MAPREDUCE-6293. Set job classloader on uber-job's LocalContainerLauncher event 
thread. (Sangjin Lee via gera)

(cherry picked from commit 725eb52ddc647074f0bf1cc73c3029f1352f51d5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f5dc9f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f5dc9f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f5dc9f5

Branch: refs/heads/branch-2
Commit: 6f5dc9f5af8863aba00a36bff2964c7e3feec513
Parents: 0b87ae2
Author: Gera Shegalov g...@apache.org
Authored: Tue Apr 21 11:46:35 2015 -0700
Committer: Gera Shegalov g...@apache.org
Committed: Tue Apr 21 14:21:56 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../hadoop/mapred/LocalContainerLauncher.java   | 20 
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  2 +-
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  2 +-
 .../apache/hadoop/mapreduce/v2/TestMRJobs.java  |  9 +
 5 files changed, 34 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f5dc9f5/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ef0095c..b083d00 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -92,6 +92,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options
 which is a regression from MR1 (zxu via rkanter)
 
+MAPREDUCE-6293. Set job classloader on uber-job's LocalContainerLauncher
+event thread. (Sangjin Lee via gera)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f5dc9f5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
index 218ac83..ffc5326 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -80,6 +80,7 @@ public class LocalContainerLauncher extends AbstractService 
implements
   private final HashSet<File> localizedFiles;
   private final AppContext context;
   private final TaskUmbilicalProtocol umbilical;
+  private final ClassLoader jobClassLoader;
   private ExecutorService taskRunner;
   private Thread eventHandler;
   private BlockingQueue<ContainerLauncherEvent> eventQueue =
@@ -87,6 +88,12 @@ public class LocalContainerLauncher extends AbstractService 
implements
 
   public LocalContainerLauncher(AppContext context,
 TaskUmbilicalProtocol umbilical) {
+this(context, umbilical, null);
+  }
+
+  public LocalContainerLauncher(AppContext context,
+TaskUmbilicalProtocol umbilical,
+ClassLoader jobClassLoader) {
 super(LocalContainerLauncher.class.getName());
 this.context = context;
 this.umbilical = umbilical;
@@ -94,6 +101,7 @@ public class LocalContainerLauncher extends AbstractService 
implements
 // (TODO/FIXME:  pointless to use RPC to talk to self; should create
 // LocalTaskAttemptListener or similar:  implement umbilical protocol
 // but skip RPC stuff)
+this.jobClassLoader = jobClassLoader;
 
 try {
   curFC = FileContext.getFileContext(curDir.toURI());
@@ -133,6 +141,18 @@ public class LocalContainerLauncher extends 
AbstractService implements
  setDaemon(true).setNameFormat("uber-SubtaskRunner").build());
 // create and start an event handling thread
  eventHandler = new Thread(new EventHandler(), "uber-EventHandler");
+// if the job classloader is specified, set it onto the event handler as 
the
+// thread context classloader so that it can be used by the event handler
+// as well as the subtask runner threads
+if (jobClassLoader != null) {
+      LOG.info("Setting " + jobClassLoader +
+          " as the context classloader of thread " + eventHandler.getName());
+  eventHandler.setContextClassLoader(jobClassLoader);
+} 

[3/3] hadoop git commit: MAPREDUCE-6297. Task Id of the failed task in diagnostics should link to the task page. (Siqi Li via gera)

2015-04-21 Thread gera
MAPREDUCE-6297. Task Id of the failed task in diagnostics should link to the 
task page. (Siqi Li via gera)

(cherry picked from commit 89ded89e86e5d9a634d92a5d8a7c889744d97f94)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f06e3b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f06e3b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f06e3b7

Branch: refs/heads/branch-2
Commit: 6f06e3b7eabe86dfce8dc3e58aa8d5c3aebca000
Parents: 1544c63
Author: Gera Shegalov g...@apache.org
Authored: Tue Apr 21 12:36:37 2015 -0700
Committer: Gera Shegalov g...@apache.org
Committed: Tue Apr 21 14:21:57 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../org/apache/hadoop/mapreduce/TaskID.java | 35 
 .../mapreduce/v2/hs/webapp/HsJobBlock.java  |  8 -
 .../mapreduce/v2/hs/webapp/TestBlocks.java  | 20 ++-
 .../v2/hs/webapp/TestHsWebServicesTasks.java| 27 ++-
 5 files changed, 60 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f06e3b7/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index b083d00..0697534 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -95,6 +95,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6293. Set job classloader on uber-job's LocalContainerLauncher
 event thread. (Sangjin Lee via gera)
 
+MAPREDUCE-6297. Task Id of the failed task in diagnostics should link to
+the task page. (Siqi Li via gera)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f06e3b7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
index 488ffcc..b9817dd 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
@@ -25,6 +25,8 @@ import java.text.NumberFormat;
 import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -58,6 +60,9 @@ import org.apache.hadoop.io.WritableUtils;
 public class TaskID extends org.apache.hadoop.mapred.ID {
   protected static final String TASK = "task";
   protected static final NumberFormat idFormat = NumberFormat.getInstance();
+  public static final String TASK_ID_REGEX = TASK + "_(\\d+)_(\\d+)_" +
+      CharTaskTypeMaps.allTaskTypes + "_(\\d+)";
+  public static final Pattern taskIdPattern = Pattern.compile(TASK_ID_REGEX);
   static {
 idFormat.setGroupingUsed(false);
 idFormat.setMinimumIntegerDigits(6);
@@ -207,29 +212,15 @@ public class TaskID extends org.apache.hadoop.mapred.ID {
 throws IllegalArgumentException {
 if(str == null)
   return null;
-    String exceptionMsg = null;
-    try {
-      String[] parts = str.split("_");
-      if(parts.length == 5) {
-        if(parts[0].equals(TASK)) {
-          String type = parts[3];
-          TaskType t = CharTaskTypeMaps.getTaskType(type.charAt(0));
-          if(t != null) {
-            return new org.apache.hadoop.mapred.TaskID(parts[1],
-                                                       Integer.parseInt(parts[2]),
-                                                       t,
-                                                       Integer.parseInt(parts[4]));
-          } else
-            exceptionMsg = "Bad TaskType identifier. TaskId string : " + str
-                + " is not properly formed.";
-        }
-      }
-    }catch (Exception ex) {//fall below
-    }
-    if (exceptionMsg == null) {
-      exceptionMsg = "TaskId string : " + str + " is not properly formed";
+    Matcher m = taskIdPattern.matcher(str);
+    if (m.matches()) {
+      return new org.apache.hadoop.mapred.TaskID(m.group(1),
+          Integer.parseInt(m.group(2)),
+          CharTaskTypeMaps.getTaskType(m.group(3).charAt(0)),
+          Integer.parseInt(m.group(4)));
     }
+    String exceptionMsg = "TaskId string : " + str + " is not properly formed"

[1/2] hadoop git commit: HADOOP-11846 TestCertificateUtil.testCorruptPEM failing on Jenkins JDK8. (Larry McCay via stevel)

2015-04-21 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6f06e3b7e -> 6d8a49dd6
  refs/heads/trunk 89ded89e8 -> 2c1469036


HADOOP-11846 TestCertificateUtil.testCorruptPEM failing on Jenkins JDK8. (Larry 
McCay via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d8a49dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d8a49dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d8a49dd

Branch: refs/heads/branch-2
Commit: 6d8a49dd68406529f7179e7609afa19af6fab0c1
Parents: 6f06e3b
Author: Steve Loughran ste...@apache.org
Authored: Tue Apr 21 22:38:27 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Tue Apr 21 22:38:27 2015 +0100

--
 .../hadoop/security/authentication/util/TestCertificateUtil.java | 4 ++--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d8a49dd/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
index f52b6d2..ce4176c 100644
--- 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
+++ 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
@@ -52,7 +52,7 @@ public class TestCertificateUtil {
 
   @Test
   public void testCorruptPEM() throws Exception {
-    String pem = "LJMLJMMIICOjCCAaOgAwIBAgIJANXi/oWxvJNzMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNVBAYTAlVTMQ0w"
+    String pem = "MIICOjCCAaOgAwIBAgIJANXi/oWxvJNzMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNVBAYTAlVTMQ0w"
 + 
CwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRvb3AxDTALBgNVBAsTBFRl
 + 
c3QxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNTAxMDIyMTE5MjRaFw0xNjAxMDIyMTE5MjRaMF8x
 + 
CzAJBgNVBAYTAlVTMQ0wCwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRv
@@ -62,7 +62,7 @@ public class TestCertificateUtil {
 + 
7OPuuaHb25J8isiOyA3RiWuJGQlXTdkCAwEAATANBgkqhkiG9w0BAQUFAAOBgQAdRUyCUqE9sdim
 + 
Fbll9BuZDKV16WXeWGq+kTd7ETe7l0fqXjq5EnrifOai0L/pXwVvS2jrFkKQRlRxRGUNaeEBZ2Wy
 + 
9aTyR+HGHCfvwoCegc9rAVw/DLaRriSO/jnEXzYK6XLVKH+hx5UXrJ7Oyc7JjZUc3g9kCWORThCX
-        + "Mzc1xA==";
+        + "Mzc1xA++";
 try {
   CertificateUtil.parseRSAPublicKey(pem);
      fail("Should not have thrown ServletException");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d8a49dd/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1c67dda..9960a16 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -86,6 +86,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11704. DelegationTokenAuthenticationFilter must pass ipaddress
 instead of hostname to ProxyUsers#authorize (Anubhav Dhoot via asuresh)
 
+HADOOP-11846 TestCertificateUtil.testCorruptPEM failing on Jenkins JDK8.
+(Larry McCay via stevel)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[2/2] hadoop git commit: HADOOP-11846 TestCertificateUtil.testCorruptPEM failing on Jenkins JDK8. (Larry McCay via stevel)

2015-04-21 Thread stevel
HADOOP-11846 TestCertificateUtil.testCorruptPEM failing on Jenkins JDK8. (Larry 
McCay via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c146903
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c146903
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c146903

Branch: refs/heads/trunk
Commit: 2c1469036863c593a1bcba68f7c68b46f2a8e169
Parents: 89ded89
Author: Steve Loughran ste...@apache.org
Authored: Tue Apr 21 22:38:27 2015 +0100
Committer: Steve Loughran ste...@apache.org
Committed: Tue Apr 21 22:38:41 2015 +0100

--
 .../hadoop/security/authentication/util/TestCertificateUtil.java | 4 ++--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c146903/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
index f52b6d2..ce4176c 100644
--- 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
+++ 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
@@ -52,7 +52,7 @@ public class TestCertificateUtil {
 
   @Test
   public void testCorruptPEM() throws Exception {
-    String pem = "LJMLJMMIICOjCCAaOgAwIBAgIJANXi/oWxvJNzMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNVBAYTAlVTMQ0w"
+    String pem = "MIICOjCCAaOgAwIBAgIJANXi/oWxvJNzMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNVBAYTAlVTMQ0w"
 + 
CwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRvb3AxDTALBgNVBAsTBFRl
 + 
c3QxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNTAxMDIyMTE5MjRaFw0xNjAxMDIyMTE5MjRaMF8x
 + 
CzAJBgNVBAYTAlVTMQ0wCwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRv
@@ -62,7 +62,7 @@ public class TestCertificateUtil {
 + 
7OPuuaHb25J8isiOyA3RiWuJGQlXTdkCAwEAATANBgkqhkiG9w0BAQUFAAOBgQAdRUyCUqE9sdim
 + 
Fbll9BuZDKV16WXeWGq+kTd7ETe7l0fqXjq5EnrifOai0L/pXwVvS2jrFkKQRlRxRGUNaeEBZ2Wy
 + 
9aTyR+HGHCfvwoCegc9rAVw/DLaRriSO/jnEXzYK6XLVKH+hx5UXrJ7Oyc7JjZUc3g9kCWORThCX
-        + "Mzc1xA==";
+        + "Mzc1xA++";
 try {
   CertificateUtil.parseRSAPublicKey(pem);
      fail("Should not have thrown ServletException");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c146903/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9819300..02066b6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -527,6 +527,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11704. DelegationTokenAuthenticationFilter must pass ipaddress
 instead of hostname to ProxyUsers#authorize (Anubhav Dhoot via asuresh)
 
+HADOOP-11846 TestCertificateUtil.testCorruptPEM failing on Jenkins JDK8.
+(Larry McCay via stevel)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: YARN-3495. Confusing log generated by FairScheduler. Contributed by Brahma Reddy Battula.

2015-04-21 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 73ddb6b4f -> 105afd547


YARN-3495. Confusing log generated by FairScheduler. Contributed by Brahma 
Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/105afd54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/105afd54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/105afd54

Branch: refs/heads/trunk
Commit: 105afd54779852c518b978101f23526143e234a5
Parents: 73ddb6b
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Apr 22 05:47:59 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed Apr 22 05:47:59 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../server/resourcemanager/scheduler/fair/FairScheduler.java| 5 +++--
 2 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/105afd54/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b0d3bd9..236a7d0 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -233,6 +233,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3136. Fixed a synchronization problem of
 AbstractYarnScheduler#getTransferredContainers. (Sunil G via jianhe)
 
+YARN-3495. Confusing log generated by FairScheduler.
+(Brahma Reddy Battula via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/105afd54/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index a6c5416..f481de5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -796,7 +796,8 @@ public class FairScheduler extends
   protected synchronized void completedContainer(RMContainer rmContainer,
   ContainerStatus containerStatus, RMContainerEventType event) {
 if (rmContainer == null) {
-      LOG.info("Null container completed...");
+      LOG.info("Container " + containerStatus.getContainerId()
+          + " completed with event " + event);
   return;
 }
 
@@ -809,7 +810,7 @@ public class FairScheduler extends
 container.getId().getApplicationAttemptId().getApplicationId();
 if (application == null) {
       LOG.info("Container " + container + " of" +
-          " unknown application attempt " + appId +
+          " finished application " + appId +
           " completed with event " + event);
   return;
 }



[Hadoop Wiki] Update of HowToRelease by AndrewWang

2015-04-21 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The HowToRelease page has been changed by AndrewWang:
https://wiki.apache.org/hadoop/HowToRelease?action=diff&rev1=70&rev2=71

Comment:
no more svn up for the website

  = Preparation =
  
  1. Bulk update Jira to unassign from this release all issues that are 
open non-blockers and send follow-up notification to the developer list that 
this was done.
-   1. If you have not already done so, 
[[http://www.apache.org/dev/release-signing.html#keys-policy|append your code 
signing key]] to the 
[[https://dist.apache.org/repos/dist/release/hadoop/common/KEYS|KEYS]] file on 
the website. Also 
[[http://www.apache.org/dev/release-signing.html#keys-policy|upload your key to 
a public key server]] if you haven't. End users use the KEYS file (along with 
the [[http://www.apache.org/dev/release-signing.html#web-of-trust|web of 
trust]]) to validate that releases were done by an Apache committer. Once you 
commit your changes, log into {{{people.apache.org}}} and pull updates to 
{{{/www/www.apache.org/dist/hadoop/core}}}. For more details on signing 
releases, see [[http://www.apache.org/dev/release-signing.html|Signing 
Releases]] and 
[[http://www.apache.org/dev/mirror-step-by-step.html?Step-By-Step|Step-By-Step 
Guide to Mirroring Releases]].
+   1. If you have not already done so, 
[[http://www.apache.org/dev/release-signing.html#keys-policy|append your code 
signing key]] to the 
[[https://dist.apache.org/repos/dist/release/hadoop/common/KEYS|KEYS]] file. 
Once you commit your changes, they will automatically be propagated to the 
website. Also 
[[http://www.apache.org/dev/release-signing.html#keys-policy|upload your key to 
a public key server]] if you haven't. End users use the KEYS file (along with 
the [[http://www.apache.org/dev/release-signing.html#web-of-trust|web of 
trust]]) to validate that releases were done by an Apache committer. For more 
details on signing releases, see 
[[http://www.apache.org/dev/release-signing.html|Signing Releases]] and 
[[http://www.apache.org/dev/mirror-step-by-step.html?Step-By-Step|Step-By-Step 
Guide to Mirroring Releases]].
1. To deploy artifacts to the Apache Maven repository create 
{{{~/.m2/settings.xml}}}:{{{
  <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"


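For reference, appending a code signing key to a KEYS file is conventionally done with a gpg pair like the following; the key id is a placeholder to replace with your own:

# A1B2C3D4 is a placeholder key id
(gpg --list-sigs A1B2C3D4 && gpg --armor --export A1B2C3D4) >> KEYS
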
hadoop git commit: HADOOP-11827. Speed-up distcp buildListing() using threadpool (Zoran Dimitrijevic via raviprak)

2015-04-21 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6d8a49dd6 -> aa34aa5ca


HADOOP-11827. Speed-up distcp buildListing() using threadpool (Zoran 
Dimitrijevic via raviprak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa34aa5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa34aa5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa34aa5c

Branch: refs/heads/branch-2
Commit: aa34aa5caae8b399b2f333b2f1c92fafe3616622
Parents: 6d8a49d
Author: Ravi Prakash ravip...@altiscale.com
Authored: Tue Apr 21 16:43:02 2015 -0700
Committer: Ravi Prakash ravip...@altiscale.com
Committed: Tue Apr 21 16:50:40 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../apache/hadoop/tools/DistCpConstants.java|   4 +
 .../apache/hadoop/tools/DistCpOptionSwitch.java |   9 +-
 .../org/apache/hadoop/tools/DistCpOptions.java  |  27 +++
 .../org/apache/hadoop/tools/OptionsParser.java  |  12 ++
 .../apache/hadoop/tools/SimpleCopyListing.java  | 169 +++---
 .../hadoop/tools/util/ProducerConsumer.java | 177 +++
 .../apache/hadoop/tools/util/WorkReport.java|  78 
 .../apache/hadoop/tools/util/WorkRequest.java   |  53 ++
 .../hadoop/tools/util/WorkRequestProcessor.java |  38 
 .../apache/hadoop/tools/TestCopyListing.java|  20 ++-
 .../apache/hadoop/tools/TestIntegration.java|  17 ++
 .../apache/hadoop/tools/TestOptionsParser.java  |  42 +
 .../hadoop/tools/util/TestProducerConsumer.java | 109 
 14 files changed, 728 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa34aa5c/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9960a16..f6580f5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -55,6 +55,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11819. HttpServerFunctionalTest#prepareTestWebapp should create web
 app directory if it does not exist. (Rohith via vinayakumarb)
 
+HADOOP-11827. Speed-up distcp buildListing() using threadpool
+(Zoran Dimitrijevic via raviprak)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa34aa5c/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
index a1af2af..7ecb6ce 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
@@ -23,6 +23,9 @@ package org.apache.hadoop.tools;
  */
 public class DistCpConstants {
 
+  /* Default number of threads to use for building file listing */
+  public static final int DEFAULT_LISTSTATUS_THREADS = 1;
+
   /* Default number of maps to use for DistCp */
   public static final int DEFAULT_MAPS = 20;
 
@@ -47,6 +50,7 @@ public class DistCpConstants {
   public static final String CONF_LABEL_SYNC_FOLDERS = "distcp.sync.folders";
   public static final String CONF_LABEL_DELETE_MISSING = "distcp.delete.missing.source";
   public static final String CONF_LABEL_SSL_CONF = "distcp.keystore.resource";
+  public static final String CONF_LABEL_LISTSTATUS_THREADS = "distcp.liststatus.threads";
   public static final String CONF_LABEL_MAX_MAPS = "distcp.max.maps";
   public static final String CONF_LABEL_SOURCE_LISTING = "distcp.source.listing";
   public static final String CONF_LABEL_COPY_STRATEGY = "distcp.copy.strategy";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa34aa5c/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
index e9c7d46..f90319d 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
@@ -83,7 +83,14 @@ public enum DistCpOptionSwitch {
   SSL_CONF(DistCpConstants.CONF_LABEL_SSL_CONF,
       new Option("mapredSslConf", true, "Configuration for ssl config file" +
   , to use with 
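
The diffstat at the top of this message adds ProducerConsumer, WorkRequest, WorkReport
and WorkRequestProcessor utilities, which points at a bounded work-queue design: worker
threads drain directory-listing requests and emit listing reports. A self-contained
sketch of that shape using plain java.util.concurrent (the names and semantics are
simplified stand-ins, not the committed classes):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;

public class ListingPoolSketch {
  public static void main(String[] args) throws InterruptedException {
    BlockingQueue<String> requests = new LinkedBlockingQueue<>();
    BlockingQueue<String> reports = new LinkedBlockingQueue<>();
    int numThreads = 4; // would come from distcp.liststatus.threads

    ExecutorService pool = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
      pool.execute(() -> {
        try {
          while (true) {
            String dir = requests.take();      // blocks until work arrives
            reports.put("listed: " + dir);     // stand-in for a listStatus call
          }
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();  // exit cleanly on shutdown
        }
      });
    }

    requests.put("/src/a");
    requests.put("/src/b");
    System.out.println(reports.take());
    System.out.println(reports.take());
    pool.shutdownNow();
  }
}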

hadoop git commit: YARN-3494. Expose AM resource limit and usage in CS QueueMetrics. Contributed by Rohith Sharmaks

2015-04-21 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk e71d0d87d -> bdd90110e


YARN-3494. Expose AM resource limit and usage in CS QueueMetrics. Contributed 
by Rohith Sharmaks


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bdd90110
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bdd90110
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bdd90110

Branch: refs/heads/trunk
Commit: bdd90110e6904b59746812d9a093924a65e72280
Parents: e71d0d8
Author: Jian He jia...@apache.org
Authored: Tue Apr 21 20:06:20 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Tue Apr 21 20:06:20 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../resourcemanager/scheduler/QueueMetrics.java |  15 ++-
 .../scheduler/capacity/AbstractCSQueue.java |  13 +-
 .../scheduler/capacity/CSQueueMetrics.java  | 133 +++
 .../scheduler/capacity/LeafQueue.java   |  13 +-
 .../capacity/TestApplicationLimits.java |  10 +-
 .../scheduler/capacity/TestLeafQueue.java   |   6 +
 7 files changed, 174 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdd90110/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8b06dfc..9150372 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -151,6 +151,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3451. Display attempt start time and elapsed time on the web UI.
 (Rohith Sharmaks via jianhe)
 
+YARN-3494. Expose AM resource limit and usage in CS QueueMetrics. (Rohith
+Sharmaks via jianhe)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdd90110/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 30c1113..09fd73e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -83,16 +83,17 @@ public class QueueMetrics implements MetricsSource {
  static final MetricsInfo RECORD_INFO = info("QueueMetrics",
  "Metrics for the resource scheduler");
  protected static final MetricsInfo QUEUE_INFO = info("Queue", "Metrics by queue");
-  static final MetricsInfo USER_INFO = info("User", "Metrics by user");
+  protected static final MetricsInfo USER_INFO =
+  info("User", "Metrics by user");
  static final Splitter Q_SPLITTER =
  Splitter.on('.').omitEmptyStrings().trimResults();

-  final MetricsRegistry registry;
-  final String queueName;
-  final QueueMetrics parent;
-  final MetricsSystem metricsSystem;
-  private final Map<String, QueueMetrics> users;
-  private final Configuration conf;
+  protected final MetricsRegistry registry;
+  protected final String queueName;
+  protected final QueueMetrics parent;
+  protected final MetricsSystem metricsSystem;
+  protected final Map<String, QueueMetrics> users;
+  protected final Configuration conf;
 
   protected QueueMetrics(MetricsSystem ms, String queueName, Queue parent, 
   boolean enableUserMetrics, Configuration conf) {
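
The point of the visibility changes above (package-private and private members becoming
protected) is to let the new CSQueueMetrics subclass reuse the parent's registry and
queue state when it registers the AM resource-limit and AM-usage metrics. A
self-contained sketch of the pattern this enables (all names here are stand-ins, not
the real QueueMetrics/CSQueueMetrics API):

import java.util.HashMap;
import java.util.Map;

class BaseQueueMetrics {
  protected final String queueName;
  protected final Map<String, Long> registry = new HashMap<>();

  BaseQueueMetrics(String queueName) {
    this.queueName = queueName;
  }
}

class CsQueueMetricsSketch extends BaseQueueMetrics {
  CsQueueMetricsSketch(String queueName) {
    super(queueName);
  }

  void setAMResourceLimitMB(long mb) {
    // Directly usable because queueName and registry are protected in the
    // parent, mirroring the private -> protected change in the diff above.
    registry.put(queueName + ".AMResourceLimitMB", mb);
  }
}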

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdd90110/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 550c6aa..9233e01 100644
--- 

hadoop git commit: YARN-3494. Expose AM resource limit and usage in CS QueueMetrics. Contributed by Rohith Sharmaks (cherry picked from commit bdd90110e6904b59746812d9a093924a65e72280)

2015-04-21 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8a9599fb9 -> dc4698bb3


YARN-3494. Expose AM resource limit and usage in CS QueueMetrics. Contributed 
by Rohith Sharmaks
(cherry picked from commit bdd90110e6904b59746812d9a093924a65e72280)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc4698bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc4698bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc4698bb

Branch: refs/heads/branch-2
Commit: dc4698bb3345870df3afdc5aaeea4d66c094bd2b
Parents: 8a9599f
Author: Jian He jia...@apache.org
Authored: Tue Apr 21 20:06:20 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Tue Apr 21 20:34:30 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../resourcemanager/scheduler/QueueMetrics.java |  15 ++-
 .../scheduler/capacity/AbstractCSQueue.java |  13 +-
 .../scheduler/capacity/CSQueueMetrics.java  | 133 +++
 .../scheduler/capacity/LeafQueue.java   |  13 +-
 .../capacity/TestApplicationLimits.java |  10 +-
 .../scheduler/capacity/TestLeafQueue.java   |   6 +
 7 files changed, 174 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc4698bb/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ef78ad0..73a398c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -103,6 +103,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3451. Display attempt start time and elapsed time on the web UI.
 (Rohith Sharmaks via jianhe)
 
+YARN-3494. Expose AM resource limit and usage in CS QueueMetrics. (Rohith
+Sharmaks via jianhe)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc4698bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 30c1113..09fd73e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -83,16 +83,17 @@ public class QueueMetrics implements MetricsSource {
  static final MetricsInfo RECORD_INFO = info("QueueMetrics",
  "Metrics for the resource scheduler");
  protected static final MetricsInfo QUEUE_INFO = info("Queue", "Metrics by queue");
-  static final MetricsInfo USER_INFO = info("User", "Metrics by user");
+  protected static final MetricsInfo USER_INFO =
+  info("User", "Metrics by user");
  static final Splitter Q_SPLITTER =
  Splitter.on('.').omitEmptyStrings().trimResults();

-  final MetricsRegistry registry;
-  final String queueName;
-  final QueueMetrics parent;
-  final MetricsSystem metricsSystem;
-  private final Map<String, QueueMetrics> users;
-  private final Configuration conf;
+  protected final MetricsRegistry registry;
+  protected final String queueName;
+  protected final QueueMetrics parent;
+  protected final MetricsSystem metricsSystem;
+  protected final Map<String, QueueMetrics> users;
+  protected final Configuration conf;
 
   protected QueueMetrics(MetricsSystem ms, String queueName, Queue parent, 
   boolean enableUserMetrics, Configuration conf) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc4698bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java

hadoop git commit: HDFS-8216. TestDFSStripedOutputStream should use BlockReaderTestUtil to create BlockReader. Contributed by Tsz Wo Nicholas Sze.

2015-04-21 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 44a214858 -> 27f7378a2


HDFS-8216. TestDFSStripedOutputStream should use BlockReaderTestUtil to create 
BlockReader. Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27f7378a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27f7378a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27f7378a

Branch: refs/heads/HDFS-7285
Commit: 27f7378a24e69580939f65674bc69d89e0d7d5e7
Parents: 44a2148
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 21 20:56:39 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue Apr 21 20:56:39 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 +
 .../apache/hadoop/hdfs/BlockReaderTestUtil.java |  7 +--
 .../hadoop/hdfs/TestBlockReaderFactory.java | 16 +++---
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 58 ++--
 4 files changed, 20 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27f7378a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 8f28285..d8f2e9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -107,3 +107,6 @@
 
 HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.
 (szetszwo)
+
+HDFS-8216. TestDFSStripedOutputStream should use BlockReaderTestUtil to 
+create BlockReader. (szetszwo via Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27f7378a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
index 88b7f37..829cf03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
@@ -165,20 +165,19 @@ public class BlockReaderTestUtil {
*/
   public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int 
lenToRead)
   throws IOException {
-return getBlockReader(cluster, testBlock, offset, lenToRead);
+return getBlockReader(cluster.getFileSystem(), testBlock, offset, 
lenToRead);
   }
 
   /**
* Get a BlockReader for the given block.
*/
-  public static BlockReader getBlockReader(MiniDFSCluster cluster,
-  LocatedBlock testBlock, int offset, int lenToRead) throws IOException {
+  public static BlockReader getBlockReader(final DistributedFileSystem fs,
+  LocatedBlock testBlock, int offset, long lenToRead) throws IOException {
 InetSocketAddress targetAddr = null;
 ExtendedBlock block = testBlock.getBlock();
 DatanodeInfo[] nodes = testBlock.getLocations();
 targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
 
-final DistributedFileSystem fs = cluster.getFileSystem();
 return new BlockReaderFactory(fs.getClient().getConf()).
   setInetSocketAddress(targetAddr).
   setBlock(block).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27f7378a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
index d8aceff..1a767c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
@@ -250,8 +250,8 @@ public class TestBlockReaderFactory {
   LocatedBlock lblock = locatedBlocks.get(0); // first block
   BlockReader blockReader = null;
   try {
-blockReader = BlockReaderTestUtil.
-getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
+blockReader = BlockReaderTestUtil.getBlockReader(
+cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
  Assert.fail("expected getBlockReader to fail the first time.");
   } catch (Throwable t) { 
  Assert.assertTrue("expected to see 'TCP reads were disabled " +
@@ -265,8 +265,8 @@ public 

hadoop git commit: YARN-3503. Expose disk utilization percentage and bad local and log dir counts in NM metrics. Contributed by Varun Vasudev

2015-04-21 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk bdd90110e -> 674c7ef64


YARN-3503. Expose disk utilization percentage and bad local and log dir counts 
in NM metrics. Contributed by Varun Vasudev


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/674c7ef6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/674c7ef6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/674c7ef6

Branch: refs/heads/trunk
Commit: 674c7ef64916fabbe59c8d6cdd50ca19cf7ddb7c
Parents: bdd9011
Author: Jian He jia...@apache.org
Authored: Tue Apr 21 20:55:59 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Tue Apr 21 20:57:02 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../server/nodemanager/DirectoryCollection.java | 31 +
 .../nodemanager/LocalDirsHandlerService.java| 21 +
 .../yarn/server/nodemanager/NodeManager.java|  2 +-
 .../nodemanager/metrics/NodeManagerMetrics.java | 48 
 .../nodemanager/TestDirectoryCollection.java| 14 ++
 .../TestLocalDirsHandlerService.java| 31 -
 7 files changed, 148 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/674c7ef6/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 9150372..3bb6f89 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -154,6 +154,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3494. Expose AM resource limit and usage in CS QueueMetrics. (Rohith
 Sharmaks via jianhe)
 
+YARN-3503. Expose disk utilization percentage and bad local and log dir 
+counts in NM metrics. (Varun Vasudev via jianhe)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/674c7ef6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
index c019aa9..2658918 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
@@ -82,6 +82,8 @@ class DirectoryCollection {
   private float diskUtilizationPercentageCutoff;
   private long diskUtilizationSpaceCutoff;
 
+  private int goodDirsDiskUtilizationPercentage;
+
   /**
* Create collection for the directories specified. No check for free space.
* 
@@ -277,6 +279,7 @@ class DirectoryCollection {
 + dirsFailedCheck.get(dir).message);
   }
 }
+setGoodDirsDiskUtilizationPercentage();
 return setChanged;
   }
 
@@ -390,4 +393,32 @@ class DirectoryCollection {
 diskUtilizationSpaceCutoff < 0 ? 0 : diskUtilizationSpaceCutoff;
 this.diskUtilizationSpaceCutoff = diskUtilizationSpaceCutoff;
   }
+
+  private void setGoodDirsDiskUtilizationPercentage() {
+
+long totalSpace = 0;
+long usableSpace = 0;
+
+for (String dir : localDirs) {
+  File f = new File(dir);
+  if (!f.isDirectory()) {
+continue;
+  }
+  totalSpace += f.getTotalSpace();
+  usableSpace += f.getUsableSpace();
+}
+if (totalSpace != 0) {
+  long tmp = ((totalSpace - usableSpace) * 100) / totalSpace;
+  if (Integer.MIN_VALUE < tmp && Integer.MAX_VALUE > tmp) {
+goodDirsDiskUtilizationPercentage = (int) tmp;
+  }
+} else {
+  // got no good dirs
+  goodDirsDiskUtilizationPercentage = 0;
+}
+  }
+
+  public int getGoodDirsDiskUtilizationPercentage() {
+return goodDirsDiskUtilizationPercentage;
+  }
 }
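
The aggregation above sums getTotalSpace()/getUsableSpace() across the good local
directories and derives a single integer percentage. A worked example with made-up
volumes, mirroring the arithmetic in setGoodDirsDiskUtilizationPercentage:

public class UtilizationExample {
  static int utilizationPercent(long totalBytes, long usableBytes) {
    if (totalBytes == 0) {
      return 0; // mirrors the "got no good dirs" branch in the patch
    }
    // e.g. 500 GB total with 125 GB usable: (500 - 125) * 100 / 500 = 75.
    long pct = ((totalBytes - usableBytes) * 100) / totalBytes;
    // pct is in [0, 100] whenever usableBytes <= totalBytes, so the narrowing
    // cast is safe; the patch still range-checks defensively since the inputs
    // come from java.io.File.
    return (int) pct;
  }

  public static void main(String[] args) {
    long total = 500L * 1024 * 1024 * 1024;   // 500 GB
    long usable = 125L * 1024 * 1024 * 1024;  // 125 GB
    System.out.println(utilizationPercent(total, usable)); // prints 75
  }
}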

http://git-wip-us.apache.org/repos/asf/hadoop/blob/674c7ef6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
--
diff --git 

hadoop git commit: YARN-3503. Expose disk utilization percentage and bad local and log dir counts in NM metrics. Contributed by Varun Vasudev (cherry picked from commit 674c7ef64916fabbe59c8d6cdd50ca1

2015-04-21 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 dc4698bb3 -> 613a78338


YARN-3503. Expose disk utilization percentage and bad local and log dir counts 
in NM metrics. Contributed by Varun Vasudev
(cherry picked from commit 674c7ef64916fabbe59c8d6cdd50ca19cf7ddb7c)

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/613a7833
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/613a7833
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/613a7833

Branch: refs/heads/branch-2
Commit: 613a7833805c9455ded608ccddf2b9a08fde3816
Parents: dc4698b
Author: Jian He jia...@apache.org
Authored: Tue Apr 21 20:55:59 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Tue Apr 21 21:06:06 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../server/nodemanager/DirectoryCollection.java | 31 +
 .../nodemanager/LocalDirsHandlerService.java| 21 +
 .../yarn/server/nodemanager/NodeManager.java|  2 +
 .../nodemanager/metrics/NodeManagerMetrics.java | 48 
 .../nodemanager/TestDirectoryCollection.java| 14 ++
 .../TestLocalDirsHandlerService.java| 31 -
 7 files changed, 149 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/613a7833/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 73a398c..fce50e7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -106,6 +106,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3494. Expose AM resource limit and usage in CS QueueMetrics. (Rohith
 Sharmaks via jianhe)
 
+YARN-3503. Expose disk utilization percentage and bad local and log dir 
+counts in NM metrics. (Varun Vasudev via jianhe)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/613a7833/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
index c019aa9..2658918 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
@@ -82,6 +82,8 @@ class DirectoryCollection {
   private float diskUtilizationPercentageCutoff;
   private long diskUtilizationSpaceCutoff;
 
+  private int goodDirsDiskUtilizationPercentage;
+
   /**
* Create collection for the directories specified. No check for free space.
* 
@@ -277,6 +279,7 @@ class DirectoryCollection {
 + dirsFailedCheck.get(dir).message);
   }
 }
+setGoodDirsDiskUtilizationPercentage();
 return setChanged;
   }
 
@@ -390,4 +393,32 @@ class DirectoryCollection {
 diskUtilizationSpaceCutoff < 0 ? 0 : diskUtilizationSpaceCutoff;
 this.diskUtilizationSpaceCutoff = diskUtilizationSpaceCutoff;
   }
+
+  private void setGoodDirsDiskUtilizationPercentage() {
+
+long totalSpace = 0;
+long usableSpace = 0;
+
+for (String dir : localDirs) {
+  File f = new File(dir);
+  if (!f.isDirectory()) {
+continue;
+  }
+  totalSpace += f.getTotalSpace();
+  usableSpace += f.getUsableSpace();
+}
+if (totalSpace != 0) {
+  long tmp = ((totalSpace - usableSpace) * 100) / totalSpace;
+  if (Integer.MIN_VALUE < tmp && Integer.MAX_VALUE > tmp) {
+goodDirsDiskUtilizationPercentage = (int) tmp;
+  }
+} else {
+  // got no good dirs
+  goodDirsDiskUtilizationPercentage = 0;
+}
+  }
+
+  public int getGoodDirsDiskUtilizationPercentage() {
+return goodDirsDiskUtilizationPercentage;
+  }
 }


hadoop git commit: HDFS-8212. DistributedFileSystem.createErasureCodingZone should pass schema in FileSystemLinkResolver. Contributed by Tsz Wo Nicholas Sze.

2015-04-21 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 27f7378a2 -> 9126e5eb5


HDFS-8212. DistributedFileSystem.createErasureCodingZone should pass schema in 
FileSystemLinkResolver. Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9126e5eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9126e5eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9126e5eb

Branch: refs/heads/HDFS-7285
Commit: 9126e5eb5ed5a68e0ab41e3de8d26e43ad575b86
Parents: 27f7378
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 21 21:03:07 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue Apr 21 21:03:07 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  | 3 +++
 .../main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9126e5eb/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index d8f2e9d..3d86f05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -110,3 +110,6 @@
 
 HDFS-8216. TestDFSStripedOutputStream should use BlockReaderTestUtil to 
 create BlockReader. (szetszwo via Zhe Zhang)
+
+HDFS-8212. DistributedFileSystem.createErasureCodingZone should pass schema
+in FileSystemLinkResolver. (szetszwo via Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9126e5eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 833e1e8..faa8d66 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2277,7 +2277,7 @@ public class DistributedFileSystem extends FileSystem {
   @Override
   public Void doCall(final Path p) throws IOException,
   UnresolvedLinkException {
-dfs.createErasureCodingZone(getPathName(p), null);
+dfs.createErasureCodingZone(getPathName(p), schema);
 return null;
   }
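
The one-line fix above is a classic capture bug: the anonymous FileSystemLinkResolver
must forward the enclosing method's schema argument rather than a null placeholder,
otherwise every symlink-resolved call silently drops the caller's schema. A
self-contained sketch of the pitfall and the fix (Resolver is a simplified stand-in for
FileSystemLinkResolver, and "RS-6-3" is a made-up schema name):

interface Resolver<T> {
  T doCall(String path) throws Exception;
}

public class CaptureSketch {
  static String createZone(String path, String schema) throws Exception {
    Resolver<String> r = new Resolver<String>() {
      @Override
      public String doCall(String p) {
        // Buggy version: return create(p, null);
        return create(p, schema); // fixed: pass the captured parameter through
      }
    };
    return r.doCall(path);
  }

  static String create(String p, String schema) {
    return p + " -> " + schema;
  }

  public static void main(String[] args) throws Exception {
    System.out.println(createZone("/ec", "RS-6-3")); // prints "/ec -> RS-6-3"
  }
}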
 



[2/2] hadoop git commit: HDFS-8185. Separate client related routines in HAUtil into a new class. Contributed by Haohui Mai.

2015-04-21 Thread wheat9
HDFS-8185. Separate client related routines in HAUtil into a new class. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f8003dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f8003dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f8003dc

Branch: refs/heads/trunk
Commit: 6f8003dc7bc9e8be7b0512c514d370c303faf003
Parents: 674c7ef
Author: Haohui Mai whe...@apache.org
Authored: Thu Apr 16 15:45:46 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Tue Apr 21 21:59:47 2015 -0700

--
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   | 172 +++
 .../org/apache/hadoop/hdfs/HAUtilClient.java|  95 
 .../hdfs/client/HdfsClientConfigKeys.java   |   6 +
 .../hdfs/protocol/HdfsConstantsClient.java  |   6 +
 .../hadoop/hdfs/web/WebHdfsConstants.java   |   3 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   8 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  17 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java| 217 +++
 .../hadoop/hdfs/DistributedFileSystem.java  |   2 +-
 .../java/org/apache/hadoop/hdfs/HAUtil.java |  74 +--
 .../org/apache/hadoop/hdfs/NameNodeProxies.java |   8 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java |   7 -
 .../datanode/web/webhdfs/ParameterParser.java   |   8 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |   4 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  24 +-
 .../apache/hadoop/hdfs/tools/DFSHAAdmin.java|   5 +-
 .../hdfs/tools/DelegationTokenFetcher.java  |   7 +-
 .../hadoop/hdfs/web/SWebHdfsFileSystem.java |   4 +-
 .../org/apache/hadoop/hdfs/web/TokenAspect.java |  10 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  20 +-
 .../org/apache/hadoop/fs/TestSymlinkHdfs.java   |   3 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   2 +-
 .../hadoop/hdfs/TestDFSClientRetries.java   |   6 +-
 .../org/apache/hadoop/hdfs/TestDFSUtil.java |   6 +-
 .../hadoop/hdfs/TestDistributedFileSystem.java  |   4 +-
 .../apache/hadoop/hdfs/TestEncryptionZones.java |   7 +-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  |   6 +-
 .../sasl/SaslDataTransferTestCase.java  |   1 -
 .../hdfs/security/TestDelegationToken.java  |   3 +-
 .../TestDelegationTokenForProxyUser.java|   3 +-
 .../blockmanagement/TestReplicationPolicy.java  |   2 -
 .../web/webhdfs/TestParameterParser.java|   7 +-
 .../hdfs/server/namenode/TestAuditLogs.java |   9 +-
 .../hdfs/server/namenode/TestMalformedURLs.java |   4 +-
 .../TestNameNodeRespectsBindHostKeys.java   |   3 +-
 .../namenode/ha/TestDelegationTokensWithHA.java |  12 +-
 .../server/namenode/ha/TestHAConfiguration.java |   2 +-
 .../hadoop/hdfs/tools/TestDFSHAAdmin.java   |   4 +-
 .../hdfs/web/TestFSMainOperationsWebHdfs.java   |   2 +-
 .../hadoop/hdfs/web/TestHttpsFileSystem.java|   1 -
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java |  27 ++-
 .../apache/hadoop/hdfs/web/TestWebHDFSAcl.java  |   5 +-
 .../hadoop/hdfs/web/TestWebHDFSForHA.java   |   3 +-
 .../hadoop/hdfs/web/TestWebHDFSXAttr.java   |   2 +-
 .../hdfs/web/TestWebHdfsFileSystemContract.java |   5 +-
 .../hadoop/hdfs/web/TestWebHdfsTimeouts.java|   2 +-
 .../apache/hadoop/hdfs/web/TestWebHdfsUrl.java  |   2 +-
 .../web/TestWebHdfsWithMultipleNameNodes.java   |   3 +-
 .../apache/hadoop/hdfs/web/WebHdfsTestUtil.java |  13 +-
 50 files changed, 448 insertions(+), 401 deletions(-)
--
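
The diffstat shows the shape of the split: purely client-side helpers leave
DFSUtil/HAUtil in hadoop-hdfs for new DFSUtilClient/HAUtilClient classes in
hadoop-hdfs-client, and call sites are updated (the companion [1/2] messages later in
this digest show HAUtil.isTokenForLogicalUri becoming HAUtilClient.isTokenForLogicalUri
in the tests). Where source compatibility matters, the usual alternative is a
deprecate-and-delegate shim; a sketch of that pattern with stand-in names and a guessed
token-service prefix, not the committed code:

final class HAUtilClientSketch {
  private HAUtilClientSketch() {}

  // Stand-in for a moved client-side helper; "ha-hdfs:" is used here purely
  // as an illustrative logical-URI token-service prefix.
  static boolean isTokenForLogicalUri(String tokenService) {
    return tokenService != null && tokenService.startsWith("ha-hdfs:");
  }
}

final class HAUtilSketch {
  private HAUtilSketch() {}

  /** @deprecated moved to the client module; kept only as a delegate. */
  @Deprecated
  static boolean isTokenForLogicalUri(String tokenService) {
    return HAUtilClientSketch.isTokenForLogicalUri(tokenService);
  }
}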


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 2817b66..84fb12c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -17,11 +17,28 @@
  */
 package org.apache.hadoop.hdfs;
 
+import com.google.common.base.Joiner;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.UnsupportedEncodingException;
+import java.net.InetSocketAddress;
+import java.util.Collection;
+import 

[2/2] hadoop git commit: HDFS-8185. Separate client related routines in HAUtil into a new class. Contributed by Haohui Mai.

2015-04-21 Thread wheat9
HDFS-8185. Separate client related routines in HAUtil into a new class. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f02ca4ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f02ca4ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f02ca4ab

Branch: refs/heads/branch-2
Commit: f02ca4ab158aa2257e839a1f74bc8254e1a3d61b
Parents: 613a783
Author: Haohui Mai whe...@apache.org
Authored: Thu Apr 16 15:45:46 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Tue Apr 21 22:09:13 2015 -0700

--
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   | 172 +++
 .../org/apache/hadoop/hdfs/HAUtilClient.java|  95 
 .../hdfs/client/HdfsClientConfigKeys.java   |   6 +
 .../hdfs/protocol/HdfsConstantsClient.java  |   6 +
 .../hadoop/hdfs/web/WebHdfsConstants.java   |   3 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   8 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  17 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java| 217 +++
 .../hadoop/hdfs/DistributedFileSystem.java  |   2 +-
 .../java/org/apache/hadoop/hdfs/HAUtil.java |  74 +--
 .../org/apache/hadoop/hdfs/NameNodeProxies.java |   8 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java |   7 -
 .../datanode/web/webhdfs/ParameterParser.java   |   8 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |   4 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  25 +--
 .../apache/hadoop/hdfs/tools/DFSHAAdmin.java|   5 +-
 .../hdfs/tools/DelegationTokenFetcher.java  |   2 +
 .../hadoop/hdfs/web/SWebHdfsFileSystem.java |   4 +-
 .../org/apache/hadoop/hdfs/web/TokenAspect.java |  10 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  20 +-
 .../org/apache/hadoop/fs/TestSymlinkHdfs.java   |   3 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   2 +-
 .../hadoop/hdfs/TestDFSClientRetries.java   |   6 +-
 .../org/apache/hadoop/hdfs/TestDFSUtil.java |   6 +-
 .../hadoop/hdfs/TestDistributedFileSystem.java  |   4 +-
 .../apache/hadoop/hdfs/TestEncryptionZones.java |   7 +-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  |   6 +-
 .../sasl/SaslDataTransferTestCase.java  |   1 -
 .../hdfs/security/TestDelegationToken.java  |   3 +-
 .../TestDelegationTokenForProxyUser.java|   3 +-
 .../blockmanagement/TestReplicationPolicy.java  |   2 -
 .../web/webhdfs/TestParameterParser.java|   7 +-
 .../hdfs/server/namenode/TestAuditLogs.java |   9 +-
 .../hdfs/server/namenode/TestMalformedURLs.java |   4 +-
 .../TestNameNodeRespectsBindHostKeys.java   |   3 +-
 .../namenode/ha/TestDelegationTokensWithHA.java |  12 +-
 .../server/namenode/ha/TestHAConfiguration.java |   2 +-
 .../hadoop/hdfs/tools/TestDFSHAAdmin.java   |   4 +-
 .../hdfs/web/TestFSMainOperationsWebHdfs.java   |   2 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java |  26 +--
 .../apache/hadoop/hdfs/web/TestWebHDFSAcl.java  |   5 +-
 .../hadoop/hdfs/web/TestWebHDFSForHA.java   |   3 +-
 .../hadoop/hdfs/web/TestWebHDFSXAttr.java   |   2 +-
 .../hdfs/web/TestWebHdfsFileSystemContract.java |   4 +-
 .../hadoop/hdfs/web/TestWebHdfsTimeouts.java|   2 +-
 .../apache/hadoop/hdfs/web/TestWebHdfsUrl.java  |   2 +-
 .../web/TestWebHdfsWithMultipleNameNodes.java   |   2 +-
 .../apache/hadoop/hdfs/web/WebHdfsTestUtil.java |  13 +-
 49 files changed, 447 insertions(+), 394 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f02ca4ab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 2817b66..84fb12c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -17,11 +17,28 @@
  */
 package org.apache.hadoop.hdfs;
 
+import com.google.common.base.Joiner;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.UnsupportedEncodingException;
+import java.net.InetSocketAddress;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+
+import static 

[1/2] hadoop git commit: HDFS-8185. Separate client related routines in HAUtil into a new class. Contributed by Haohui Mai.

2015-04-21 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 613a78338 -> f02ca4ab1


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f02ca4ab/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 944a634..3f89267 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.StatefulBlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -61,7 +60,6 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.test.PathUtils;
-import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.log4j.spi.LoggingEvent;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f02ca4ab/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
index 217d6b5..59fd18f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.HAUtilClient;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.resources.DelegationParam;
 import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam;
@@ -30,11 +30,8 @@ import org.junit.Test;
 
 import io.netty.handler.codec.http.QueryStringDecoder;
 
-import javax.servlet.ServletContext;
-
 import java.io.IOException;
 
-import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 
 public class TestParameterParser {
@@ -51,7 +48,7 @@ public class TestParameterParser {
  + DelegationParam.NAME + "=" + token.encodeToUrlString());
 ParameterParser testParser = new ParameterParser(decoder, conf);
 final Token<DelegationTokenIdentifier> tok2 = testParser.delegationToken();
-Assert.assertTrue(HAUtil.isTokenForLogicalUri(tok2));
+Assert.assertTrue(HAUtilClient.isTokenForLogicalUri(tok2));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f02ca4ab/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
index d034cc3..231fc55 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.web.HftpFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.AccessControlException;
@@ -198,7 +199,7 @@ public class TestAuditLogs {
 
 setupAuditLogs();
 
-WebHdfsFileSystem webfs = 
WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, 

[1/2] hadoop git commit: HDFS-8185. Separate client related routines in HAUtil into a new class. Contributed by Haohui Mai.

2015-04-21 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 674c7ef64 -> 6f8003dc7


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 32fae45..296003f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -50,7 +50,6 @@ import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.StatefulBlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -63,7 +62,6 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.test.PathUtils;
-import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.log4j.spi.LoggingEvent;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
index 217d6b5..59fd18f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.HAUtilClient;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.resources.DelegationParam;
 import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam;
@@ -30,11 +30,8 @@ import org.junit.Test;
 
 import io.netty.handler.codec.http.QueryStringDecoder;
 
-import javax.servlet.ServletContext;
-
 import java.io.IOException;
 
-import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 
 public class TestParameterParser {
@@ -51,7 +48,7 @@ public class TestParameterParser {
  + DelegationParam.NAME + "=" + token.encodeToUrlString());
 ParameterParser testParser = new ParameterParser(decoder, conf);
 final Token<DelegationTokenIdentifier> tok2 = testParser.delegationToken();
-Assert.assertTrue(HAUtil.isTokenForLogicalUri(tok2));
+Assert.assertTrue(HAUtilClient.isTokenForLogicalUri(tok2));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
index 7d06241..0699c31 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.AccessControlException;
@@ -196,7 +197,7 @@ public class TestAuditLogs {
 
 setupAuditLogs();
 
-WebHdfsFileSystem webfs = 
WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, 

[Hadoop Wiki] Update of HowToRelease by AndrewWang

2015-04-21 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The HowToRelease page has been changed by AndrewWang:
https://wiki.apache.org/hadoop/HowToRelease?action=diff&rev1=69&rev2=70

Comment:
update KEYS file instructions

  = Preparation =
  
  1. Bulk update Jira to unassign from this release all issues that are 
open non-blockers and send follow-up notification to the developer list that 
this was done.
+   1. If you have not already done so, 
[[http://www.apache.org/dev/release-signing.html#keys-policy|append your code 
signing key]] to the 
[[https://dist.apache.org/repos/dist/release/hadoop/common/KEYS|KEYS]] file on 
the website. Also 
[[http://www.apache.org/dev/release-signing.html#keys-policy|upload your key to 
a public key server]] if you haven't. End users use the KEYS file (along with 
the [[http://www.apache.org/dev/release-signing.html#web-of-trust|web of 
trust]]) to validate that releases were done by an Apache committer. Once you 
commit your changes, log into {{{people.apache.org}}} and pull updates to 
{{{/www/www.apache.org/dist/hadoop/core}}}. For more details on signing 
releases, see [[http://www.apache.org/dev/release-signing.html|Signing 
Releases]] and 
[[http://www.apache.org/dev/mirror-step-by-step.html?Step-By-Step|Step-By-Step 
Guide to Mirroring Releases]].
-   1. If you have not already done so, update your @apache.org account via 
[[http://id.apache.org/|id.apache.org]] with your key; also add and commit your 
public key to the Hadoop repository 
[[http://svn.apache.org/repos/asf/hadoop/common/dist/KEYS|KEYS]], appending the 
output of the following commands:{{{
- gpg --armor --fingerprint --list-sigs keyid
- gpg --armor --export keyid
- }}} and publish your key at [[http://pgp.mit.edu/]]. Once you commit your 
changes, log into {{{people.apache.org}}} and pull updates to 
{{{/www/www.apache.org/dist/hadoop/core}}}. For more details on signing 
releases, see [[http://www.apache.org/dev/release-signing.html|Signing 
Releases]] and 
[[http://www.apache.org/dev/mirror-step-by-step.html?Step-By-Step|Step-By-Step 
Guide to Mirroring Releases]].
1. To deploy artifacts to the Apache Maven repository create 
{{{~/.m2/settings.xml}}}:{{{
  <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"


hadoop git commit: YARN-3410. YARN admin should be able to remove individual application records from RMStateStore. (Rohith Sharmaks via wangda)

2015-04-21 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 aa34aa5ca -> 8a9599fb9


YARN-3410. YARN admin should be able to remove individual application records 
from RMStateStore. (Rohith Sharmaks via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a9599fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a9599fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a9599fb

Branch: refs/heads/branch-2
Commit: 8a9599fb9e644ff4934c523cbf595180a963db0f
Parents: aa34aa5
Author: Wangda Tan wan...@apache.org
Authored: Tue Apr 21 17:47:27 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Tue Apr 21 17:47:27 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 hadoop-yarn-project/hadoop-yarn/bin/yarn|  4 ++-
 .../server/resourcemanager/ResourceManager.java | 34 ++--
 .../recovery/FileSystemRMStateStore.java|  9 ++
 .../recovery/LeveldbRMStateStore.java   | 12 +++
 .../recovery/MemoryRMStateStore.java|  4 +++
 .../recovery/NullRMStateStore.java  |  5 +++
 .../resourcemanager/recovery/RMStateStore.java  |  9 ++
 .../recovery/ZKRMStateStore.java|  9 ++
 .../recovery/RMStateStoreTestBase.java  | 15 +
 .../recovery/TestFSRMStateStore.java|  1 +
 .../recovery/TestLeveldbRMStateStore.java   |  6 
 .../recovery/TestZKRMStateStore.java|  1 +
 .../src/site/markdown/YarnCommands.md   |  1 +
 14 files changed, 110 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a9599fb/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 06b97e4..ef78ad0 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -42,6 +42,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3463. Integrate OrderingPolicy Framework with CapacityScheduler.
 (Craig Welch via wangda)
 
+YARN-3410. YARN admin should be able to remove individual application 
+records from RMStateStore. (Rohith Sharmaks via wangda)
+
   IMPROVEMENTS
 
 YARN-1880. Cleanup TestApplicationClientProtocolOnHA

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a9599fb/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index fa6fec4..d8bd077 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -69,8 +69,10 @@ function print_usage(){
  echo "  CLASSNAME                             run the class named CLASSNAME"
  echo " or"
  echo "  where COMMAND is one of:"
-  echo "  resourcemanager -format-state-store   deletes the RMStateStore"
  echo "  resourcemanager                       run the ResourceManager"
+  echo "                                        Use -format-state-store for deleting the RMStateStore."
+  echo "                                        Use -remove-application-from-state-store appId for "
+  echo "                                        removing application from RMStateStore."
  echo "  nodemanager                           run a nodemanager on each slave"
  echo "  timelineserver                        run the timeline server"
  echo "  rmadmin                               admin tools"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a9599fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 8bd8e21..130cfd4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -93,6 +93,7 @@ import 
org.apache.hadoop.yarn.server.webproxy.AppReportFetcher;
 import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxy;
 import 

[2/2] hadoop git commit: MAPREDUCE-6327. Made MR AM use timeline service v2 API to write history events and counters. Contributed by Junping Du.

2015-04-21 Thread zjshen
MAPREDUCE-6327. Made MR AM use timeline service v2 API to write history events 
and counters. Contributed by Junping Du.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5eeb2b15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5eeb2b15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5eeb2b15

Branch: refs/heads/YARN-2928
Commit: 5eeb2b156f8e108205945f0a1d06873cb51c3527
Parents: ce6aa1b
Author: Zhijie Shen zjs...@apache.org
Authored: Tue Apr 21 16:31:33 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:31:33 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  15 ++
 .../jobhistory/JobHistoryEventHandler.java  | 258 ---
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  24 ++
 .../v2/app/rm/RMContainerAllocator.java |   9 +
 .../hadoop/mapreduce/jobhistory/TestEvents.java |   8 +-
 .../jobhistory/TestJobHistoryEventHandler.java  |   9 +-
 .../apache/hadoop/mapreduce/MRJobConfig.java|   5 +
 .../mapreduce/jobhistory/AMStartedEvent.java|  18 ++
 .../mapreduce/jobhistory/HistoryEvent.java  |   4 +
 .../mapreduce/jobhistory/JobFinishedEvent.java  |  25 ++
 .../jobhistory/JobInfoChangeEvent.java  |  11 +
 .../mapreduce/jobhistory/JobInitedEvent.java|  14 +
 .../jobhistory/JobPriorityChangeEvent.java  |  10 +
 .../jobhistory/JobQueueChangeEvent.java |  10 +
 .../jobhistory/JobStatusChangedEvent.java   |  10 +
 .../mapreduce/jobhistory/JobSubmittedEvent.java |  23 ++
 .../JobUnsuccessfulCompletionEvent.java |  16 ++
 .../jobhistory/MapAttemptFinishedEvent.java |  24 +-
 .../jobhistory/NormalizedResourceEvent.java |  11 +
 .../jobhistory/ReduceAttemptFinishedEvent.java  |  25 +-
 .../jobhistory/TaskAttemptFinishedEvent.java|  19 ++
 .../jobhistory/TaskAttemptStartedEvent.java |  18 ++
 .../TaskAttemptUnsuccessfulCompletionEvent.java |  24 ++
 .../mapreduce/jobhistory/TaskFailedEvent.java   |  19 ++
 .../mapreduce/jobhistory/TaskFinishedEvent.java |  18 ++
 .../mapreduce/jobhistory/TaskStartedEvent.java  |  12 +
 .../mapreduce/jobhistory/TaskUpdatedEvent.java  |  10 +
 .../mapreduce/util/JobHistoryEventUtils.java|  51 
 .../src/main/resources/mapred-default.xml   |   7 +
 .../mapred/TestMRTimelineEventHandling.java | 163 +++-
 .../hadoop/mapreduce/v2/MiniMRYarnCluster.java  |  21 +-
 31 files changed, 839 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5eeb2b15/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ccdf6d6..1242547 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1,5 +1,20 @@
 Hadoop MapReduce Change Log
 
+Branch YARN-2928: Timeline Server Next Generation: Phase 1
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+MAPREDUCE-6327. Made MR AM use timeline service v2 API to write history
+events and counters. (Junping Du via zjshen)
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Trunk (Unreleased)
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5eeb2b15/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index a0e7041..6d72095 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -19,6 +19,9 @@
 package org.apache.hadoop.mapreduce.jobhistory;
 
 import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
@@ -49,11 +52,13 @@ import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.util.JobHistoryEventUtils;
 import 

[1/2] hadoop git commit: MAPREDUCE-6327. Made MR AM use timeline service v2 API to write history events and counters. Contributed by Junping Du.

2015-04-21 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 ce6aa1b1c -> 5eeb2b156


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5eeb2b15/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
index eab9026..b3ea26e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
@@ -18,22 +18,45 @@
 
 package org.apache.hadoop.mapred;
 
+import java.io.File;
+import java.io.IOException;
+
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.jobhistory.EventType;
 import org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler;
 import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timeline.TimelineStore;
+import 
org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 
 import org.junit.Assert;
 import org.junit.Test;
 
 public class TestMRTimelineEventHandling {
 
+  private static final String TIMELINE_AUX_SERVICE_NAME = "timeline_collector";
+  private static final Log LOG =
+LogFactory.getLog(TestMRTimelineEventHandling.class);
+  
   @Test
   public void testTimelineServiceStartInMiniCluster() throws Exception {
 Configuration conf = new YarnConfiguration();
@@ -47,7 +70,7 @@ public class TestMRTimelineEventHandling {
 MiniMRYarnCluster cluster = null;
 try {
   cluster = new MiniMRYarnCluster(
-  TestJobHistoryEventHandler.class.getSimpleName(), 1);
+TestMRTimelineEventHandling.class.getSimpleName(), 1);
   cluster.init(conf);
   cluster.start();
 
@@ -88,7 +111,7 @@ public class TestMRTimelineEventHandling {
 MiniMRYarnCluster cluster = null;
 try {
   cluster = new MiniMRYarnCluster(
-  TestJobHistoryEventHandler.class.getSimpleName(), 1);
+TestMRTimelineEventHandling.class.getSimpleName(), 1);
   cluster.init(conf);
   cluster.start();
   TimelineStore ts = cluster.getApplicationHistoryServer()
@@ -132,6 +155,140 @@ public class TestMRTimelineEventHandling {
   }
 }
   }
+  
+  @Test
+  public void testMRNewTimelineServiceEventHandling() throws Exception {
+LOG.info("testMRNewTimelineServiceEventHandling start.");
+Configuration conf = new YarnConfiguration();
+conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
+
+// enable new timeline service in MR side
+conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_NEW_TIMELINE_SERVICE_ENABLED, 
true);
+
+// enable aux-service based timeline collectors
+conf.set(YarnConfiguration.NM_AUX_SERVICES, TIMELINE_AUX_SERVICE_NAME);
+conf.set(YarnConfiguration.NM_AUX_SERVICES + "." + 
TIMELINE_AUX_SERVICE_NAME
+  + ".class", PerNodeTimelineCollectorsAuxService.class.getName());
+
+conf.setBoolean(YarnConfiguration.SYSTEM_METRICS_PUBLISHER_ENABLED, true);
+
+MiniMRYarnCluster cluster = null;
+try {
+  cluster = new MiniMRYarnCluster(
+  TestMRTimelineEventHandling.class.getSimpleName(), 1, true);
+  cluster.init(conf);
+  cluster.start();
+  LOG.info("A MiniMRYarnCluster get start.");
+
+  Path inDir = new Path("input");
+  Path outDir = new Path("output");
+  LOG.info("Run 1st job which should be successful.");
+  RunningJob job =
+   

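The configuration block in the test above follows the generic NodeManager aux-service registration pattern: declare a service name, then bind name.class to the implementing class. Distilled into a standalone sketch (the key names and collector class are taken from the diff; the rest is illustrative):

  Configuration conf = new YarnConfiguration();
  String name = "timeline_collector";  // aux-service name used by the test
  conf.set(YarnConfiguration.NM_AUX_SERVICES, name);
  conf.set(YarnConfiguration.NM_AUX_SERVICES + "." + name + ".class",
      PerNodeTimelineCollectorsAuxService.class.getName());
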
hadoop git commit: HADOOP-11827. Speed-up distcp buildListing() using threadpool (Zoran Dimitrijevic via raviprak)

2015-04-21 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2c1469036 - cfba35505


HADOOP-11827. Speed-up distcp buildListing() using threadpool (Zoran 
Dimitrijevic via raviprak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cfba3550
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cfba3550
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cfba3550

Branch: refs/heads/trunk
Commit: cfba355052df15f8eb6cc9b8e90e2d8492bec7d7
Parents: 2c14690
Author: Ravi Prakash ravip...@altiscale.com
Authored: Tue Apr 21 16:43:02 2015 -0700
Committer: Ravi Prakash ravip...@altiscale.com
Committed: Tue Apr 21 16:49:37 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../apache/hadoop/tools/DistCpConstants.java|   4 +
 .../apache/hadoop/tools/DistCpOptionSwitch.java |   9 +-
 .../org/apache/hadoop/tools/DistCpOptions.java  |  27 +++
 .../org/apache/hadoop/tools/OptionsParser.java  |  12 ++
 .../apache/hadoop/tools/SimpleCopyListing.java  | 169 +++---
 .../hadoop/tools/util/ProducerConsumer.java | 177 +++
 .../apache/hadoop/tools/util/WorkReport.java|  78 
 .../apache/hadoop/tools/util/WorkRequest.java   |  53 ++
 .../hadoop/tools/util/WorkRequestProcessor.java |  38 
 .../apache/hadoop/tools/TestCopyListing.java|  20 ++-
 .../apache/hadoop/tools/TestIntegration.java|  17 ++
 .../apache/hadoop/tools/TestOptionsParser.java  |  42 +
 .../hadoop/tools/util/TestProducerConsumer.java | 109 
 14 files changed, 728 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfba3550/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 02066b6..a6814f8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -499,6 +499,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11819. HttpServerFunctionalTest#prepareTestWebapp should create web
 app directory if it does not exist. (Rohith via vinayakumarb)
 
+HADOOP-11827. Speed-up distcp buildListing() using threadpool
+(Zoran Dimitrijevic via raviprak)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfba3550/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
index a1af2af..7ecb6ce 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
@@ -23,6 +23,9 @@ package org.apache.hadoop.tools;
  */
 public class DistCpConstants {
 
+  /* Default number of threads to use for building file listing */
+  public static final int DEFAULT_LISTSTATUS_THREADS = 1;
+
   /* Default number of maps to use for DistCp */
   public static final int DEFAULT_MAPS = 20;
 
@@ -47,6 +50,7 @@ public class DistCpConstants {
   public static final String CONF_LABEL_SYNC_FOLDERS = "distcp.sync.folders";
   public static final String CONF_LABEL_DELETE_MISSING = 
"distcp.delete.missing.source";
   public static final String CONF_LABEL_SSL_CONF = "distcp.keystore.resource";
+  public static final String CONF_LABEL_LISTSTATUS_THREADS = 
"distcp.liststatus.threads";
   public static final String CONF_LABEL_MAX_MAPS = "distcp.max.maps";
   public static final String CONF_LABEL_SOURCE_LISTING = 
"distcp.source.listing";
   public static final String CONF_LABEL_COPY_STRATEGY = "distcp.copy.strategy";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfba3550/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
index e9c7d46..f90319d 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
@@ -83,7 +83,14 @@ public enum DistCpOptionSwitch {
   SSL_CONF(DistCpConstants.CONF_LABEL_SSL_CONF,
   new Option("mapredSslConf", true, "Configuration for ssl config file" +
   ", to use with 

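The new ProducerConsumer, WorkRequest and WorkReport classes generalize the speed-up: fan directory listings out to a bounded thread pool and merge the results. A minimal sketch of the same idea in plain java.util.concurrent (names here are illustrative, not the utilities added by this commit):

  import java.util.ArrayList;
  import java.util.Collections;
  import java.util.List;
  import java.util.concurrent.Callable;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.Future;

  public class ParallelLister {
    // numThreads maps to the new distcp.liststatus.threads knob (default 1).
    public static List<String> listAll(List<String> dirs, int numThreads)
        throws Exception {
      ExecutorService pool = Executors.newFixedThreadPool(numThreads);
      try {
        List<Future<List<String>>> futures =
            new ArrayList<Future<List<String>>>();
        for (final String dir : dirs) {
          futures.add(pool.submit(new Callable<List<String>>() {
            @Override
            public List<String> call() {
              // Stand-in for FileSystem#listStatus(new Path(dir)).
              return Collections.singletonList(dir + "/child");
            }
          }));
        }
        List<String> all = new ArrayList<String>();
        for (Future<List<String>> f : futures) {
          all.addAll(f.get());  // blocks; preserves submission order
        }
        return all;
      } finally {
        pool.shutdown();
      }
    }
  }
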
[24/29] hadoop git commit: HADOOP-11704. DelegationTokenAuthenticationFilter must pass ipaddress instead of hostname to ProxyUsers#authorize (Anubhav Dhoot via asuresh)

2015-04-21 Thread zjshen
HADOOP-11704. DelegationTokenAuthenticationFilter must pass ipaddress instead 
of hostname to ProxyUsers#authorize (Anubhav Dhoot via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81ec672e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81ec672e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81ec672e

Branch: refs/heads/YARN-2928
Commit: 81ec672e2ce89ad56c128ed1eb1e68f56ae36d73
Parents: 539f79a
Author: Arun Suresh asur...@apache.org
Authored: Tue Apr 21 11:31:51 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:56 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../DelegationTokenAuthenticationFilter.java|  2 +-
 .../DelegationTokenAuthenticationHandler.java   |  2 +-
 .../delegation/web/TestWebDelegationToken.java  | 56 +++-
 4 files changed, 60 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81ec672e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 230717c..5c6d44a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -519,6 +519,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11811. Fix typos in hadoop-project/pom.xml and 
TestAccessControlList.
 (Brahma Reddy Battula via ozawa)
 
+HADOOP-11704. DelegationTokenAuthenticationFilter must pass ipaddress
+instead of hostname to ProxyUsers#authorize (Anubhav Dhoot via asuresh)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81ec672e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
index fbd1129..b6e1a76 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
@@ -239,7 +239,7 @@ public class DelegationTokenAuthenticationFilter
 if (doAsUser != null) {
   ugi = UserGroupInformation.createProxyUser(doAsUser, ugi);
   try {
-ProxyUsers.authorize(ugi, request.getRemoteHost());
+ProxyUsers.authorize(ugi, request.getRemoteAddr());
   } catch (AuthorizationException ex) {
 HttpExceptionUtils.createServletExceptionResponse(response,
 HttpServletResponse.SC_FORBIDDEN, ex);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81ec672e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
index c498f70..3f191de 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
@@ -199,7 +199,7 @@ public abstract class DelegationTokenAuthenticationHandler
 requestUgi = UserGroupInformation.createProxyUser(
 doAsUser, requestUgi);
 try {
-  ProxyUsers.authorize(requestUgi, request.getRemoteHost());
+  ProxyUsers.authorize(requestUgi, request.getRemoteAddr());
 } catch (AuthorizationException ex) {
   HttpExceptionUtils.createServletExceptionResponse(response,
   HttpServletResponse.SC_FORBIDDEN, ex);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81ec672e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
--
diff --git 

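The two one-line changes above matter because proxy-user restrictions are typically expressed as IP ranges: getRemoteHost() may perform a reverse DNS lookup and return a host name that never matches an IP-based ACL, while getRemoteAddr() returns the client IP directly. A sketch of the corrected call-site pattern (doAsUser, ugi and request assumed in scope, as in the filter above):

  // doAsUser came from the request; ugi is the authenticated caller.
  UserGroupInformation proxyUgi =
      UserGroupInformation.createProxyUser(doAsUser, ugi);
  // Authorize against the caller's IP (e.g. "10.1.2.3"), not its host name.
  ProxyUsers.authorize(proxyUgi, request.getRemoteAddr());
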
[17/29] hadoop git commit: HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes for infinite loop (Contributed by Vinayakumar B) Updated CHANGES.TXT for correct version

2015-04-21 Thread zjshen
HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes 
for infinite loop (Contributed by Vinayakumar B)
Updated CHANGES.TXT for correct version


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d09c8b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d09c8b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d09c8b5

Branch: refs/heads/YARN-2928
Commit: 2d09c8b5ce6b5d7a8eef90f2ad2e0ad61e00345f
Parents: bb5d48b
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 21 07:59:43 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:55 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d09c8b5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2d20812..2291855 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -488,9 +488,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-7922. ShortCircuitCache#close is not releasing
 ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)
 
-HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
-goes for infinite loop (vinayakumarb)
-
 HDFS-5215. dfs.datanode.du.reserved is not considered while computing
 available space ( Brahma Reddy Battula via Yongjun Zhang)
 
@@ -564,6 +561,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8179. DFSClient#getServerDefaults returns null within 1
 hour of system start. (Xiaoyu Yao via Arpit Agarwal)
 
+HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
+goes for infinite loop (vinayakumarb)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[11/29] hadoop git commit: MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options which is a regression from MR1 (zxu via rkanter)

2015-04-21 Thread zjshen
MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options which is 
a regression from MR1 (zxu via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2af27c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2af27c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2af27c7

Branch: refs/heads/YARN-2928
Commit: f2af27c79ce992fc7a354b2c5bc88a375c8b4ff2
Parents: ea458a3
Author: Robert Kanter rkan...@apache.org
Authored: Mon Apr 20 14:14:08 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:54 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../mapred/LocalDistributedCacheManager.java|  6 --
 .../hadoop/mapreduce/JobResourceUploader.java   |  2 +-
 .../hadoop/mapred/TestLocalJobSubmission.java   | 92 
 4 files changed, 96 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2af27c7/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index c81868d..a02ae84 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -334,6 +334,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL
 (rchiang via rkanter)
 
+MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options
+which is a regression from MR1 (zxu via rkanter)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2af27c7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
index 1055516..8606ede 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
@@ -100,18 +100,12 @@ class LocalDistributedCacheManager {
 Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
 if (archiveClassPaths != null) {
   for (Path p : archiveClassPaths) {
-FileSystem remoteFS = p.getFileSystem(conf);
-p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
-remoteFS.getWorkingDirectory()));
 classpaths.put(p.toUri().getPath().toString(), p);
   }
 }
 Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
 if (fileClassPaths != null) {
   for (Path p : fileClassPaths) {
-FileSystem remoteFS = p.getFileSystem(conf);
-p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
-remoteFS.getWorkingDirectory()));
 classpaths.put(p.toUri().getPath().toString(), p);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2af27c7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index eebdf88..134de35 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -127,7 +127,7 @@ class JobResourceUploader {
 Path tmp = new Path(tmpjars);
 Path newPath = copyRemoteFiles(libjarsDir, tmp, conf, replication);
 DistributedCache.addFileToClassPath(
-new Path(newPath.toUri().getPath()), conf);
+new Path(newPath.toUri().getPath()), conf, jtFs);
   }
 }
 


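The fix has two halves visible above: stop round-tripping classpath entries through the remote file system (which broke LocalJobRunner), and pass the already-resolved jtFs into addFileToClassPath. A sketch of the repaired upload step, assuming the three-argument overload shown in the hunk:

  // newPath is where the -libjars jar was uploaded; jtFs is the FileSystem
  // it lives on. Supplying jtFs avoids re-resolving the path against the
  // default (possibly local) file system later.
  Path jar = new Path(newPath.toUri().getPath());
  DistributedCache.addFileToClassPath(jar, conf, jtFs);
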
[01/29] hadoop git commit: YARN-3487. CapacityScheduler scheduler lock obtained unnecessarily when calling getQueue (Jason Lowe via wangda)

2015-04-21 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 4459349c8 - ce6aa1b1c


YARN-3487. CapacityScheduler scheduler lock obtained unnecessarily when calling 
getQueue (Jason Lowe via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8dfc8ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8dfc8ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8dfc8ab

Branch: refs/heads/YARN-2928
Commit: b8dfc8abd0660114b18bcb1f2a277cf2c7afb261
Parents: 86762f0
Author: Wangda Tan wan...@apache.org
Authored: Fri Apr 17 17:22:27 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:52 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 2 ++
 .../scheduler/capacity/CapacityScheduler.java| 8 ++--
 2 files changed, 4 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8dfc8ab/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7b2158c..e7622f8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -297,6 +297,8 @@ Release 2.7.1 - UNRELEASED
   OPTIMIZATIONS
 
   BUG FIXES
+YARN-3487. CapacityScheduler scheduler lock obtained unnecessarily when 
+calling getQueue (Jason Lowe via wangda)
 
 Release 2.7.0 - UNRELEASED
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8dfc8ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 5d58b15..1e1623d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -642,7 +642,7 @@ public class CapacityScheduler extends
 return queue;
   }
 
-  public synchronized CSQueue getQueue(String queueName) {
+  public CSQueue getQueue(String queueName) {
 if (queueName == null) {
   return null;
 }
@@ -940,11 +940,7 @@ public class CapacityScheduler extends
   boolean includeChildQueues, boolean recursive) 
   throws IOException {
 CSQueue queue = null;
-
-synchronized (this) {
-  queue = this.queues.get(queueName); 
-}
-
+queue = this.queues.get(queueName);
 if (queue == null) {
   throw new IOException("Unknown queue: " + queueName);
 }



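The pattern here is lock elision for a read-mostly map: getQueue() held the scheduler lock only to read the queue map, serializing unrelated callers behind it. When the map itself is concurrent, the point read needs no lock at all. A minimal sketch, assuming the queue map is a ConcurrentHashMap (which is what makes removing synchronized safe):

  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.ConcurrentMap;

  class QueueRegistry<Q> {
    private final ConcurrentMap<String, Q> queues =
        new ConcurrentHashMap<String, Q>();

    // Lock-free read path: safe because the map handles its own concurrency.
    Q getQueue(String name) {
      return name == null ? null : queues.get(name);
    }

    // Structural updates may still synchronize if several structures must
    // change together; readers no longer contend on that lock.
    synchronized void addQueue(String name, Q q) {
      queues.put(name, q);
    }
  }
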
[06/29] hadoop git commit: HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. Contributed by Haohui Mai.

2015-04-21 Thread zjshen
HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. Contributed 
by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63d0082c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63d0082c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63d0082c

Branch: refs/heads/YARN-2928
Commit: 63d0082c0c1832c87e34d5cc123b697586300f04
Parents: 26a97e2
Author: Haohui Mai whe...@apache.org
Authored: Thu Apr 16 23:13:15 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:53 2015 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   8 +
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  65 +++
 .../org/apache/hadoop/hdfs/protocol/Block.java  | 243 +
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 279 ++
 .../hadoop/hdfs/protocol/DatanodeInfo.java  | 512 ++
 .../hdfs/protocol/DatanodeInfoWithStorage.java  |  67 +++
 .../hadoop/hdfs/protocol/ExtendedBlock.java | 123 +
 .../hdfs/protocol/FsPermissionExtension.java|  89 
 .../hdfs/protocol/HdfsConstantsClient.java  |   1 +
 .../hadoop/hdfs/protocol/HdfsFileStatus.java| 271 ++
 .../hadoop/hdfs/protocol/LocatedBlock.java  | 206 
 .../hadoop/hdfs/protocol/LocatedBlocks.java | 189 +++
 .../token/block/BlockTokenIdentifier.java   | 189 +++
 .../delegation/DelegationTokenIdentifier.java   | 100 
 .../hadoop/hdfs/web/WebHdfsConstants.java   |  37 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  15 -
 .../org/apache/hadoop/hdfs/protocol/Block.java  | 243 -
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 279 --
 .../hadoop/hdfs/protocol/DatanodeInfo.java  | 514 ---
 .../hdfs/protocol/DatanodeInfoWithStorage.java  |  67 ---
 .../hadoop/hdfs/protocol/ExtendedBlock.java | 123 -
 .../hdfs/protocol/FsPermissionExtension.java|  89 
 .../hadoop/hdfs/protocol/HdfsFileStatus.java| 271 --
 .../hadoop/hdfs/protocol/LocatedBlock.java  | 217 
 .../hadoop/hdfs/protocol/LocatedBlocks.java | 189 ---
 .../protocol/SnapshottableDirectoryStatus.java  |   3 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   3 +-
 .../token/block/BlockTokenIdentifier.java   | 189 ---
 .../delegation/DelegationTokenIdentifier.java   | 101 
 .../server/blockmanagement/BlockManager.java|  19 +-
 .../BlockStoragePolicySuite.java|   1 -
 .../blockmanagement/DatanodeStorageInfo.java|   3 +
 .../blockmanagement/HeartbeatManager.java   |  11 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   3 +-
 .../server/namenode/FSDirStatAndListingOp.java  |  18 +-
 .../hdfs/server/namenode/FSDirectory.java   |   4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   7 +-
 .../hdfs/server/namenode/FSEditLogOp.java   |   5 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   5 +-
 .../hadoop/hdfs/server/namenode/INode.java  |  13 +-
 .../hdfs/server/namenode/INodeDirectory.java|   9 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   8 +-
 .../hadoop/hdfs/server/namenode/INodeMap.java   |   5 +-
 .../snapshot/FileWithSnapshotFeature.java   |   3 +-
 .../web/resources/NamenodeWebHdfsMethods.java   |   6 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |   4 +-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |   3 +-
 .../hadoop/hdfs/web/SWebHdfsFileSystem.java |   3 +-
 .../org/apache/hadoop/hdfs/web/TokenAspect.java |  10 +-
 .../hadoop/hdfs/web/WebHdfsConstants.java   |  30 --
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |   5 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java |   8 +-
 .../namenode/TestNamenodeCapacityReport.java|  19 +-
 .../apache/hadoop/hdfs/web/TestWebHdfsUrl.java  |   2 +-
 55 files changed, 2476 insertions(+), 2412 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63d0082c/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 43bc332..478a931 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -1,2 +1,10 @@
 <FindBugsFilter>
+  <Match>
+    <Or>
+      <Class name="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"/>
+      <Class name="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
+      <Class 
name="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
+    </Or>
+    <Bug 

[21/29] hadoop git commit: HADOOP-11746. rewrite test-patch.sh (aw)

2015-04-21 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/640ffeae/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5c6d44a..bcbffb7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -447,6 +447,8 @@ Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
+HADOOP-11746. rewrite test-patch.sh (aw)
+
   NEW FEATURES
 
 HADOOP-11226. Add a configuration to set ipc.Client's traffic class with



[04/29] hadoop git commit: HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. Contributed by Haohui Mai.

2015-04-21 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63d0082c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 850b3bd..cea2b82 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -31,12 +31,12 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -140,7 +140,7 @@ class FSDirStatAndListingOp {
   }
 
   private static byte getStoragePolicyID(byte inodePolicy, byte parentPolicy) {
-return inodePolicy != BlockStoragePolicySuite.ID_UNSPECIFIED ? inodePolicy 
:
+return inodePolicy != 
HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED ? inodePolicy :
 parentPolicy;
   }
 
@@ -176,8 +176,8 @@ class FSDirStatAndListingOp {
   if (targetNode == null)
 return null;
   byte parentStoragePolicy = isSuperUser ?
-  targetNode.getStoragePolicyID() : BlockStoragePolicySuite
-  .ID_UNSPECIFIED;
+  targetNode.getStoragePolicyID() : HdfsConstantsClient
+  .BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 
   if (!targetNode.isDirectory()) {
 return new DirectoryListing(
@@ -199,7 +199,7 @@ class FSDirStatAndListingOp {
 INode cur = contents.get(startChild+i);
 byte curPolicy = isSuperUser && !cur.isSymlink()?
 cur.getLocalStoragePolicyID():
-BlockStoragePolicySuite.ID_UNSPECIFIED;
+HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur,
 needLocation, getStoragePolicyID(curPolicy,
 parentStoragePolicy), snapshot, isRawPath, iip);
@@ -254,7 +254,7 @@ class FSDirStatAndListingOp {
 for (int i = 0; i < numOfListing; i++) {
   Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
   listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
-  BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
+  HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, 
Snapshot.CURRENT_STATE_ID,
   false, INodesInPath.fromINode(sRoot));
 }
 return new DirectoryListing(
@@ -277,7 +277,7 @@ class FSDirStatAndListingOp {
 try {
   final INode i = src.getLastINode();
   byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
-  i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
+  i.getStoragePolicyID() : 
HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
   return i == null ? null : createFileStatus(
   fsd, path, HdfsFileStatus.EMPTY_NAME, i, policyId,
   src.getPathSnapshotId(), isRawPath, src);
@@ -295,7 +295,7 @@ class FSDirStatAndListingOp {
   if (fsd.getINode4DotSnapshot(srcs) != null) {
 return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
 HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
-BlockStoragePolicySuite.ID_UNSPECIFIED);
+HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
   }
   return null;
 }
@@ -322,7 +322,7 @@ class FSDirStatAndListingOp {
 if (fsd.getINode4DotSnapshot(src) != null) {
   return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
   HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
-  BlockStoragePolicySuite.ID_UNSPECIFIED);
+  HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
 }
 return null;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63d0082c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 

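The edits above repeat one rule in several places: an inode whose policy is the UNSPECIFIED sentinel inherits its parent's storage policy. Isolated as a sketch (the sentinel value here is an assumption for illustration; the real constant is HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED):

  class StoragePolicyResolver {
    // Assumed sentinel meaning "no policy set on this inode".
    static final byte UNSPECIFIED = 0;

    // Fall back to the parent's policy when the inode has none of its own.
    static byte resolve(byte inodePolicy, byte parentPolicy) {
      return inodePolicy != UNSPECIFIED ? inodePolicy : parentPolicy;
    }
  }
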
[25/29] hadoop git commit: MAPREDUCE-6293. Set job classloader on uber-job's LocalContainerLauncher event thread. (Sangjin Lee via gera)

2015-04-21 Thread zjshen
MAPREDUCE-6293. Set job classloader on uber-job's LocalContainerLauncher event 
thread. (Sangjin Lee via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b5d660c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b5d660c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b5d660c

Branch: refs/heads/YARN-2928
Commit: 7b5d660ca713535620047ea5d2629319c79f
Parents: 632c409
Author: Gera Shegalov g...@apache.org
Authored: Tue Apr 21 11:46:35 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:56 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../hadoop/mapred/LocalContainerLauncher.java   | 20 
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  2 +-
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  2 +-
 .../apache/hadoop/mapreduce/v2/TestMRJobs.java  |  9 +
 5 files changed, 34 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5d660c/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ffa01fa..0cf5c4b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -337,6 +337,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options
 which is a regression from MR1 (zxu via rkanter)
 
+MAPREDUCE-6293. Set job classloader on uber-job's LocalContainerLauncher
+event thread. (Sangjin Lee via gera)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5d660c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
index 218ac83..ffc5326 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -80,6 +80,7 @@ public class LocalContainerLauncher extends AbstractService 
implements
   private final HashSet<File> localizedFiles;
   private final AppContext context;
   private final TaskUmbilicalProtocol umbilical;
+  private final ClassLoader jobClassLoader;
   private ExecutorService taskRunner;
   private Thread eventHandler;
   private BlockingQueue<ContainerLauncherEvent> eventQueue =
@@ -87,6 +88,12 @@ public class LocalContainerLauncher extends AbstractService 
implements
 
   public LocalContainerLauncher(AppContext context,
 TaskUmbilicalProtocol umbilical) {
+this(context, umbilical, null);
+  }
+
+  public LocalContainerLauncher(AppContext context,
+TaskUmbilicalProtocol umbilical,
+ClassLoader jobClassLoader) {
 super(LocalContainerLauncher.class.getName());
 this.context = context;
 this.umbilical = umbilical;
@@ -94,6 +101,7 @@ public class LocalContainerLauncher extends AbstractService 
implements
 // (TODO/FIXME:  pointless to use RPC to talk to self; should create
 // LocalTaskAttemptListener or similar:  implement umbilical protocol
 // but skip RPC stuff)
+this.jobClassLoader = jobClassLoader;
 
 try {
   curFC = FileContext.getFileContext(curDir.toURI());
@@ -133,6 +141,18 @@ public class LocalContainerLauncher extends 
AbstractService implements
 setDaemon(true).setNameFormat("uber-SubtaskRunner").build());
 // create and start an event handling thread
 eventHandler = new Thread(new EventHandler(), "uber-EventHandler");
+// if the job classloader is specified, set it onto the event handler as 
the
+// thread context classloader so that it can be used by the event handler
+// as well as the subtask runner threads
+if (jobClassLoader != null) {
+  LOG.info("Setting " + jobClassLoader +
+   " as the context classloader of thread " + eventHandler.getName());
+  eventHandler.setContextClassLoader(jobClassLoader);
+} else {
+  // note the current TCCL
+  LOG.info("Context classloader of thread " + eventHandler.getName() +
+  ": " + 

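The essential mechanism above is the thread-context-classloader handoff: a thread created inside the app master inherits the framework's classloader and cannot see user classes unless the TCCL is set before the thread starts. A JDK-only sketch of the pattern (jobClassLoader assumed in scope):

  Thread eventHandler = new Thread(new Runnable() {
    @Override
    public void run() {
      // Code on this thread resolves user classes through the TCCL.
      ClassLoader tccl = Thread.currentThread().getContextClassLoader();
      System.out.println("event thread uses " + tccl);
    }
  }, "uber-EventHandler");

  // jobClassLoader is the loader that can see the job's classes; without
  // this call the thread inherits its creator's classloader.
  if (jobClassLoader != null) {
    eventHandler.setContextClassLoader(jobClassLoader);
  }
  eventHandler.start();
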
[29/29] hadoop git commit: HADOOP-11846 TestCertificateUtil.testCorruptPEM failing on Jenkins JDK8. (Larry McCay via stevel)

2015-04-21 Thread zjshen
HADOOP-11846 TestCertificateUtil.testCorruptPEM failing on Jenkins JDK8. (Larry 
McCay via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce6aa1b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce6aa1b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce6aa1b1

Branch: refs/heads/YARN-2928
Commit: ce6aa1b1cbaf9cde17cf722b744ac7096e574a7e
Parents: 02637b4
Author: Steve Loughran ste...@apache.org
Authored: Tue Apr 21 22:38:27 2015 +0100
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:57 2015 -0700

--
 .../hadoop/security/authentication/util/TestCertificateUtil.java | 4 ++--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce6aa1b1/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
index f52b6d2..ce4176c 100644
--- 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
+++ 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
@@ -52,7 +52,7 @@ public class TestCertificateUtil {
 
   @Test
   public void testCorruptPEM() throws Exception {
-String pem = 
"LJMLJMMIICOjCCAaOgAwIBAgIJANXi/oWxvJNzMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNVBAYTAlVTMQ0w"
+String pem = 
"MIICOjCCAaOgAwIBAgIJANXi/oWxvJNzMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNVBAYTAlVTMQ0w"
 + 
"CwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRvb3AxDTALBgNVBAsTBFRl"
 + 
"c3QxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNTAxMDIyMTE5MjRaFw0xNjAxMDIyMTE5MjRaMF8x"
 + 
"CzAJBgNVBAYTAlVTMQ0wCwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRv"
@@ -62,7 +62,7 @@ public class TestCertificateUtil {
 + 
"7OPuuaHb25J8isiOyA3RiWuJGQlXTdkCAwEAATANBgkqhkiG9w0BAQUFAAOBgQAdRUyCUqE9sdim"
 + 
"Fbll9BuZDKV16WXeWGq+kTd7ETe7l0fqXjq5EnrifOai0L/pXwVvS2jrFkKQRlRxRGUNaeEBZ2Wy"
 + 
"9aTyR+HGHCfvwoCegc9rAVw/DLaRriSO/jnEXzYK6XLVKH+hx5UXrJ7Oyc7JjZUc3g9kCWORThCX"
-+ "Mzc1xA==";
++ "Mzc1xA++";
 try {
   CertificateUtil.parseRSAPublicKey(pem);
   fail("Should not have thrown ServletException");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce6aa1b1/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9819300..02066b6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -527,6 +527,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11704. DelegationTokenAuthenticationFilter must pass ipaddress
 instead of hostname to ProxyUsers#authorize (Anubhav Dhoot via asuresh)
 
+HADOOP-11846 TestCertificateUtil.testCorruptPEM failing on Jenkins JDK8.
+(Larry McCay via stevel)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES


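The corruption moved from a leading prefix (LJMLJM is made of alphabet-valid Base64 characters, so JDK 8 no longer failed on it) to trailing characters. The test shape itself is the standard expected-exception idiom; a self-contained sketch with a hypothetical parse() standing in for CertificateUtil.parseRSAPublicKey (java.util.Base64 requires Java 8):

  import static org.junit.Assert.fail;
  import org.junit.Test;

  public class CorruptInputTest {
    // Hypothetical helper: must reject malformed Base64 input.
    private static void parse(String pem) {
      java.util.Base64.getDecoder().decode(pem);
    }

    @Test
    public void testCorruptInput() {
      try {
        parse("MIICOjCCAaO!");  // '!' is outside the Base64 alphabet
        fail("Should have rejected corrupt input");
      } catch (IllegalArgumentException expected) {
        // corruption detected on every JDK, unlike an alphabet-valid prefix
      }
    }
  }
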

[13/29] hadoop git commit: YARN-3463. Integrate OrderingPolicy Framework with CapacityScheduler. (Craig Welch via wangda)

2015-04-21 Thread zjshen
YARN-3463. Integrate OrderingPolicy Framework with CapacityScheduler. (Craig 
Welch via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb5d48b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb5d48b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb5d48b6

Branch: refs/heads/YARN-2928
Commit: bb5d48b6e117022473eac83ed9c2bfed4b495603
Parents: 2d5ab10
Author: Wangda Tan wan...@apache.org
Authored: Mon Apr 20 17:12:32 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:54 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../ProportionalCapacityPreemptionPolicy.java   |   5 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  24 -
 .../CapacitySchedulerConfiguration.java |  31 +-
 .../scheduler/capacity/LeafQueue.java   |  63 +---
 .../AbstractComparatorOrderingPolicy.java   |  13 +--
 .../scheduler/policy/FifoOrderingPolicy.java|   7 +-
 .../scheduler/policy/OrderingPolicy.java|   4 +-
 .../webapp/CapacitySchedulerPage.java   |   1 +
 .../dao/CapacitySchedulerLeafQueueInfo.java |   9 ++
 ...estProportionalCapacityPreemptionPolicy.java |  14 ++-
 .../capacity/TestApplicationLimits.java |  15 +--
 .../scheduler/capacity/TestLeafQueue.java   | 102 ++-
 13 files changed, 242 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb5d48b6/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index fe57d64..0baee9d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -146,6 +146,9 @@ Release 2.8.0 - UNRELEASED
 YARN-1402. Update related Web UI and CLI with exposing client API to check
 log aggregation status. (Xuan Gong via junping_du)
 
+YARN-3463. Integrate OrderingPolicy Framework with CapacityScheduler.
+(Craig Welch via wangda)
+
   IMPROVEMENTS
 
 YARN-1880. Cleanup TestApplicationClientProtocolOnHA

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb5d48b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 87a2a00..2ab4197 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -550,9 +550,8 @@ public class ProportionalCapacityPreemptionPolicy 
implements SchedulingEditPolic
 
 // lock the leafqueue while we scan applications and unreserve
 synchronized (qT.leafQueue) {
-  NavigableSet<FiCaSchedulerApp> ns = 
-  (NavigableSet<FiCaSchedulerApp>) qT.leafQueue.getApplications();
-  Iterator<FiCaSchedulerApp> desc = ns.descendingIterator();
+  Iterator<FiCaSchedulerApp> desc =   
+qT.leafQueue.getOrderingPolicy().getPreemptionIterator();
   qT.actuallyPreempted = Resources.clone(resToObtain);
   while (desc.hasNext()) {
 FiCaSchedulerApp fc = desc.next();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb5d48b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 4823390..0554c04 100644
--- 

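The hunk above swaps a hard-coded NavigableSet (descending iterator = reverse schedule order) for the OrderingPolicy abstraction, which exposes one iterator for assignment and another for preemption. A sketch of that shape over a comparator-ordered set (illustrative, not the YARN interface):

  import java.util.Comparator;
  import java.util.Iterator;
  import java.util.TreeSet;

  class ComparatorOrderingPolicy<T> {
    private final TreeSet<T> entities;

    ComparatorOrderingPolicy(Comparator<T> scheduleOrder) {
      this.entities = new TreeSet<T>(scheduleOrder);
    }

    void add(T t) { entities.add(t); }

    // Who is offered resources next: schedule order.
    Iterator<T> getAssignmentIterator() {
      return entities.iterator();
    }

    // Who surrenders resources first: the reverse of schedule order,
    // which is what the preemption policy now asks the queue for.
    Iterator<T> getPreemptionIterator() {
      return entities.descendingIterator();
    }
  }
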
[23/29] hadoop git commit: HADOOP-11746. rewrite test-patch.sh (aw)

2015-04-21 Thread zjshen
HADOOP-11746. rewrite test-patch.sh (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/640ffeae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/640ffeae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/640ffeae

Branch: refs/heads/YARN-2928
Commit: 640ffeae04d985ef92bbaddee4c51b5e0e7b085e
Parents: 39e7929
Author: Allen Wittenauer a...@apache.org
Authored: Tue Apr 21 21:29:45 2015 +0100
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:56 2015 -0700

--
 dev-support/shelldocs.py|   31 +-
 dev-support/test-patch.d/checkstyle.sh  |  149 +
 dev-support/test-patch.d/shellcheck.sh  |  138 +
 dev-support/test-patch.d/whitespace.sh  |   40 +
 dev-support/test-patch.sh   | 2839 +-
 hadoop-common-project/hadoop-common/CHANGES.txt |2 +
 6 files changed, 2430 insertions(+), 769 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/640ffeae/dev-support/shelldocs.py
--
diff --git a/dev-support/shelldocs.py b/dev-support/shelldocs.py
index 2547450..fc7601a 100755
--- a/dev-support/shelldocs.py
+++ b/dev-support/shelldocs.py
@@ -17,6 +17,26 @@ import sys
 import string
 from optparse import OptionParser
 
+asflicense='''
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+'''
+
 def docstrip(key,string):
   string=re.sub("^## @%s " % key ,"",string)
   string=string.lstrip()
@@ -220,17 +240,18 @@ def main():
   funcdef.addreturn(line)
 elif line.startswith('function'):
   funcdef.setname(line)
-  if options.skipprnorep:
-if funcdef.getaudience() == "Private" and \
-   funcdef.getreplace() == "No":
+  if options.skipprnorep and \
+funcdef.getaudience() == "Private" and \
+funcdef.getreplace() == "No":
pass
-else:
-  allfuncs.append(funcdef)
+  else:
+allfuncs.append(funcdef)
   funcdef=ShellFunction()
 
   allfuncs=sorted(allfuncs)
 
   outfile=open(options.outfile, "w")
+  outfile.write(asflicense)
   for line in toc(allfuncs):
 outfile.write(line)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/640ffeae/dev-support/test-patch.d/checkstyle.sh
--
diff --git a/dev-support/test-patch.d/checkstyle.sh 
b/dev-support/test-patch.d/checkstyle.sh
new file mode 100755
index 000..460709e
--- /dev/null
+++ b/dev-support/test-patch.d/checkstyle.sh
@@ -0,0 +1,149 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_plugin checkstyle
+
+CHECKSTYLE_TIMER=0
+
+# the checkstyle test applies only to java source files
+function checkstyle_filefilter
+{
+  local filename=$1
+
+  if [[ ${filename} =~ \.java$ ]]; then
+add_test checkstyle
+  fi
+}
+
+function checkstyle_preapply
+{
+  verify_needed_test checkstyle
+
+  if [[ $? == 0 ]]; then
+return 0
+  fi
+
+  big_console_header "checkstyle plugin: prepatch"
+
+  start_clock
+  echo_and_redirect "${PATCH_DIR}/${PATCH_BRANCH}checkstyle.txt" "${MVN}" test 
checkstyle:checkstyle-aggregate -DskipTests 

[07/29] hadoop git commit: HDFS-8152. Refactoring of lazy persist storage cases. (Arpit Agarwal)

2015-04-21 Thread zjshen
HDFS-8152. Refactoring of lazy persist storage cases. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26a97e2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26a97e2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26a97e2c

Branch: refs/heads/YARN-2928
Commit: 26a97e2c10fd02d3812951be97c5d474c489d99a
Parents: 3c3e4d4
Author: Arpit Agarwal a...@apache.org
Authored: Sun Apr 19 16:09:06 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:53 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../fsdataset/impl/LazyPersistTestCase.java | 152 +++
 .../fsdataset/impl/TestLazyPersistFiles.java|  20 +--
 .../fsdataset/impl/TestLazyPersistPolicy.java   |   8 +-
 .../impl/TestLazyPersistReplicaPlacement.java   |  12 +-
 .../impl/TestLazyPersistReplicaRecovery.java|   4 +-
 .../datanode/fsdataset/impl/TestLazyWriter.java |  14 +-
 .../fsdataset/impl/TestScrLazyPersistFiles.java |  48 --
 8 files changed, 152 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26a97e2c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 30a460f..39ee33e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -448,6 +448,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8165. Move GRANDFATHER_GENERATION_STAMP and GRANDFATER_INODE_ID to
 hdfs-client. (wheat9)
 
+HDFS-8152. Refactoring of lazy persist storage cases. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/26a97e2c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index 6adec20..d46964b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
@@ -45,6 +47,7 @@ import java.util.List;
 import java.util.Set;
 import java.util.UUID;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -228,11 +231,15 @@ public abstract class LazyPersistTestCase {
* If ramDiskStorageLimit is >=0, then RAM_DISK capacity is artificially
* capped. If ramDiskStorageLimit < 0 then it is ignored.
*/
-  protected final void startUpCluster(boolean hasTransientStorage,
-  final int ramDiskReplicaCapacity,
-  final boolean useSCR,
-  final boolean useLegacyBlockReaderLocal)
-  throws IOException {
+  protected final void startUpCluster(
+  int numDatanodes,
+  boolean hasTransientStorage,
+  StorageType[] storageTypes,
+  int ramDiskReplicaCapacity,
+  long ramDiskStorageLimit,
+  long evictionLowWatermarkReplicas,
+  boolean useSCR,
+  boolean useLegacyBlockReaderLocal) throws IOException {
 
 Configuration conf = new Configuration();
 conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -243,17 +250,17 @@ public abstract class LazyPersistTestCase {
 HEARTBEAT_RECHECK_INTERVAL_MSEC);
 conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
 LAZY_WRITER_INTERVAL_SEC);
-conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
-EVICTION_LOW_WATERMARK * BLOCK_SIZE);
+conf.setLong(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
+evictionLowWatermarkReplicas * BLOCK_SIZE);
 
 if (useSCR) {
   conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
   // Do not share a client context across tests.
   conf.set(DFS_CLIENT_CONTEXT, 

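startUpCluster() now takes eight positional parameters, which is the pain the refactored LazyPersistTestCase concentrates in one place. A common next step for such test fixtures is a small options builder so call sites name only what they override; a sketch of that direction (not the class in this commit):

  class ClusterOptions {
    int numDatanodes = 1;
    boolean hasTransientStorage = true;
    int ramDiskReplicaCapacity = -1;
    boolean useScr = false;

    ClusterOptions datanodes(int n) { numDatanodes = n; return this; }
    ClusterOptions transientStorage(boolean b) { hasTransientStorage = b; return this; }
    ClusterOptions ramDiskReplicas(int n) { ramDiskReplicaCapacity = n; return this; }
    ClusterOptions shortCircuitReads(boolean b) { useScr = b; return this; }
  }

  // Call sites then state only what they care about, e.g.:
  //   startUpCluster(new ClusterOptions().datanodes(3).shortCircuitReads(true));
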
[22/29] hadoop git commit: HADOOP-11746. rewrite test-patch.sh (aw)

2015-04-21 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/640ffeae/dev-support/test-patch.sh
--
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 574a4fd..6e8679e 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -1,728 +1,1740 @@
 #!/usr/bin/env bash
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
 #
-#   http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
+### BUILD_URL is set by Hudson if it is run by patch process
 
-#set -x
+this=${BASH_SOURCE-$0}
+BINDIR=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+CWD=$(pwd)
+USER_PARAMS=("$@")
+GLOBALTIMER=$(date +%s)
+
+## @description  Setup the default global variables
+## @audience     public
+## @stability    stable
+## @replaceable  no
+function setup_defaults
+{
+  if [[ -z ${MAVEN_HOME:-} ]]; then
+    MVN=mvn
+  else
+    MVN=${MAVEN_HOME}/bin/mvn
+  fi
 
-### Setup some variables.  
-### BUILD_URL is set by Hudson if it is run by patch process
-### Read variables from properties file
-bindir=$(dirname $0)
-
-# Defaults
-if [ -z "$MAVEN_HOME" ]; then
-  MVN=mvn
-else
-  MVN=$MAVEN_HOME/bin/mvn
-fi
+  PROJECT_NAME=hadoop
+  JENKINS=false
+  PATCH_DIR=/tmp/${PROJECT_NAME}-test-patch/$$
+  BASEDIR=$(pwd)
 
-PROJECT_NAME=Hadoop
-JENKINS=false
-PATCH_DIR=/tmp
-SUPPORT_DIR=/tmp
-BASEDIR=$(pwd)
-BUILD_NATIVE=true
-PS=${PS:-ps}
-AWK=${AWK:-awk}
-WGET=${WGET:-wget}
-GIT=${GIT:-git}
-GREP=${GREP:-grep}
-PATCH=${PATCH:-patch}
-DIFF=${DIFF:-diff}
-JIRACLI=${JIRA:-jira}
-FINDBUGS_HOME=${FINDBUGS_HOME}
-FORREST_HOME=${FORREST_HOME}
-ECLIPSE_HOME=${ECLIPSE_HOME}
+  FINDBUGS_HOME=${FINDBUGS_HOME:-}
+  ECLIPSE_HOME=${ECLIPSE_HOME:-}
+  BUILD_NATIVE=${BUILD_NATIVE:-true}
+  PATCH_BRANCH=
+  CHANGED_MODULES=
+  USER_MODULE_LIST=
+  OFFLINE=false
+  CHANGED_FILES=
+  REEXECED=false
+  RESETREPO=false
+  ISSUE=
+  ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$'
+  TIMER=$(date +%s)
+
+  OSTYPE=$(uname -s)
+
+  # Solaris needs POSIX, not SVID
+  case ${OSTYPE} in
+SunOS)
+  PS=${PS:-ps}
+  AWK=${AWK:-/usr/xpg4/bin/awk}
+  SED=${SED:-/usr/xpg4/bin/sed}
+  WGET=${WGET:-wget}
+  GIT=${GIT:-git}
+  EGREP=${EGREP:-/usr/xpg4/bin/egrep}
+  GREP=${GREP:-/usr/xpg4/bin/grep}
+  PATCH=${PATCH:-patch}
+  DIFF=${DIFF:-diff}
+  JIRACLI=${JIRA:-jira}
+;;
+*)
+  PS=${PS:-ps}
+  AWK=${AWK:-awk}
+  SED=${SED:-sed}
+  WGET=${WGET:-wget}
+  GIT=${GIT:-git}
+  EGREP=${EGREP:-egrep}
+  GREP=${GREP:-grep}
+  PATCH=${PATCH:-patch}
+  DIFF=${DIFF:-diff}
+  JIRACLI=${JIRA:-jira}
+;;
+  esac
+
+  declare -a JIRA_COMMENT_TABLE
+  declare -a JIRA_FOOTER_TABLE
+  declare -a JIRA_HEADER
+  declare -a JIRA_TEST_TABLE
+
+  JFC=0
+  JTC=0
+  JTT=0
+  RESULT=0
+}
 
-###
-printUsage() {
-  echo "Usage: $0 [options] patch-file | defect-number"
+## @description  Print a message to stderr
+## @audience     public
+## @stability    stable
+## @replaceable  no
+## @param        string
+function hadoop_error
+{
+  echo "$*" 1>&2
+}
+
+## @description  Print a message to stderr if --debug is turned on
+## @audience     public
+## @stability    stable
+## @replaceable  no
+## @param        string
+function hadoop_debug
+{
+  if [[ -n ${HADOOP_SHELL_SCRIPT_DEBUG} ]]; then
+    echo "[$(date) DEBUG]: $*" 1>&2
+  fi
+}
+
+## @description  Activate the local timer
+## @audience     public
+## @stability    stable
+## @replaceable  no
+function start_clock
+{
+  hadoop_debug Start clock
+  TIMER=$(date +%s)
+}
+
+## @description  Print the elapsed time in seconds since the start of the local timer
+## @audience     public
+## 

[19/29] hadoop git commit: HDFS-7993. Provide each Replica details in fsck (Contributed by J.Andreina)

2015-04-21 Thread zjshen
HDFS-7993. Provide each Replica details in fsck (Contributed by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/841638d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/841638d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/841638d4

Branch: refs/heads/YARN-2928
Commit: 841638d4fcb3085f9778cafe73f19148c0f73dbd
Parents: 4926b50
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 21 15:24:49 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:55 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../blockmanagement/DatanodeStorageInfo.java|  6 +-
 .../hdfs/server/namenode/NamenodeFsck.java  | 61 ++---
 .../org/apache/hadoop/hdfs/tools/DFSck.java |  6 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 71 
 5 files changed, 132 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/841638d4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1aa9ce4..6951a08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -526,6 +526,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8173. NPE thrown at DataNode shutdown when HTTP server was not able to
 create (surendra singh lilhore via vinayakumarb)
 
+HDFS-7993. Provide each Replica details in fsck (J.Andreina via 
vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/841638d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 8c752ac..c6c9001 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -155,7 +155,7 @@ public class DatanodeStorageInfo {
 this.blockReportCount = blockReportCount;
   }
 
-  boolean areBlockContentsStale() {
+  public boolean areBlockContentsStale() {
 return blockContentsStale;
   }
 
@@ -205,11 +205,11 @@ public class DatanodeStorageInfo {
 return getState() == State.FAILED && numBlocks != 0;
   }
 
-  String getStorageID() {
+  public String getStorageID() {
 return storageID;
   }
 
-  StorageType getStorageType() {
+  public StorageType getStorageType() {
 return storageType;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/841638d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index a8586dd..afaec87 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -68,9 +69,11 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
+import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import 

[12/29] hadoop git commit: HDFS-8179. DFSClient#getServerDefaults returns null within 1 hour of system start. (Contributed by Xiaoyu Yao)

2015-04-21 Thread zjshen
HDFS-8179. DFSClient#getServerDefaults returns null within 1 hour of system 
start. (Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d5ab10b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d5ab10b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d5ab10b

Branch: refs/heads/YARN-2928
Commit: 2d5ab10bf9ba6eb001d9bc7590179f9a1eb702a8
Parents: f2af27c
Author: Arpit Agarwal a...@apache.org
Authored: Mon Apr 20 15:42:42 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:54 2015 -0700

--
 .../src/main/java/org/apache/hadoop/fs/Trash.java|  5 +
 .../org/apache/hadoop/fs/TrashPolicyDefault.java |  4 ++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../main/java/org/apache/hadoop/hdfs/DFSClient.java  |  4 +++-
 .../hadoop/hdfs/TestDistributedFileSystem.java   | 15 +++
 5 files changed, 28 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d5ab10b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
index 2d5f540..aae5cf7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +34,9 @@ import org.apache.hadoop.conf.Configured;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class Trash extends Configured {
+  private static final org.apache.commons.logging.Log LOG =
+  LogFactory.getLog(Trash.class);
+
   private TrashPolicy trashPolicy; // configured trash policy instance
 
   /** 
@@ -84,6 +88,7 @@ public class Trash extends Configured {
 } catch (Exception e) {
   // If we can not determine that trash is enabled server side then
   // bail rather than potentially deleting a file when trash is enabled.
+  LOG.warn("Failed to get server trash configuration", e);
   throw new IOException("Failed to get server trash configuration", e);
 }
 Trash trash = new Trash(fullyResolvedFs, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d5ab10b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index cfb51e2..d6a9b4b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -134,11 +134,11 @@ public class TrashPolicyDefault extends TrashPolicy {
 for (int i = 0; i < 2; i++) {
   try {
 if (!fs.mkdirs(baseTrashPath, PERMISSION)) {  // create current
-  LOG.warn("Can't create(mkdir) trash directory: "+baseTrashPath);
+  LOG.warn("Can't create(mkdir) trash directory: " + baseTrashPath);
   return false;
 }
   } catch (IOException e) {
-LOG.warn("Can't create trash directory: "+baseTrashPath);
+LOG.warn("Can't create trash directory: " + baseTrashPath, e);
 cause = e;
 break;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d5ab10b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8dec32e..2d20812 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -561,6 +561,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8153. Error Message points to wrong parent directory in case of
 path component name length error (Anu Engineer via jitendra)
 
+HDFS-8179. DFSClient#getServerDefaults returns null within 1
+hour of system start. (Xiaoyu Yao via Arpit Agarwal)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
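The DFSClient hunk itself is not reproduced in this digest, but the bug class
is worth spelling out: a lastUpdate field initialized to 0 looks recently
refreshed whenever the monotonic clock is still small after a reboot, so a
cached null survives for the whole validity window. A minimal sketch of the
corrected guard, with an illustrative one-hour validity constant; the field
and constant names are assumptions, not the patch verbatim:

  // Sketch: refresh when the cached value is missing OR stale; never let the
  // timestamp alone decide, since its initial value of 0 is meaningless.
  private FsServerDefaults serverDefaults;
  private long serverDefaultsLastUpdate;   // 0 until the first fetch

  public FsServerDefaults getServerDefaults() throws IOException {
    long now = Time.monotonicNow();
    if (serverDefaults == null
        || now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_MSEC) {
      serverDefaults = namenode.getServerDefaults();   // RPC to the NameNode
      serverDefaultsLastUpdate = now;
    }
    return serverDefaults;
  }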


[20/29] hadoop git commit: HDFS-8133. Improve readability of deleted block check (Daryn Sharp via Colin P. McCabe)

2015-04-21 Thread zjshen
HDFS-8133. Improve readability of deleted block check (Daryn Sharp via Colin P. 
McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39e7929c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39e7929c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39e7929c

Branch: refs/heads/YARN-2928
Commit: 39e7929ceceef0c3fc3830e68fde64f88abd8f54
Parents: 81ec672
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Tue Apr 21 11:41:22 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:56 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../blockmanagement/BlockInfoContiguous.java |  4 
 .../server/blockmanagement/BlockManager.java | 19 ++-
 .../hdfs/server/blockmanagement/BlocksMap.java   |  2 +-
 .../hdfs/server/namenode/FSNamesystem.java   |  5 ++---
 .../server/blockmanagement/TestBlockInfo.java| 10 ++
 6 files changed, 30 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39e7929c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e07e45d..e162d28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -452,6 +452,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. (wheat9)
 
+HDFS-8133. Improve readability of deleted block check (Daryn Sharp via
+Colin P. McCabe)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39e7929c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 48069c1..4314ab3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -86,6 +86,10 @@ public class BlockInfoContiguous extends Block
 this.bc = bc;
   }
 
+  public boolean isDeleted() {
+return (bc == null);
+  }
+
   public DatanodeDescriptor getDatanode(int index) {
 DatanodeStorageInfo storage = getStorageInfo(index);
 return storage == null ? null : storage.getDatanodeDescriptor();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39e7929c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 2a7b02a..1db1356 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1165,13 +1165,14 @@ public class BlockManager {
   DatanodeStorageInfo storageInfo,
   DatanodeDescriptor node) throws IOException {
 
-BlockCollection bc = b.corrupted.getBlockCollection();
-if (bc == null) {
+if (b.corrupted.isDeleted()) {
   blockLog.info("BLOCK markBlockAsCorrupt: {} cannot be marked as" +
       " corrupt as it does not belong to any file", b);
   addToInvalidates(b.corrupted, node);
   return;
 } 
+short expectedReplicas =
+b.corrupted.getBlockCollection().getBlockReplication();
 
 // Add replica to the data-node if it is not already there
 if (storageInfo != null) {
@@ -1183,13 +1184,13 @@ public class BlockManager {
 b.reasonCode);
 
 NumberReplicas numberOfReplicas = countNodes(b.stored);
-boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >= bc
-.getBlockReplication();
+boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=
+expectedReplicas;
 boolean minReplicationSatisfied =
 numberOfReplicas.liveReplicas() >= minReplication;
 boolean hasMoreCorruptReplicas = minReplicationSatisfied &&
 (numberOfReplicas.liveReplicas() + 

[09/29] hadoop git commit: YARN-3136. Fixed a synchronization problem of AbstractYarnScheduler#getTransferredContainers. Contributed by Sunil G

2015-04-21 Thread zjshen
YARN-3136. Fixed a synchronization problem of 
AbstractYarnScheduler#getTransferredContainers. Contributed by Sunil G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bddfcd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bddfcd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bddfcd6

Branch: refs/heads/YARN-2928
Commit: 1bddfcd6019feac788624f2c97d452ddb43d693a
Parents: b8dfc8a
Author: Jian He jia...@apache.org
Authored: Sat Apr 18 12:45:38 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:53 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../dev-support/findbugs-exclude.xml|  8 
 .../ApplicationMasterService.java   | 47 +++-
 .../scheduler/AbstractYarnScheduler.java| 17 ++-
 4 files changed, 51 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bddfcd6/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e7622f8..0f9eef6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -286,6 +286,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3493. RM fails to come up with error Failed to load/recover state 
 when mem settings are changed. (Jian He via wangda)
 
+YARN-3136. Fixed a synchronization problem of
+AbstractYarnScheduler#getTransferredContainers. (Sunil G via jianhe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bddfcd6/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 8aae152..ece8548 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -469,6 +469,14 @@
     <Method name="recoverContainersOnNode" />
     <Bug pattern="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE" />
   </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler" />
+    <Or>
+      <Field name="rmContext" />
+      <Field name="applications" />
+    </Or>
+    <Bug pattern="IS2_INCONSISTENT_SYNC" />
+  </Match>
   
   <!-- Following fields are used in ErrorsAndWarningsBlock, which is not a part of analysis of findbugs -->
   <Match>
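For readers unfamiliar with IS2_INCONSISTENT_SYNC: findbugs raises it when a
field is written under a monitor but read without one, which is exactly the
race this patch closes. A self-contained miniature of the bug class and the
fix, with the scheduler's types simplified to plain strings:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Miniature of the race: 'apps' is mutated under the monitor, so readers
// must take the same monitor or they can observe the map mid-update.
class SchedulerSketch {
  private final Map<String, List<String>> apps =
      new HashMap<String, List<String>>();

  public synchronized void addApp(String appId) {        // writer: locked
    apps.put(appId, new ArrayList<String>());
  }

  // The fix in miniature: the read path is now synchronized as well.
  public synchronized List<String> getTransferredContainers(String appId) {
    List<String> containers = apps.get(appId);
    return containers == null
        ? new ArrayList<String>()
        : new ArrayList<String>(containers);             // defensive copy out
  }
}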

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bddfcd6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index a31127d..ba7b1ad 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -298,32 +298,35 @@ public class ApplicationMasterService extends 
AbstractService implements
 
   // For work-preserving AM restart, retrieve previous attempts' containers
   // and corresponding NM tokens.
-  List<Container> transferredContainers =
-  ((AbstractYarnScheduler) rScheduler)
+  if (app.getApplicationSubmissionContext()
+  .getKeepContainersAcrossApplicationAttempts()) {
+    List<Container> transferredContainers = ((AbstractYarnScheduler) rScheduler)
 .getTransferredContainers(applicationAttemptId);
-  if (!transferredContainers.isEmpty()) {
-    response.setContainersFromPreviousAttempts(transferredContainers);
-    List<NMToken> nmTokens = new ArrayList<NMToken>();
-for (Container container : transferredContainers) {
-  try {
-NMToken token = rmContext.getNMTokenSecretManager()
-.createAndGetNMToken(app.getUser(), applicationAttemptId,
-container);
-if (null != token) {
-  nmTokens.add(token);
-}
-  } catch 

[27/29] hadoop git commit: HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up split calculation (gera)

2015-04-21 Thread zjshen
HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up split 
calculation (gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14d48835
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14d48835
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14d48835

Branch: refs/heads/YARN-2928
Commit: 14d488359f05a82a15425b5fc428cfaecfda4171
Parents: 7b5d660
Author: Gera Shegalov g...@apache.org
Authored: Tue Apr 21 11:57:42 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:57 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../org/apache/hadoop/fs/LocatedFileStatus.java |   7 +-
 .../hadoop/fs/viewfs/ChRootedFileSystem.java|  10 +-
 .../org/apache/hadoop/fs/viewfs/InodeTree.java  |   2 +-
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |  94 ++---
 .../fs/viewfs/ViewFsLocatedFileStatus.java  | 136 +++
 .../fs/viewfs/TestChRootedFileSystem.java   |  14 ++
 .../fs/viewfs/ViewFileSystemBaseTest.java   | 108 +++
 8 files changed, 327 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14d48835/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index bcbffb7..9819300 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -504,6 +504,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11785. Reduce the number of listStatus operation in distcp
 buildListing (Zoran Dimitrijevic via Colin P. McCabe)
 
+HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up
+split calculation (gera)
+
   BUG FIXES
 
 HADOOP-10027. *Compressor_deflateBytesDirect passes instance instead of

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14d48835/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
index 0136894..9e920c5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
@@ -32,6 +32,11 @@ import org.apache.hadoop.fs.permission.FsPermission;
 public class LocatedFileStatus extends FileStatus {
   private BlockLocation[] locations;
 
+
+  public LocatedFileStatus() {
+super();
+  }
+
   /**
* Constructor 
* @param stat a file status
@@ -43,7 +48,7 @@ public class LocatedFileStatus extends FileStatus {
 stat.getBlockSize(), stat.getModificationTime(),
 stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
 stat.getGroup(), null, stat.getPath(), locations);
-if (isSymlink()) {
+if (stat.isSymlink()) {
   setSymlink(stat.getSymlink());
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14d48835/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 9650a37..18e2391 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -37,8 +37,10 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -240,7 +242,13 @@ class ChRootedFileSystem extends FilterFileSystem {
   throws IOException {
 return super.listStatus(fullPath(f));
   }
-  
+
+  @Override
+  public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f)
+  throws IOException {
+return super.listLocatedStatus(fullPath(f));
+  }
+
   @Override
   public 

[10/29] hadoop git commit: HDFS-7863. Missing description of some methods and parameters in javadoc of FSDirDeleteOp. Contributed by Brahma Reddy Battula.

2015-04-21 Thread zjshen
HDFS-7863. Missing description of some methods and parameters in javadoc of 
FSDirDeleteOp. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa2a1526
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa2a1526
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa2a1526

Branch: refs/heads/YARN-2928
Commit: aa2a1526cfa811bd065fec758a135bb7783104e8
Parents: 1bddfcd
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Sun Apr 19 18:06:56 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:53 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hdfs/server/namenode/FSDirDeleteOp.java   | 18 ++
 2 files changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa2a1526/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 062b2e2..25da013 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -517,6 +517,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8142. DistributedFileSystem encryption zone commands should resolve
 relative paths. (Rakesh R via wang)
 
+HDFS-7863. Missing description of some methods and parameters in javadoc of
+FSDirDeleteOp. (Brahma Reddy Battula via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa2a1526/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 2fc4711..02eb1de 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -33,6 +33,7 @@ class FSDirDeleteOp {
   /**
* Delete the target directory and collect the blocks under it
*
+   * @param fsd the FSDirectory instance
* @param iip the INodesInPath instance containing all the INodes for the 
path
* @param collectedBlocks Blocks under the deleted directory
* @param removedINodes INodes that should be removed from inodeMap
@@ -71,6 +72,13 @@ class FSDirDeleteOp {
* <p>
* For small directory or file the deletion is done in one shot.
*
+   * @param fsn namespace
+   * @param src path name to be deleted
+   * @param recursive boolean true to apply to all sub-directories recursively
+   * @param logRetryCache whether to record RPC ids in editlog for retry cache
+   *  rebuilding
+   * @return blocks collected from the deleted path
+   * @throws IOException
*/
   static BlocksMapUpdateInfo delete(
   FSNamesystem fsn, String src, boolean recursive, boolean logRetryCache)
@@ -99,6 +107,8 @@ class FSDirDeleteOp {
* Note: This is to be used by
* {@link org.apache.hadoop.hdfs.server.namenode.FSEditLog} only.
* <br>
+   *
+   * @param fsd the FSDirectory instance
* @param src a string representation of a path to an inode
* @param mtime the time the inode is removed
*/
@@ -134,6 +144,13 @@ class FSDirDeleteOp {
* the {@link org.apache.hadoop.hdfs.server.namenode.FSNamesystem} lock.
* <p>
* For small directory or file the deletion is done in one shot.
+   * @param fsn namespace
+   * @param src path name to be deleted
+   * @param iip the INodesInPath instance containing all the INodes for the 
path
+   * @param logRetryCache whether to record RPC ids in editlog for retry cache
+   *  rebuilding
+   * @return blocks collected from the deleted path
+   * @throws IOException
*/
   static BlocksMapUpdateInfo deleteInternal(
   FSNamesystem fsn, String src, INodesInPath iip, boolean logRetryCache)
@@ -192,6 +209,7 @@ class FSDirDeleteOp {
   /**
* Delete a path from the name space
* Update the count at each ancestor directory with quota
+   * @param fsd the FSDirectory instance
* @param iip the inodes resolved from the path
* @param collectedBlocks blocks collected from the deleted path
* @param removedINodes inodes that should be removed from inodeMap



[28/29] hadoop git commit: MAPREDUCE-6297. Task Id of the failed task in diagnostics should link to the task page. (Siqi Li via gera)

2015-04-21 Thread zjshen
MAPREDUCE-6297. Task Id of the failed task in diagnostics should link to the 
task page. (Siqi Li via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02637b46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02637b46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02637b46

Branch: refs/heads/YARN-2928
Commit: 02637b46ea692bc10f769d65cb76417cfd4f5443
Parents: 14d4883
Author: Gera Shegalov g...@apache.org
Authored: Tue Apr 21 12:36:37 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:57 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../org/apache/hadoop/mapreduce/TaskID.java | 35 
 .../mapreduce/v2/hs/webapp/HsJobBlock.java  |  8 -
 .../mapreduce/v2/hs/webapp/TestBlocks.java  | 20 ++-
 .../v2/hs/webapp/TestHsWebServicesTasks.java| 27 ++-
 5 files changed, 60 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02637b46/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 0cf5c4b..ccdf6d6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -340,6 +340,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6293. Set job classloader on uber-job's LocalContainerLauncher
 event thread. (Sangjin Lee via gera)
 
+MAPREDUCE-6297. Task Id of the failed task in diagnostics should link to
+the task page. (Siqi Li via gera)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02637b46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
index 488ffcc..b9817dd 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java
@@ -25,6 +25,8 @@ import java.text.NumberFormat;
 import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -58,6 +60,9 @@ import org.apache.hadoop.io.WritableUtils;
 public class TaskID extends org.apache.hadoop.mapred.ID {
   protected static final String TASK = "task";
   protected static final NumberFormat idFormat = NumberFormat.getInstance();
+  public static final String TASK_ID_REGEX = TASK + "_(\\d+)_(\\d+)_" +
+  CharTaskTypeMaps.allTaskTypes + "_(\\d+)";
+  public static final Pattern taskIdPattern = Pattern.compile(TASK_ID_REGEX);
   static {
 idFormat.setGroupingUsed(false);
 idFormat.setMinimumIntegerDigits(6);
@@ -207,29 +212,15 @@ public class TaskID extends org.apache.hadoop.mapred.ID {
 throws IllegalArgumentException {
 if(str == null)
   return null;
-String exceptionMsg = null;
-try {
-  String[] parts = str.split("_");
-  if(parts.length == 5) {
-if(parts[0].equals(TASK)) {
-  String type = parts[3];
-  TaskType t = CharTaskTypeMaps.getTaskType(type.charAt(0));
-  if(t != null) {
-  
-return new org.apache.hadoop.mapred.TaskID(parts[1],
-    Integer.parseInt(parts[2]),
-    t,
-    Integer.parseInt(parts[4]));
-  } else
-exceptionMsg = "Bad TaskType identifier. TaskId string : " + str
-+  " is not properly formed.";
-}
-  }
-}catch (Exception ex) {//fall below
-}
-if (exceptionMsg == null) {
-  exceptionMsg = "TaskId string : " + str + " is not properly formed";
+Matcher m = taskIdPattern.matcher(str);
+if (m.matches()) {
+  return new org.apache.hadoop.mapred.TaskID(m.group(1),
+  Integer.parseInt(m.group(2)),
+  CharTaskTypeMaps.getTaskType(m.group(3).charAt(0)),
+  Integer.parseInt(m.group(4)));
 }
+String exceptionMsg = "TaskId string : " + str + " is not properly formed "
+
+   
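A quick round trip through the regex-based parser above; the task id string is
illustrative:

    // Hypothetical sanity check: a well-formed id parses to its components,
    // while a malformed one falls through to the IllegalArgumentException path.
    TaskID id = TaskID.forName("task_1408862402944_0002_m_000001");
    assert id.getTaskType() == TaskType.MAP;
    assert id.getId() == 1;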

[15/29] hadoop git commit: HDFS-8173. NPE thrown at DataNode shutdown when HTTP server was not able to create (Contributed by surendra singh lilhore)

2015-04-21 Thread zjshen
HDFS-8173. NPE thrown at DataNode shutdown when HTTP server was not able to 
create (Contributed by surendra singh lilhore)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/233f1062
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/233f1062
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/233f1062

Branch: refs/heads/YARN-2928
Commit: 233f1062060e11e6a753281f388268bc034b7a9f
Parents: 63d0082
Author: Vinayakumar B vinayakum...@apache.org
Authored: Mon Apr 20 14:58:04 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:54 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../org/apache/hadoop/hdfs/server/datanode/DataNode.java  | 10 ++
 2 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/233f1062/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9bb1fd4..8dec32e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -526,6 +526,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8043. NPE in MiniDFSCluster teardown. (Brahma Reddy Battula via ozawa)
 
+HDFS-8173. NPE thrown at DataNode shutdown when HTTP server was not able to
+create (surendra singh lilhore via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/233f1062/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 0ddb99c..e81da52 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1721,10 +1721,12 @@ public class DataNode extends ReconfigurableBase
 LOG.warn("Exception shutting down DataNode", e);
   }
 }
-try {
-  httpServer.close();
-} catch (Exception e) {
-  LOG.warn("Exception shutting down DataNode HttpServer", e);
+if (httpServer != null) {
+  try {
+    httpServer.close();
+  } catch (Exception e) {
+    LOG.warn("Exception shutting down DataNode HttpServer", e);
+  }
 }
 
 if (pauseMonitor != null) {



[08/29] hadoop git commit: HDFS-8043. NPE in MiniDFSCluster teardown. Contributed by Brahma Reddy Battula.

2015-04-21 Thread zjshen
HDFS-8043. NPE in MiniDFSCluster teardown. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c3e4d4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c3e4d4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c3e4d4f

Branch: refs/heads/YARN-2928
Commit: 3c3e4d4f9b92cbd2dc9113701b1b406c29760773
Parents: aa2a152
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Sun Apr 19 18:19:36 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:53 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 2 ++
 .../src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java  | 7 +--
 .../java/org/apache/hadoop/hdfs/TestDFSClientFailover.java| 4 +++-
 3 files changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3e4d4f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 25da013..30a460f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -520,6 +520,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-7863. Missing description of some methods and parameters in javadoc of
 FSDirDeleteOp. (Brahma Reddy Battula via ozawa)
 
+HDFS-8043. NPE in MiniDFSCluster teardown. (Brahma Reddy Battula via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3e4d4f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index d92f49e..d3eaa6e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1744,11 +1744,14 @@ public class MiniDFSCluster {
 nameNode = null;
   }
 }
-if (deleteDfsDir) {
+if (base_dir != null) {
+  if (deleteDfsDir) {
 base_dir.delete();
-} else {
+  } else {
 base_dir.deleteOnExit();
+  }
 }
+
   }
   
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3e4d4f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
index aa14909..644d66d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
@@ -85,7 +85,9 @@ public class TestDFSClientFailover {
   
   @After
   public void tearDownCluster() throws IOException {
-cluster.shutdown();
+if (cluster != null) {
+  cluster.shutdown();
+}
   }
 
   @After



[05/29] hadoop git commit: HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. Contributed by Haohui Mai.

2015-04-21 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63d0082c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
deleted file mode 100644
index 2dc1d04..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.io.*;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.*;
-
-/**
- * A Block is a Hadoop FS primitive, identified by a 
- * long.
- *
- **/
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class Block implements Writable, Comparable<Block> {
-  public static final String BLOCK_FILE_PREFIX = "blk_";
-  public static final String METADATA_EXTENSION = ".meta";
-  static {  // register a ctor
-WritableFactories.setFactory
-  (Block.class,
-   new WritableFactory() {
- @Override
- public Writable newInstance() { return new Block(); }
-   });
-  }
-
-  public static final Pattern blockFilePattern = Pattern
-  .compile(BLOCK_FILE_PREFIX + "(-??\\d++)$");
-  public static final Pattern metaFilePattern = Pattern
-  .compile(BLOCK_FILE_PREFIX + "(-??\\d++)_(\\d++)\\" + METADATA_EXTENSION
-  + "$");
-  public static final Pattern metaOrBlockFilePattern = Pattern
-  .compile(BLOCK_FILE_PREFIX + "(-??\\d++)(_(\\d++)\\" + METADATA_EXTENSION
-  + ")?$");
-
-  public static boolean isBlockFilename(File f) {
-String name = f.getName();
-return blockFilePattern.matcher(name).matches();
-  }
-
-  public static long filename2id(String name) {
-Matcher m = blockFilePattern.matcher(name);
-return m.matches() ? Long.parseLong(m.group(1)) : 0;
-  }
-
-  public static boolean isMetaFilename(String name) {
-return metaFilePattern.matcher(name).matches();
-  }
-
-  public static File metaToBlockFile(File metaFile) {
-return new File(metaFile.getParent(), metaFile.getName().substring(
-0, metaFile.getName().lastIndexOf('_')));
-  }
-
-  /**
-   * Get generation stamp from the name of the metafile name
-   */
-  public static long getGenerationStamp(String metaFile) {
-Matcher m = metaFilePattern.matcher(metaFile);
-return m.matches() ? Long.parseLong(m.group(2))
-: HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP;
-  }
-
-  /**
-   * Get the blockId from the name of the meta or block file
-   */
-  public static long getBlockId(String metaOrBlockFile) {
-Matcher m = metaOrBlockFilePattern.matcher(metaOrBlockFile);
-return m.matches() ? Long.parseLong(m.group(1)) : 0;
-  }
-
-  private long blockId;
-  private long numBytes;
-  private long generationStamp;
-
-  public Block() {this(0, 0, 0);}
-
-  public Block(final long blkid, final long len, final long generationStamp) {
-set(blkid, len, generationStamp);
-  }
-
-  public Block(final long blkid) {
-this(blkid, 0, HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP);
-  }
-
-  public Block(Block blk) {
-this(blk.blockId, blk.numBytes, blk.generationStamp);
-  }
-
-  /**
-   * Find the blockid from the given filename
-   */
-  public Block(File f, long len, long genstamp) {
-this(filename2id(f.getName()), len, genstamp);
-  }
-
-  public void set(long blkid, long len, long genStamp) {
-this.blockId = blkid;
-this.numBytes = len;
-this.generationStamp = genStamp;
-  }
-  /**
-   */
-  public long getBlockId() {
-return blockId;
-  }
-  
-  public void setBlockId(long bid) {
-blockId = bid;
-  }
-
-  /**
-   */
-  public String getBlockName() {
-return BLOCK_FILE_PREFIX + String.valueOf(blockId);
- 
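The filename helpers in the relocated class encode the on-disk naming scheme;
a small illustration of how they pair up (the filenames are illustrative):

    // Hypothetical examples: block file "blk_123" and its metadata file
    // "blk_123_1001.meta" resolve back to the block id and generation stamp.
    long id = Block.filename2id("blk_123");                        // 123
    boolean meta = Block.isMetaFilename("blk_123_1001.meta");      // true
    long genStamp = Block.getGenerationStamp("blk_123_1001.meta"); // 1001
    long sameId = Block.getBlockId("blk_123_1001.meta");           // 123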

[16/29] hadoop git commit: HDFS-8163. Using monotonicNow for block report scheduling causes test failures on recently restarted systems. (Arpit Agarwal)

2015-04-21 Thread zjshen
HDFS-8163. Using monotonicNow for block report scheduling causes test failures 
on recently restarted systems. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/539f79ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/539f79ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/539f79ad

Branch: refs/heads/YARN-2928
Commit: 539f79ad3432c52a7f8845af3dab34773e0cbc66
Parents: 841638d
Author: Arpit Agarwal a...@apache.org
Authored: Tue Apr 21 10:58:05 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:55 2015 -0700

--
 .../main/java/org/apache/hadoop/util/Time.java  |   2 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/datanode/BPOfferService.java|   2 +-
 .../hdfs/server/datanode/BPServiceActor.java| 203 ---
 .../datanode/TestBpServiceActorScheduler.java   | 163 +++
 5 files changed, 299 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/539f79ad/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
index b988923..20e2965 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
@@ -47,6 +47,8 @@ public final class Time {
* milliseconds, and not affected by settimeofday or similar system clock
* changes.  This is appropriate to use when computing how much longer to
* wait for an interval to expire.
+   * This function can return a negative value and it must be handled correctly
+   * by callers. See the documentation of System#nanoTime for caveats.
* @return a monotonic clock that counts in milliseconds.
*/
   public static long monotonicNow() {
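Since the value comes from System.nanoTime, only differences between two
monotonicNow() readings are meaningful; the absolute value, including its
sign, is not. A minimal sketch of the safe idiom the new javadoc is warning
callers to use:

import org.apache.hadoop.util.Time;

// Sketch: compare differences, never absolutes, so the code stays correct
// even when monotonicNow() itself is negative shortly after a reboot.
class IntervalCheck {
  private final long intervalMs;
  private long lastRun = Time.monotonicNow();

  IntervalCheck(long intervalMs) {
    this.intervalMs = intervalMs;
  }

  boolean due() {
    long now = Time.monotonicNow();
    if (now - lastRun >= intervalMs) {   // the subtraction is sign-safe
      lastRun = now;
      return true;
    }
    return false;
  }
}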

http://git-wip-us.apache.org/repos/asf/hadoop/blob/539f79ad/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6951a08..e07e45d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -566,6 +566,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
 goes for infinite loop (vinayakumarb)
 
+HDFS-8163. Using monotonicNow for block report scheduling causes
+test failures on recently restarted systems. (Arpit Agarwal)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/539f79ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 1b42b19..92323f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -429,7 +429,7 @@ class BPOfferService {
*/
   void scheduleBlockReport(long delay) {
 for (BPServiceActor actor : bpServices) {
-  actor.scheduleBlockReport(delay);
+  actor.getScheduler().scheduleBlockReport(delay);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/539f79ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index ba5..5bc505f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import 

[18/29] hadoop git commit: Set the release date for 2.7.0

2015-04-21 Thread zjshen
Set the release date for 2.7.0


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4926b500
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4926b500
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4926b500

Branch: refs/heads/YARN-2928
Commit: 4926b5001080e35c97f94986759c7859e467b1cb
Parents: 2d09c8b
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Mon Apr 20 20:16:58 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:55 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
 hadoop-mapreduce-project/CHANGES.txt| 2 +-
 hadoop-yarn-project/CHANGES.txt | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4926b500/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a48baf8..230717c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -531,7 +531,7 @@ Release 2.7.1 - UNRELEASED
 
   BUG FIXES
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4926b500/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2291855..1aa9ce4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -564,7 +564,7 @@ Release 2.7.1 - UNRELEASED
 HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
 goes for infinite loop (vinayakumarb)
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4926b500/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index a02ae84..ffa01fa 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -351,7 +351,7 @@ Release 2.7.1 - UNRELEASED
 
 MAPREDUCE-6300. Task list sort by task id broken. (Siqi Li via aajisaka)
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4926b500/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0baee9d..3e34fac 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -309,7 +309,7 @@ Release 2.7.1 - UNRELEASED
 YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf
 without making a copy. (Jason Lowe via jianhe)
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 



[14/29] hadoop git commit: YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf without making a copy. Contributed by Jason Lowe

2015-04-21 Thread zjshen
YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf without 
making a copy. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea458a38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea458a38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea458a38

Branch: refs/heads/YARN-2928
Commit: ea458a383b607e408c0be433f5e1c15d08bd680b
Parents: 233f106
Author: Jian He jia...@apache.org
Authored: Mon Apr 20 10:38:27 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:54 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../yarn/client/api/impl/ContainerManagementProtocolProxy.java   | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea458a38/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0f9eef6..fe57d64 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -303,6 +303,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3487. CapacityScheduler scheduler lock obtained unnecessarily when 
 calling getQueue (Jason Lowe via wangda)
 
+YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf
+without making a copy. (Jason Lowe via jianhe)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea458a38/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
index eaf048d..94ebf0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
@@ -67,7 +67,7 @@ public class ContainerManagementProtocolProxy {
 
   public ContainerManagementProtocolProxy(Configuration conf,
   NMTokenCache nmTokenCache) {
-this.conf = conf;
+this.conf = new Configuration(conf);
 this.nmTokenCache = nmTokenCache;
 
 maxConnectedNMs =
@@ -88,7 +88,7 @@ public class ContainerManagementProtocolProxy {
   cmProxy = Collections.emptyMap();
   // Connections are not being cached so ensure connections close quickly
   // to avoid creating thousands of RPC client threads on large clusters.
-  conf.setInt(
+  this.conf.setInt(
   CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
   0);
 }
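The one-line change above is the standard defensive-copy idiom: a
Configuration passed in by a caller is typically shared, so it must be copied
before being customized. In miniature (class name is illustrative):

import org.apache.hadoop.conf.Configuration;

// Miniature of the fix: copy first, then tune the copy. The caller's conf,
// possibly shared process-wide, is never mutated.
class ProxyConfSketch {
  private final Configuration conf;

  ProxyConfSketch(Configuration callerConf) {
    this.conf = new Configuration(callerConf);   // defensive copy
    this.conf.setInt("ipc.client.connection.maxidletime", 0);
  }
}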



[26/29] hadoop git commit: YARN-3495. Confusing log generated by FairScheduler. Contributed by Brahma Reddy Battula.

2015-04-21 Thread zjshen
YARN-3495. Confusing log generated by FairScheduler. Contributed by Brahma 
Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/632c4092
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/632c4092
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/632c4092

Branch: refs/heads/YARN-2928
Commit: 632c40924310b281f430c874517248744669fbe1
Parents: 640ffea
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Apr 22 05:47:59 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:56 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../server/resourcemanager/scheduler/fair/FairScheduler.java| 5 +++--
 2 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/632c4092/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3e34fac..a3ca475 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -292,6 +292,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3136. Fixed a synchronization problem of
 AbstractYarnScheduler#getTransferredContainers. (Sunil G via jianhe)
 
+YARN-3495. Confusing log generated by FairScheduler.
+(Brahma Reddy Battula via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/632c4092/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index a6c5416..f481de5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -796,7 +796,8 @@ public class FairScheduler extends
   protected synchronized void completedContainer(RMContainer rmContainer,
   ContainerStatus containerStatus, RMContainerEventType event) {
 if (rmContainer == null) {
-  LOG.info("Null container completed...");
+  LOG.info("Container " + containerStatus.getContainerId()
+      + " completed with event " + event);
   return;
 }
 
@@ -809,7 +810,7 @@ public class FairScheduler extends
 container.getId().getApplicationAttemptId().getApplicationId();
 if (application == null) {
 LOG.info("Container " + container + " of" +
-    " unknown application attempt " + appId +
+    " finished application " + appId +
     " completed with event " + event);
   return;
 }
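
For reference, a self-contained sketch of the clearer log line, assuming commons-logging (which FairScheduler uses) and plain String stand-ins for the YARN ContainerStatus and RMContainerEventType parameters; the container id below is made up:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class CompletedContainerLogSketch {
  private static final Log LOG =
      LogFactory.getLog(CompletedContainerLogSketch.class);

  static void completedContainer(Object rmContainer, String containerId,
      String event) {
    if (rmContainer == null) {
      // Names the container and event instead of the old, uninformative
      // "Null container completed".
      LOG.info("Container " + containerId + " completed with event " + event);
      return;
    }
    // ... normal completion handling elided ...
  }

  public static void main(String[] args) {
    completedContainer(null, "container_0001_01_000002", "FINISHED");
  }
}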



[02/29] hadoop git commit: YARN-3493. RM fails to come up with error "Failed to load/recover state" when mem settings are changed. (Jian He via wangda)

2015-04-21 Thread zjshen
YARN-3493. RM fails to come up with error "Failed to load/recover state" when 
mem settings are changed. (Jian He via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86762f05
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86762f05
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86762f05

Branch: refs/heads/YARN-2928
Commit: 86762f05ad7a8bdb66de09ce7ec4c9cc6fe906a6
Parents: d9c8159
Author: Wangda Tan wan...@apache.org
Authored: Fri Apr 17 17:11:22 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue Apr 21 16:16:52 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../ApplicationMasterService.java   |  2 +-
 .../server/resourcemanager/RMAppManager.java| 38 +--
 .../server/resourcemanager/RMServerUtils.java   |  4 +-
 .../rmapp/attempt/RMAppAttemptImpl.java |  1 -
 .../scheduler/SchedulerUtils.java   | 70 +---
 .../TestWorkPreservingRMRestart.java| 26 
 .../scheduler/TestSchedulerUtils.java   | 47 +++--
 8 files changed, 119 insertions(+), 72 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86762f05/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index dd55bfd..7b2158c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -283,6 +283,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3021. YARN's delegation-token handling disallows certain trust setups
 to operate properly over DistCp. (Yongjun Zhang via jianhe)
 
+YARN-3493. RM fails to come up with error "Failed to load/recover state" 
+when mem settings are changed. (Jian He via wangda)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86762f05/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index b1f0472..a31127d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -501,7 +501,7 @@ public class ApplicationMasterService extends AbstractService implements
   
   // sanity check
   try {
-RMServerUtils.validateResourceRequests(ask,
+RMServerUtils.normalizeAndValidateRequests(ask,
 rScheduler.getMaximumResourceCapability(), app.getQueue(),
 rScheduler);
   } catch (InvalidResourceRequestException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86762f05/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 9197630..e511ff0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -279,7 +279,7 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
 ApplicationId applicationId = submissionContext.getApplicationId();
 
 RMAppImpl application =
-createAndPopulateNewRMApp(submissionContext, submitTime, user);
+createAndPopulateNewRMApp(submissionContext, submitTime, user, false);
 ApplicationId appId = submissionContext.getApplicationId();
 
 if 
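
The rename from validateResourceRequests to normalizeAndValidateRequests carries the point of the fix: asks are normalized against the scheduler's current limits before they are validated, so a request recorded under older, larger memory settings need not hard-fail recovery. A minimal sketch of that normalize-then-validate idea, with resources reduced to plain megabyte ints and every name illustrative rather than the real RMServerUtils/SchedulerUtils code:

public class NormalizeAndValidateSketch {
  static int normalize(int requestedMb, int minAllocMb, int maxAllocMb) {
    // Round up to a multiple of the minimum allocation, then cap at the
    // scheduler's current maximum.
    int rounded = ((requestedMb + minAllocMb - 1) / minAllocMb) * minAllocMb;
    return Math.min(rounded, maxAllocMb);
  }

  static void validate(int requestedMb, int maxAllocMb) {
    if (requestedMb <= 0 || requestedMb > maxAllocMb) {
      throw new IllegalArgumentException(
          "Invalid resource request: " + requestedMb + " MB");
    }
  }

  public static void main(String[] args) {
    int recoveredAsk = 2500;  // stored while max-alloc was larger
    int maxAllocMb = 2048;    // shrunk before the RM restart
    int normalized = normalize(recoveredAsk, 1024, maxAllocMb);
    validate(normalized, maxAllocMb);  // passes only because we normalized
    System.out.println("ask normalized to " + normalized + " MB");
  }
}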

hadoop git commit: YARN-3410. YARN admin should be able to remove individual application records from RMStateStore. (Rohith Sharmaks via wangda)

2015-04-21 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk cfba35505 -> e71d0d87d


YARN-3410. YARN admin should be able to remove individual application records 
from RMStateStore. (Rohith Sharmaks via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e71d0d87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e71d0d87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e71d0d87

Branch: refs/heads/trunk
Commit: e71d0d87d9b388f211a8eb3d2cd9af347abf9bda
Parents: cfba355
Author: Wangda Tan wan...@apache.org
Authored: Tue Apr 21 17:51:22 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Tue Apr 21 17:51:22 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 hadoop-yarn-project/hadoop-yarn/bin/yarn|  4 ++-
 .../server/resourcemanager/ResourceManager.java | 34 ++--
 .../recovery/FileSystemRMStateStore.java|  9 ++
 .../recovery/LeveldbRMStateStore.java   | 12 +++
 .../recovery/MemoryRMStateStore.java|  4 +++
 .../recovery/NullRMStateStore.java  |  5 +++
 .../resourcemanager/recovery/RMStateStore.java  |  9 ++
 .../recovery/ZKRMStateStore.java|  9 ++
 .../recovery/RMStateStoreTestBase.java  | 15 +
 .../recovery/TestFSRMStateStore.java|  1 +
 .../recovery/TestLeveldbRMStateStore.java   |  6 
 .../recovery/TestZKRMStateStore.java|  1 +
 .../src/site/markdown/YarnCommands.md   |  1 +
 14 files changed, 110 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e71d0d87/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 236a7d0..8b06dfc 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -90,6 +90,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3463. Integrate OrderingPolicy Framework with CapacityScheduler.
 (Craig Welch via wangda)
 
+YARN-3410. YARN admin should be able to remove individual application 
+records from RMStateStore. (Rohith Sharmaks via wangda)
+
   IMPROVEMENTS
 
 YARN-1880. Cleanup TestApplicationClientProtocolOnHA

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e71d0d87/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index a98f3e6..12e9ef6 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -35,7 +35,9 @@ function hadoop_usage
   echo "  proxyserver                          run the web app proxy server"
   echo "  queue                                prints queue information"
   echo "  resourcemanager                      run the ResourceManager"
-  echo "  resourcemanager -format-state-store   deletes the RMStateStore"
+  echo "                                       Use -format-state-store for deleting the RMStateStore."
+  echo "                                       Use -remove-application-from-state-store appId for "
+  echo "                                       removing application from RMStateStore."
   echo "  rmadmin                              admin tools"
   echo "  scmadmin                             SharedCacheManager admin tools"
   echo "  sharedcachemanager                   run the SharedCacheManager daemon"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e71d0d87/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 8bd8e21..130cfd4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -93,6 +93,7 @@ import org.apache.hadoop.yarn.server.webproxy.AppReportFetcher;
 import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxy;
 import 
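
Besides the new bin/yarn option, the diffstat shows the patch threading one removal hook through every RMStateStore implementation (file system, leveldb, memory, null, ZooKeeper). A minimal sketch of that shape, with illustrative names only; the real classes live under org.apache.hadoop.yarn.server.resourcemanager.recovery:

import java.util.HashMap;
import java.util.Map;

// Common entry point: each backing store knows how to delete one
// application's records (the app node plus any attempt data under it).
abstract class StateStoreSketch {
  abstract void removeApplication(String appId) throws Exception;
}

// In-memory variant, roughly the shape a MemoryRMStateStore-style store needs.
class MemoryStoreSketch extends StateStoreSketch {
  private final Map<String, byte[]> apps = new HashMap<>();

  void put(String appId, byte[] data) {
    apps.put(appId, data);
  }

  @Override
  void removeApplication(String appId) {
    apps.remove(appId);
  }
}

public class RemoveAppSketch {
  public static void main(String[] args) throws Exception {
    MemoryStoreSketch store = new MemoryStoreSketch();
    store.put("application_0001", new byte[0]);
    store.removeApplication("application_0001");
    System.out.println("removed application_0001");
  }
}

The admin-facing flow is then "yarn resourcemanager -remove-application-from-state-store appId", so a single bad application record no longer forces formatting the entire store.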

svn commit: r8669 - in /release/hadoop/common: ./ hadoop-2.7.0/

2015-04-21 Thread vinodkv
Author: vinodkv
Date: Tue Apr 21 16:47:02 2015
New Revision: 8669

Log:
Publishing the bits for release 2.7.0

Added:
release/hadoop/common/hadoop-2.7.0/
release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0-src.tar.gz   (with props)
release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0-src.tar.gz.asc   (with 
props)
release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0-src.tar.gz.mds
release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0.tar.gz   (with props)
release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0.tar.gz.asc   (with props)
release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0.tar.gz.mds
Modified:
release/hadoop/common/current

Modified: release/hadoop/common/current
==
--- release/hadoop/common/current (original)
+++ release/hadoop/common/current Tue Apr 21 16:47:02 2015
@@ -1 +1 @@
-link hadoop-2.6.0
\ No newline at end of file
+link hadoop-2.7.0
\ No newline at end of file

Added: release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0-src.tar.gz
==
Binary file - no diff available.

Propchange: release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0-src.tar.gz
--
svn:mime-type = application/x-gzip

Added: release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0-src.tar.gz.asc
==
Binary file - no diff available.

Propchange: release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0-src.tar.gz.asc
--
svn:mime-type = application/pgp-signature

Added: release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0-src.tar.gz.mds
==
--- release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0-src.tar.gz.mds (added)
+++ release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0-src.tar.gz.mds Tue Apr 21 
16:47:02 2015
@@ -0,0 +1,36 @@
+hadoop-2.7.0-src.tar.gz:MD5 = 65 1D 97
+  04 0C 1D
+  EE 95  53
+  DA 20 79
+  EF 72 F2
+  8A
+hadoop-2.7.0-src.tar.gz:   SHA1 = 3985 C029 EEDF
+  BBEB A344  9671
+  2DB9 72D1 12E1
+  C05A
+hadoop-2.7.0-src.tar.gz: RMD160 = 9C62 4FDE 047D
+  400C 150E  C8E1
+  DC5D 32EA EF4F
+  664C
+hadoop-2.7.0-src.tar.gz: SHA224 = 52169B5C E499F341
+  A5CC8C7D 1CD69BD3
+  0800E273 094F4F53
+  F00CACD5
+hadoop-2.7.0-src.tar.gz: SHA256 = E7F877A3 A978D07A
+  392434B0 EAECC39E
+  286FE8FC 138E0F31
+  B1774AA2 3173338E
+hadoop-2.7.0-src.tar.gz: SHA384 = F8EBF286 1F20A195
+  7A1E4022 1B5F6B76
+  A3240345 C3F5026A
+  ECE5DB9A 3683166A
+  51A02217 8B189360
+  66F650EF CC1BC5B9
+hadoop-2.7.0-src.tar.gz: SHA512 = CF5DEB97 111EDD9B
+  A0148A53 5A5F2062
+  B49C3AE2 5E0460DF
+  39F95368 562A4D35
+  EC2D52EF 90F8FFA4
+  05EEF1BD 316D23EB
+  737BDD75 5961CF88
+  270A2F01 DD26C480
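
The .mds files published alongside the signatures let downloaders verify the bits. A minimal verification sketch, assuming a local copy of hadoop-2.7.0-src.tar.gz; the expected value is the SHA-256 from the .mds content above, with the .mds spacing stripped before comparison:

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.MessageDigest;

public class MdsCheckSketch {
  public static void main(String[] args) throws Exception {
    // Copied from hadoop-2.7.0-src.tar.gz.mds above.
    String expected = "E7F877A3 A978D07A 392434B0 EAECC39E "
        + "286FE8FC 138E0F31 B1774AA2 3173338E";

    MessageDigest md = MessageDigest.getInstance("SHA-256");
    try (InputStream in =
        Files.newInputStream(Paths.get("hadoop-2.7.0-src.tar.gz"))) {
      byte[] buf = new byte[8192];
      for (int n; (n = in.read(buf)) > 0; ) {
        md.update(buf, 0, n);
      }
    }

    StringBuilder actual = new StringBuilder();
    for (byte b : md.digest()) {
      actual.append(String.format("%02X", b & 0xff));
    }

    boolean ok = expected.replace(" ", "").equals(actual.toString());
    System.out.println(ok ? "SHA-256 matches" : "SHA-256 MISMATCH");
  }
}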

Added: release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0.tar.gz
==
Binary file - no diff available.

Propchange: release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0.tar.gz
--
svn:mime-type = application/x-gzip

Added: release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0.tar.gz.asc
==
Binary file - no diff available.

Propchange: release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0.tar.gz.asc
--
svn:mime-type = application/pgp-signature

Added: release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0.tar.gz.mds
==
--- release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0.tar.gz.mds (added)
+++ release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0.tar.gz.mds Tue Apr 21 
16:47:02 2015
@@ -0,0 +1,36 @@
+hadoop-2.7.0.tar.gz:MD5 = 79 A6 E8
+  7B 09