hadoop git commit: YARN-3258. FairScheduler: Need to add more logging to investigate allocations. Contributed by Anubhav Dhoot.

2015-03-31 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 85dc3c14b -> b5a22e983


YARN-3258. FairScheduler: Need to add more logging to investigate allocations. 
Contributed by Anubhav Dhoot.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5a22e98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5a22e98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5a22e98

Branch: refs/heads/trunk
Commit: b5a22e983832d4843b5df1d07858988e8bbf37e3
Parents: 85dc3c1
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 31 17:42:44 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Mar 31 17:42:44 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../server/resourcemanager/scheduler/fair/FSAppAttempt.java  | 4 
 .../server/resourcemanager/scheduler/fair/FSLeafQueue.java   | 8 +++-
 .../yarn/server/resourcemanager/scheduler/fair/FSQueue.java  | 8 
 4 files changed, 22 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5a22e98/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f72d06d..d650e1b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -86,6 +86,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2495. Allow admin specify labels from each NM (Distributed 
 configuration for node label). (Naganarasimha G R via wangda)
 
+YARN-3258. FairScheduler: Need to add more logging to investigate
+allocations. (Anubhav Dhoot via ozawa)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5a22e98/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index dfde5ab..46617ff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -570,6 +570,10 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 // Check the AM resource usage for the leaf queue
 if (getLiveContainers().size() == 0 && !getUnmanagedAM()) {
   if (!getQueue().canRunAppAM(getAMResource())) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Skipping allocation because maxAMShare limit would " +
+  "be exceeded");
+}
 return Resources.none();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5a22e98/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 3c97535..c49a323 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -284,6 +284,8 @@ public class FSLeafQueue extends FSQueue {
 if (LOG.isDebugEnabled()) {
   LOG.debug("The updated demand for " + getName() + " is " + demand
   + "; the max is " + maxRes);
+  LOG.debug("The updated fairshare for " + getName() + " is "
+  + getFairShare());
 }
   }
   
@@ -304,7 +306,7 @@ public class FSLeafQueue extends FSQueue {
 

hadoop git commit: YARN-3258. FairScheduler: Need to add more logging to investigate allocations. Contributed by Anubhav Dhoot.

2015-03-31 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 312e50f9c -> 99b825569


YARN-3258. FairScheduler: Need to add more logging to investigate allocations. 
Contributed by Anubhav Dhoot.

(cherry picked from commit b5a22e983832d4843b5df1d07858988e8bbf37e3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99b82556
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99b82556
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99b82556

Branch: refs/heads/branch-2
Commit: 99b8255693f92db8c6cde8c976e7f4263dbc
Parents: 312e50f
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 31 17:42:44 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Mar 31 17:43:07 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../server/resourcemanager/scheduler/fair/FSAppAttempt.java  | 4 
 .../server/resourcemanager/scheduler/fair/FSLeafQueue.java   | 8 +++-
 .../yarn/server/resourcemanager/scheduler/fair/FSQueue.java  | 8 
 4 files changed, 22 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b82556/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d2850ad..b76ef16 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -38,6 +38,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2495. Allow admin specify labels from each NM (Distributed 
 configuration for node label). (Naganarasimha G R via wangda)
 
+YARN-3258. FairScheduler: Need to add more logging to investigate
+allocations. (Anubhav Dhoot via ozawa)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b82556/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index dfde5ab..46617ff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -570,6 +570,10 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 // Check the AM resource usage for the leaf queue
 if (getLiveContainers().size() == 0 && !getUnmanagedAM()) {
   if (!getQueue().canRunAppAM(getAMResource())) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Skipping allocation because maxAMShare limit would " +
+  "be exceeded");
+}
 return Resources.none();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b82556/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 3c97535..c49a323 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -284,6 +284,8 @@ public class FSLeafQueue extends FSQueue {
 if (LOG.isDebugEnabled()) {
   LOG.debug("The updated demand for " + getName() + " is " + demand
   + "; the max is " + maxRes);
+  LOG.debug("The updated fairshare for " + getName() + " is "
+  + getFairShare());
 }
   

hadoop git commit: HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits (Vinayakumar B)

2015-03-31 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 5ef6204c0 -> 86a9b65df


HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits 
(Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86a9b65d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86a9b65d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86a9b65d

Branch: refs/heads/HDFS-7285
Commit: 86a9b65df1d71c12eaf9cafb2a39e1dfb5f5f2c9
Parents: 5ef6204
Author: Vinayakumar B vinayakuma...@intel.com
Authored: Tue Mar 31 15:12:09 2015 +0530
Committer: Vinayakumar B vinayakuma...@intel.com
Committed: Tue Mar 31 15:12:09 2015 +0530

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt| 40 +++-
 1 file changed, 39 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86a9b65d/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 21e4c03..a686315 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -3,6 +3,44 @@
 HDFS-7347. Configurable erasure coding policy for individual files and
 directories ( Zhe Zhang via vinayakumarb )
 
-HDFS-7716. Add a test for BlockGroup support in FSImage.
+HDFS-7339. Representing striped block groups in NameNode with hierarchical
+naming protocol ( Zhe Zhang )
+
+HDFS-7652. Process block reports for erasure coded blocks (Zhe Zhang)
+
+HDFS-7716. Erasure Coding: extend BlockInfo to handle EC info (Jing Zhao)
+
+HDFS-7749. Erasure Coding: Add striped block support in INodeFile (Jing 
Zhao)
+
+HDFS-7837. Erasure Coding: allocate and persist striped blocks in NameNode
+(Jing Zhao via Zhe Zhang)
+
+HDFS-7872. Erasure Coding: INodeFile.dumpTreeRecursively() supports to 
print
+striped blocks (Takuya Fukudome via jing9)
+
+HDFS-7853. Erasure coding: extend LocatedBlocks to support reading from
+striped files (Jing Zhao)
+
+HDFS-7826. Erasure Coding: Update INodeFile quota computation for striped
+blocks ( Kai Sasaki via jing9 )
+
+HDFS-7912. Erasure Coding: track BlockInfo instead of Block in
+UnderReplicatedBlocks and PendingReplicationBlocks (Jing Zhao)
+
+HDFS-7369. Erasure coding: distribute recovery work for striped blocks to
+DataNode (Zhe Zhang)
+
+HDFS-7864. Erasure Coding: Update safemode calculation for striped blocks
+(GAO Rui via jing9)
+
+HDFS-7827. Erasure Coding: support striped blocks in non-protobuf fsimage
+( Hui Zheng via jing9 )
+
+HDFS-7616. Add a test for BlockGroup support in FSImage.
 (Takuya Fukudome via szetszwo)
 
+HDFS-7907. Erasure Coding: track invalid, corrupt, and under-recovery 
striped
+blocks in NameNode (Jing Zhao)
+
+HDFS-8005. Erasure Coding: simplify striped block recovery work computation
+and add tests (Jing Zhao)
\ No newline at end of file



hadoop git commit: HDFS-7671. hdfs user guide should point to the common rack awareness doc. Contributed by Kai Sasaki.

2015-03-31 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e4cd67e9b -> 7f6811ad7


HDFS-7671. hdfs user guide should point to the common rack awareness doc. 
Contributed by Kai Sasaki.

(cherry picked from commit 859cab2f2273f563fd70e3e616758edef91ccf41)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f6811ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f6811ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f6811ad

Branch: refs/heads/branch-2
Commit: 7f6811ad73bfc59ca9897804dbe0f8f3a4f84c3c
Parents: e4cd67e
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Apr 1 00:26:16 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Wed Apr 1 00:26:54 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/site/markdown/HdfsUserGuide.md| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f6811ad/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index da3d729..3bfc550 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -53,6 +53,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7944. Minor cleanup of BlockPoolManager#getAllNamenodeThreads.
 (Arpit Agarwal)
 
+HDFS-7671. hdfs user guide should point to the common rack awareness doc.
+(Kai Sasaki via aajisaka)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f6811ad/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
index 37fa4be..ffd8532 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
@@ -264,7 +264,7 @@ For command usage, see 
[balancer](./HDFSCommands.html#balancer).
 Rack Awareness
 --
 
-Typically large Hadoop clusters are arranged in racks and network traffic 
between different nodes with in the same rack is much more desirable than 
network traffic across the racks. In addition NameNode tries to place replicas 
of block on multiple racks for improved fault tolerance. Hadoop lets the 
cluster administrators decide which rack a node belongs to through 
configuration variable `net.topology.script.file.name`. When this script is 
configured, each node runs the script to determine its rack id. A default 
installation assumes all the nodes belong to the same rack. This feature and 
configuration is further described in PDF attached to 
[HADOOP-692](https://issues.apache.org/jira/browse/HADOOP-692).
+A HDFS cluster can recognize the topology of racks where each nodes are put. 
It is important to configure this topology in order to optimize the data 
capacity and usage. For more detail, please check the [rack 
awareness](../hadoop-common/RackAwareness.html) in common document.
 
 Safemode
 



hadoop git commit: HDFS-7671. hdfs user guide should point to the common rack awareness doc. Contributed by Kai Sasaki.

2015-03-31 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3df61f303 -> 859cab2f2


HDFS-7671. hdfs user guide should point to the common rack awareness doc. 
Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/859cab2f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/859cab2f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/859cab2f

Branch: refs/heads/trunk
Commit: 859cab2f2273f563fd70e3e616758edef91ccf41
Parents: 3df61f3
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Apr 1 00:26:16 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Wed Apr 1 00:26:16 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/site/markdown/HdfsUserGuide.md| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/859cab2f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e8075e6..f3537b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -368,6 +368,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7944. Minor cleanup of BlockPoolManager#getAllNamenodeThreads.
 (Arpit Agarwal)
 
+HDFS-7671. hdfs user guide should point to the common rack awareness doc.
+(Kai Sasaki via aajisaka)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/859cab2f/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
index 37fa4be..ffd8532 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
@@ -264,7 +264,7 @@ For command usage, see 
[balancer](./HDFSCommands.html#balancer).
 Rack Awareness
 --
 
-Typically large Hadoop clusters are arranged in racks and network traffic 
between different nodes with in the same rack is much more desirable than 
network traffic across the racks. In addition NameNode tries to place replicas 
of block on multiple racks for improved fault tolerance. Hadoop lets the 
cluster administrators decide which rack a node belongs to through 
configuration variable `net.topology.script.file.name`. When this script is 
configured, each node runs the script to determine its rack id. A default 
installation assumes all the nodes belong to the same rack. This feature and 
configuration is further described in PDF attached to 
[HADOOP-692](https://issues.apache.org/jira/browse/HADOOP-692).
+A HDFS cluster can recognize the topology of racks where each nodes are put. 
It is important to configure this topology in order to optimize the data 
capacity and usage. For more detail, please check the [rack 
awareness](../hadoop-common/RackAwareness.html) in common document.
 
 Safemode
 



hadoop git commit: YARN-3412. RM tests should use MockRM where possible. (kasha) (cherry picked from commit 79f7f2aabfd7a69722748850f4d3b1ff54af7556)

2015-03-31 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7f6811ad7 -> 85c420a4d


YARN-3412. RM tests should use MockRM where possible. (kasha)
(cherry picked from commit 79f7f2aabfd7a69722748850f4d3b1ff54af7556)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85c420a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85c420a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85c420a4

Branch: refs/heads/branch-2
Commit: 85c420a4d48969e209079a560dbe00dee7d8a2eb
Parents: 7f6811a
Author: Karthik Kambatla ka...@apache.org
Authored: Tue Mar 31 09:14:15 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Tue Mar 31 09:22:16 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 2 ++
 .../yarn/server/resourcemanager/TestMoveApplication.java  | 3 +--
 .../yarn/server/resourcemanager/TestResourceManager.java  | 6 ++
 .../server/resourcemanager/monitor/TestSchedulingMonitor.java | 3 ++-
 .../server/resourcemanager/recovery/TestZKRMStateStore.java   | 7 +++
 .../resourcemanager/scheduler/fair/TestFairScheduler.java | 3 +--
 .../scheduler/fair/TestFairSchedulerEventLog.java | 4 ++--
 .../resourcemanager/scheduler/fifo/TestFifoScheduler.java | 5 ++---
 8 files changed, 15 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85c420a4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b76ef16..273a788 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -71,6 +71,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3400. [JDK 8] Build Failure due to unreported exceptions in
 RPCUtil (rkanter)
 
+YARN-3412. RM tests should use MockRM where possible. (kasha)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85c420a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestMoveApplication.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestMoveApplication.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestMoveApplication.java
index 36153de..d2bde80 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestMoveApplication.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestMoveApplication.java
@@ -52,8 +52,7 @@ public class TestMoveApplication {
 FifoSchedulerWithMove.class);
 conf.set(YarnConfiguration.YARN_ADMIN_ACL, " ");
 conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
-resourceManager = new ResourceManager();
-resourceManager.init(conf);
+resourceManager = new MockRM(conf);
 
resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey();
 resourceManager.getRMContext().getNMTokenSecretManager().rollMasterKey();
 resourceManager.start();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85c420a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
index 6735575..fbf54fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
@@ -212,9 +212,8 @@ public class TestResourceManager {
   public void testResourceManagerInitConfigValidation() throws Exception {
 Configuration conf = new YarnConfiguration();
 conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, -1);
-

hadoop git commit: HDFS-7617. Add unit tests for editlog transactions for EC. Contributed by Hui Zheng.

2015-03-31 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 86a9b65df -> b3e2fc1ed


HDFS-7617. Add unit tests for editlog transactions for EC. Contributed by Hui 
Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3e2fc1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3e2fc1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3e2fc1e

Branch: refs/heads/HDFS-7285
Commit: b3e2fc1edae6f4794d9c34e1dd001861af8f283f
Parents: 86a9b65
Author: Zhe Zhang z...@apache.org
Authored: Tue Mar 31 10:46:04 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue Mar 31 10:46:04 2015 -0700

--
 .../server/namenode/TestFSEditLogLoader.java| 157 +++
 1 file changed, 157 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3e2fc1e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 833ef95..d3cb749 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -39,14 +39,18 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import 
org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
@@ -414,4 +418,157 @@ public class TestFSEditLogLoader {
   fromByte(code), FSEditLogOpCodes.fromByte(code));
 }
   }
+
+  @Test
+  public void testAddNewStripedBlock() throws IOException{
+// start a cluster
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9)
+  .build();
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+  FSNamesystem fns = cluster.getNamesystem();
+
+  String testDir = "/ec";
+  String testFile = "testfile_001";
+  String testFilePath = testDir + "/" + testFile;
+  String clientName = "testUser1";
+  String clientMachine = "testMachine1";
+  long blkId = 1;
+  long blkNumBytes = 1024;
+  long timestamp = 1426222918;
+  short blockNum = HdfsConstants.NUM_DATA_BLOCKS;
+  short parityNum = HdfsConstants.NUM_PARITY_BLOCKS;
+
+  //set the storage policy of the directory
+  fs.mkdir(new Path(testDir), new FsPermission(755));
+  fs.setStoragePolicy(new Path(testDir),
+  HdfsConstants.EC_STORAGE_POLICY_NAME);
+
+  // Create a file with striped block
+  Path p = new Path(testFilePath);
+  DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
+
+  fns.enterSafeMode(false);
+  fns.saveNamespace(0, 0);
+  fns.leaveSafeMode();
+
+  // Add a striped block to the file
+  BlockInfoStriped stripedBlk = new BlockInfoStriped(
+  new Block(blkId, blkNumBytes, timestamp), blockNum, parityNum);
+  INodeFile file = (INodeFile)fns.getFSDirectory().getINode(testFilePath);
+  file.toUnderConstruction(clientName, clientMachine);
+  file.getStripedBlocksFeature().addBlock(stripedBlk);
+  fns.getEditLog().logAddBlock(testFilePath, file);
+  file.toCompleteFile(System.currentTimeMillis());
+
+  //If the block by loaded is the same as above it means that
+  //we have successfully applied the edit log to the fsimage.
+  cluster.restartNameNodes();
+  cluster.waitActive();
+  fns = cluster.getNamesystem();
+
+  INodeFile inodeLoaded = (INodeFile)fns.getFSDirectory()
+  .getINode(testFilePath);
+
+  assertTrue(inodeLoaded.isWithStripedBlocks());
+
+  BlockInfoStriped[] 

[3/3] hadoop git commit: Revert MAPREDUCE-5875. Make Counter limits consistent across JobClient, MRAppMaster, and YarnChild. (Gera Shegalov via kasha)

2015-03-31 Thread vinodkv
Revert "MAPREDUCE-5875. Make Counter limits consistent across JobClient, 
MRAppMaster, and YarnChild. (Gera Shegalov via kasha)"

This reverts commit 7bfd9e068d205f752c05d51778a4a4702329d31e.

Conflicts:
hadoop-mapreduce-project/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec123abd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec123abd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec123abd

Branch: refs/heads/branch-2.7
Commit: ec123abdc9dfd42b1b960f3f1f5e4958296735a8
Parents: 1c050bd
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Tue Mar 31 11:30:04 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Tue Mar 31 11:30:04 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  4 -
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  3 -
 .../org/apache/hadoop/mapreduce/Cluster.java| 16 ++--
 .../apache/hadoop/mapreduce/JobSubmitter.java   |  2 -
 .../hadoop/mapreduce/counters/Limits.java   |  5 --
 .../mapreduce/jobhistory/HistoryViewer.java | 18 +---
 .../hadoop/mapreduce/v2/hs/CompletedJob.java| 15 
 .../apache/hadoop/mapreduce/v2/TestMRJobs.java  | 87 +++-
 8 files changed, 22 insertions(+), 128 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec123abd/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 9513b50..9f52c49 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -405,9 +405,6 @@ Release 2.6.0 - 2014-11-18
 MAPREDUCE-6123. TestCombineFileInputFormat incorrectly starts 2
 MiniDFSCluster instances. (cnauroth)
 
-MAPREDUCE-5875. Make Counter limits consistent across JobClient, 
-MRAppMaster, and YarnChild. (Gera Shegalov via kasha)
-
 MAPREDUCE-6125. TestContainerLauncherImpl sometimes fails (Mit Desai via
 jlowe)
 
@@ -453,7 +450,6 @@ Release 2.5.2 - 2014-11-19
 
   BUG FIXES
 
-
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec123abd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index f484935..8f63882 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.mapreduce.OutputFormat;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.TypeConverter;
-import org.apache.hadoop.mapreduce.counters.Limits;
 import org.apache.hadoop.mapreduce.jobhistory.AMStartedEvent;
 import org.apache.hadoop.mapreduce.jobhistory.EventReader;
 import org.apache.hadoop.mapreduce.jobhistory.EventType;
@@ -1090,8 +1089,6 @@ public class MRAppMaster extends CompositeService {
 
 // finally set the job classloader
 MRApps.setClassLoader(jobClassLoader, getConfig());
-// set job classloader if configured
-Limits.init(getConfig());
 
 if (initFailed) {
   JobEvent initFailedEvent = new JobEvent(job.getID(), 
JobEventType.JOB_INIT_FAILED);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec123abd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
index 34353ac..a92e648 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
@@ -183,15 +183,15 @@ public class Cluster {
   public Job 

[1/3] hadoop git commit: Revert MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which causes counter limits are not reset correctly. Contributed by Zhihai Xu.

2015-03-31 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 96d04307f - ec123abdc


Revert MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which 
causes counter limits are not reset correctly. Contributed by Zhihai Xu.

This reverts commit 929b04ce3a4fe419dece49ed68d4f6228be214c1.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02646ddb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02646ddb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02646ddb

Branch: refs/heads/branch-2.7
Commit: 02646ddba291665efde76f8496260d91b5a27003
Parents: 96d0430
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Tue Mar 31 11:26:13 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Tue Mar 31 11:26:13 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt | 4 
 .../org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java| 2 +-
 2 files changed, 1 insertion(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02646ddb/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index da29b4e..ef0a964 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -100,10 +100,6 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
-MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
-causes counter limits are not reset correctly.
-(Zhihai Xu via harsh)
-
 MAPREDUCE-6210. Use getApplicationAttemptId() instead of getApplicationId()
 for logging AttemptId in RMContainerAllocator.java (Leitao Guo via 
aajisaka)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02646ddb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
index f343d7c..43b2df2 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
@@ -93,7 +93,7 @@ public class HistoryViewer {
   final Configuration jobConf = new Configuration(conf);
   try {
 jobConf.addResource(fs.open(jobConfPath), jobConfPath.toString());
-Limits.reset(jobConf);
+Limits.reset(conf);
   } catch (FileNotFoundException fnf) {
 if (LOG.isWarnEnabled()) {
   LOG.warn("Missing job conf in history", fnf);



hadoop git commit: Reverted MAPREDUCE-6286, MAPREDUCE-6199, and MAPREDUCE-5875 from branch-2.7. Editing CHANGES.txt to reflect this.

2015-03-31 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 79f7f2aab - e428fea73


Reverted MAPREDUCE-6286, MAPREDUCE-6199, and MAPREDUCE-5875 from branch-2.7. 
Editing CHANGES.txt to reflect this.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e428fea7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e428fea7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e428fea7

Branch: refs/heads/trunk
Commit: e428fea73029ea0c3494c71a50c5f6c994888fd2
Parents: 79f7f2a
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Tue Mar 31 13:29:20 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Tue Mar 31 13:29:20 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt | 20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e428fea7/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index b0367a7..f5d2d1a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -311,6 +311,16 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6294. Remove an extra parameter described in Javadoc of
 TockenCache. (Brahma Reddy Battula via ozawa)
 
+MAPREDUCE-5875. Make Counter limits consistent across JobClient, 
+MRAppMaster, and YarnChild. (Gera Shegalov via kasha)
+
+MAPREDUCE-6199. AbstractCounters are not reset completely on
+deserialization (adhoot via rkanter)
+
+MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
+causes counter limits are not reset correctly.
+(Zhihai Xu via harsh)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -411,10 +421,6 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
-MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
-causes counter limits are not reset correctly.
-(Zhihai Xu via harsh)
-
 MAPREDUCE-6210. Use getApplicationAttemptId() instead of getApplicationId()
 for logging AttemptId in RMContainerAllocator.java (Leitao Guo via 
aajisaka)
 
@@ -448,9 +454,6 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6045. need close the DataInputStream after open it in
 TestMapReduce.java (zxu via rkanter)
 
-MAPREDUCE-6199. AbstractCounters are not reset completely on
-deserialization (adhoot via rkanter)
-
 MAPREDUCE-6206. TestAggregatedTransferRate fails on non-US systems (Jens
 Rabe via jlowe)
 
@@ -721,9 +724,6 @@ Release 2.6.0 - 2014-11-18
 MAPREDUCE-6123. TestCombineFileInputFormat incorrectly starts 2
 MiniDFSCluster instances. (cnauroth)
 
-MAPREDUCE-5875. Make Counter limits consistent across JobClient, 
-MRAppMaster, and YarnChild. (Gera Shegalov via kasha)
-
 MAPREDUCE-6125. TestContainerLauncherImpl sometimes fails (Mit Desai via
 jlowe)
 



[2/3] hadoop git commit: Revert MAPREDUCE-6199. AbstractCounters are not reset completely on deserialization (adhoot via rkanter)

2015-03-31 Thread vinodkv
Revert MAPREDUCE-6199. AbstractCounters are not reset completely on 
deserialization (adhoot via rkanter)

This reverts commit f9341c1e2cffc4e3ac37cb7da4e535ca5c6d3913.

Conflicts:
hadoop-mapreduce-project/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c050bd1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c050bd1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c050bd1

Branch: refs/heads/branch-2.7
Commit: 1c050bd13538fd6b2abbf65dfc441640c9d0f2cb
Parents: 02646dd
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Tue Mar 31 11:28:51 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Tue Mar 31 11:28:51 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 --
 .../mapreduce/counters/AbstractCounters.java|  4 --
 .../hadoop/mapreduce/counters/Limits.java   |  7 
 .../apache/hadoop/mapreduce/TestCounters.java   | 39 +---
 4 files changed, 1 insertion(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c050bd1/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ef0a964..9513b50 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -133,9 +133,6 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6045. need close the DataInputStream after open it in
 TestMapReduce.java (zxu via rkanter)
 
-MAPREDUCE-6199. AbstractCounters are not reset completely on
-deserialization (adhoot via rkanter)
-
 MAPREDUCE-6206. TestAggregatedTransferRate fails on non-US systems (Jens
 Rabe via jlowe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c050bd1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
index dd81ebb..401bbb2 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
@@ -307,10 +307,6 @@ public abstract class AbstractCounters<C extends Counter,
   fgroups.put(group.getName(), group);
 }
 int numGroups = WritableUtils.readVInt(in);
-if (!groups.isEmpty()) {
-  groups.clear();
-  limits.reset();
-}
while (numGroups-- > 0) {
   limits.checkGroups(groups.size() + 1);
   G group = groupFactory.newGenericGroup(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c050bd1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
index 9546c8d..3821694 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
@@ -124,15 +124,8 @@ public class Limits {
 return firstViolation;
   }
 
-  // This allows initialization of global settings and not for an instance
   public static synchronized void reset(Configuration conf) {
 isInited = false;
 init(conf);
   }
-
-  // This allows resetting of an instance to allow reuse
-  public synchronized void reset() {
-totalCounters = 0;
-firstViolation = null;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c050bd1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java
--
diff --git 

hadoop git commit: Reverted MAPREDUCE-6286, MAPREDUCE-6199, and MAPREDUCE-5875 from branch-2.7. Editing CHANGES.txt to reflect this.

2015-03-31 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 85c420a4d - 4cf44bef5


Reverted MAPREDUCE-6286, MAPREDUCE-6199, and MAPREDUCE-5875 from branch-2.7. 
Editing CHANGES.txt to reflect this.

(cherry picked from commit e428fea73029ea0c3494c71a50c5f6c994888fd2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cf44bef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cf44bef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cf44bef

Branch: refs/heads/branch-2
Commit: 4cf44bef5ca5fee69f712c448f6969e2e046d495
Parents: 85c420a
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Tue Mar 31 13:29:20 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Tue Mar 31 13:30:22 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt | 20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cf44bef/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 3efe73a..34aa2ed 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -66,6 +66,16 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6294. Remove an extra parameter described in Javadoc of
 TockenCache. (Brahma Reddy Battula via ozawa)
 
+MAPREDUCE-5875. Make Counter limits consistent across JobClient, 
+MRAppMaster, and YarnChild. (Gera Shegalov via kasha)
+
+MAPREDUCE-6199. AbstractCounters are not reset completely on
+deserialization (adhoot via rkanter)
+
+MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
+causes counter limits are not reset correctly.
+(Zhihai Xu via harsh)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -166,10 +176,6 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
-MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
-causes counter limits are not reset correctly.
-(Zhihai Xu via harsh)
-
 MAPREDUCE-6210. Use getApplicationAttemptId() instead of getApplicationId()
 for logging AttemptId in RMContainerAllocator.java (Leitao Guo via 
aajisaka)
 
@@ -203,9 +209,6 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6045. need close the DataInputStream after open it in
 TestMapReduce.java (zxu via rkanter)
 
-MAPREDUCE-6199. AbstractCounters are not reset completely on
-deserialization (adhoot via rkanter)
-
 MAPREDUCE-6206. TestAggregatedTransferRate fails on non-US systems (Jens
 Rabe via jlowe)
 
@@ -478,9 +481,6 @@ Release 2.6.0 - 2014-11-18
 MAPREDUCE-6123. TestCombineFileInputFormat incorrectly starts 2
 MiniDFSCluster instances. (cnauroth)
 
-MAPREDUCE-5875. Make Counter limits consistent across JobClient, 
-MRAppMaster, and YarnChild. (Gera Shegalov via kasha)
-
 MAPREDUCE-6125. TestContainerLauncherImpl sometimes fails (Mit Desai via
 jlowe)
 



hadoop git commit: HDFS-7997. The first non-existing xattr should also throw IOException. (zhouyingchao via yliu)

2015-03-31 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 99b825569 - e4cd67e9b


HDFS-7997. The first non-existing xattr should also throw IOException. 
(zhouyingchao via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4cd67e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4cd67e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4cd67e9

Branch: refs/heads/branch-2
Commit: e4cd67e9b0bcc4e5aedb8c189dc780326c69b032
Parents: 99b8255
Author: yliu y...@apache.org
Authored: Tue Mar 31 21:16:13 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Mar 31 21:16:13 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java | 3 ++-
 .../hadoop-hdfs/src/test/resources/testXAttrConf.xml | 4 ++--
 3 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4cd67e9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3d7690e..da3d729 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -77,6 +77,9 @@ Release 2.8.0 - UNRELEASED
 DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin
 P. McCabe)
 
+HDFS-7997. The first non-existing xattr should also throw IOException.
+(zhouyingchao via yliu)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4cd67e9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 45e63f2..d5c9124 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -108,7 +108,8 @@ class FSDirXAttrOp {
   return filteredAll;
 }
 if (filteredAll == null || filteredAll.isEmpty()) {
-  return null;
+  throw new IOException(
+      "At least one of the attributes provided was not found.");
 }
 List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
 for (XAttr xAttr : xAttrs) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4cd67e9/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
index 9c66cba..c2e836c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
@@ -420,8 +420,8 @@
   </cleanup-commands>
   <comparators>
<comparator>
-  <type>ExactComparator</type>
-  <expected-output># file: /file1#LF#</expected-output>
+  <type>SubstringComparator</type>
+  <expected-output>At least one of the attributes provided was not 
found</expected-output>
 </comparator>
   </comparators>
 </test>



hadoop git commit: HDFS-7997. The first non-existing xattr should also throw IOException. (zhouyingchao via yliu)

2015-03-31 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk b5a22e983 - 3df61f303


HDFS-7997. The first non-existing xattr should also throw IOException. 
(zhouyingchao via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3df61f30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3df61f30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3df61f30

Branch: refs/heads/trunk
Commit: 3df61f303a8c0f5105661dc86fc3a34a60c49066
Parents: b5a22e9
Author: yliu y...@apache.org
Authored: Tue Mar 31 21:17:44 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Mar 31 21:17:44 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java | 3 ++-
 .../hadoop-hdfs/src/test/resources/testXAttrConf.xml | 4 ++--
 3 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3df61f30/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0bea916..e8075e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -392,6 +392,9 @@ Release 2.8.0 - UNRELEASED
 DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin
 P. McCabe)
 
+HDFS-7997. The first non-existing xattr should also throw IOException.
+(zhouyingchao via yliu)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3df61f30/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 45e63f2..d5c9124 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -108,7 +108,8 @@ class FSDirXAttrOp {
   return filteredAll;
 }
 if (filteredAll == null || filteredAll.isEmpty()) {
-  return null;
+  throw new IOException(
+      "At least one of the attributes provided was not found.");
 }
 List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
 for (XAttr xAttr : xAttrs) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3df61f30/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
index 9c66cba..c2e836c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
@@ -420,8 +420,8 @@
   </cleanup-commands>
   <comparators>
<comparator>
-  <type>ExactComparator</type>
-  <expected-output># file: /file1#LF#</expected-output>
+  <type>SubstringComparator</type>
+  <expected-output>At least one of the attributes provided was not 
found</expected-output>
 </comparator>
   </comparators>
 </test>



hadoop git commit: HDFS-6945. BlockManager should remove a block from excessReplicateMap and decrement ExcessBlocks metric when the block is removed. (aajisaka)

2015-03-31 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4cf44bef5 - b85bbca74


HDFS-6945. BlockManager should remove a block from excessReplicateMap and 
decrement ExcessBlocks metric when the block is removed. (aajisaka)

(cherry picked from commit 18a91fe4df0448d9f7de91602646ecf5a51c52e4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b85bbca7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b85bbca7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b85bbca7

Branch: refs/heads/branch-2
Commit: b85bbca74565b18dfa6689c9545d07bff5d31f83
Parents: 4cf44be
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Apr 1 09:07:28 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Wed Apr 1 09:08:22 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../server/blockmanagement/BlockManager.java| 22 ++--
 .../namenode/metrics/TestNameNodeMetrics.java   |  9 ++--
 3 files changed, 30 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b85bbca7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3bfc550..1d733a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -83,6 +83,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7997. The first non-existing xattr should also throw IOException.
 (zhouyingchao via yliu)
 
+HDFS-6945. BlockManager should remove a block from excessReplicateMap and
+decrement ExcessBlocks metric when the block is removed. (aajisaka)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b85bbca7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 11965c1..acb5c44 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3354,8 +3354,7 @@ public class BlockManager {
 // file already removes them from the block map below.
 block.setNumBytes(BlockCommand.NO_ACK);
 addToInvalidates(block);
-corruptReplicas.removeFromCorruptReplicasMap(block);
-blocksMap.removeBlock(block);
+removeBlockFromMap(block);
 // Remove the block from pendingReplications and neededReplications
 pendingReplications.remove(block);
 neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
@@ -3531,11 +3530,30 @@ public class BlockManager {
   }
 
   public void removeBlockFromMap(Block block) {
+removeFromExcessReplicateMap(block);
 blocksMap.removeBlock(block);
 // If block is removed from blocksMap remove it from corruptReplicasMap
 corruptReplicas.removeFromCorruptReplicasMap(block);
   }
 
+  /**
+   * If a block is removed from blocksMap, remove it from excessReplicateMap.
+   */
+  private void removeFromExcessReplicateMap(Block block) {
+for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
+  String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
+  LightWeightLinkedSet<Block> excessReplicas = 
excessReplicateMap.get(uuid);
+  if (excessReplicas != null) {
+if (excessReplicas.remove(block)) {
+  excessBlocksCount.decrementAndGet();
+  if (excessReplicas.isEmpty()) {
+excessReplicateMap.remove(uuid);
+  }
+}
+  }
+}
+  }
+
   public int getCapacity() {
 return blocksMap.getCapacity();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b85bbca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 2ba609d..438c2d7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ 

hadoop git commit: HDFS-6945. BlockManager should remove a block from excessReplicateMap and decrement ExcessBlocks metric when the block is removed. (aajisaka)

2015-03-31 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk e428fea73 - 18a91fe4d


HDFS-6945. BlockManager should remove a block from excessReplicateMap and 
decrement ExcessBlocks metric when the block is removed. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18a91fe4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18a91fe4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18a91fe4

Branch: refs/heads/trunk
Commit: 18a91fe4df0448d9f7de91602646ecf5a51c52e4
Parents: e428fea
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Apr 1 09:07:28 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Wed Apr 1 09:07:28 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../server/blockmanagement/BlockManager.java| 22 ++--
 .../namenode/metrics/TestNameNodeMetrics.java   |  9 ++--
 3 files changed, 30 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18a91fe4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f3537b0..4247ea6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -398,6 +398,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7997. The first non-existing xattr should also throw IOException.
 (zhouyingchao via yliu)
 
+HDFS-6945. BlockManager should remove a block from excessReplicateMap and
+decrement ExcessBlocks metric when the block is removed. (aajisaka)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18a91fe4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f6e15a3..d9aee62 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3351,8 +3351,7 @@ public class BlockManager {
 // file already removes them from the block map below.
 block.setNumBytes(BlockCommand.NO_ACK);
 addToInvalidates(block);
-corruptReplicas.removeFromCorruptReplicasMap(block);
-blocksMap.removeBlock(block);
+removeBlockFromMap(block);
 // Remove the block from pendingReplications and neededReplications
 pendingReplications.remove(block);
 neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
@@ -3528,11 +3527,30 @@ public class BlockManager {
   }
 
   public void removeBlockFromMap(Block block) {
+removeFromExcessReplicateMap(block);
 blocksMap.removeBlock(block);
 // If block is removed from blocksMap remove it from corruptReplicasMap
 corruptReplicas.removeFromCorruptReplicasMap(block);
   }
 
+  /**
+   * If a block is removed from blocksMap, remove it from excessReplicateMap.
+   */
+  private void removeFromExcessReplicateMap(Block block) {
+for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
+  String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
+  LightWeightLinkedSet<Block> excessReplicas = 
excessReplicateMap.get(uuid);
+  if (excessReplicas != null) {
+if (excessReplicas.remove(block)) {
+  excessBlocksCount.decrementAndGet();
+  if (excessReplicas.isEmpty()) {
+excessReplicateMap.remove(uuid);
+  }
+}
+  }
+}
+  }
+
   public int getCapacity() {
 return blocksMap.getCapacity();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18a91fe4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 64ea1e4..b390391 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -280,11 +280,16 @@ public 

hadoop git commit: YARN-3304. Addendum patch. Cleaning up ResourceCalculatorProcessTree APIs for public use and removing inconsistencies in the default values. (Junping Du and Karthik Kambatla via vin

2015-03-31 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b85bbca74 - c980e34bc


YARN-3304. Addendum patch. Cleaning up ResourceCalculatorProcessTree APIs for 
public use and removing inconsistencies in the default values. (Junping Du and 
Karthik Kambatla via vinodkv)

(cherry picked from commit 7610925e90155dfe5edce05da31574e4fb81b948)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c980e34b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c980e34b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c980e34b

Branch: refs/heads/branch-2
Commit: c980e34bc5f864d2f7db2419b26313514517c8aa
Parents: b85bbca
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Tue Mar 31 17:27:46 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Tue Mar 31 17:28:48 2015 -0700

--
 .../yarn/util/ProcfsBasedProcessTree.java   | 82 +++--
 .../util/ResourceCalculatorProcessTree.java | 55 ++-
 .../yarn/util/WindowsBasedProcessTree.java  | 24 -
 .../yarn/util/TestProcfsBasedProcessTree.java   | 96 ++--
 .../util/TestResourceCalculatorProcessTree.java | 10 ++
 .../yarn/util/TestWindowsBasedProcessTree.java  | 15 ++-
 6 files changed, 236 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c980e34b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 9996a79..df9d28a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -344,15 +344,23 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   public long getVirtualMemorySize(int olderThanAge) {
 long total = UNAVAILABLE;
 for (ProcessInfo p : processTree.values()) {
-  if ((p != null) && (p.getAge() > olderThanAge)) {
+  if (p != null) {
 if (total == UNAVAILABLE ) {
   total = 0;
 }
-total += p.getVmem();
+if (p.getAge() > olderThanAge) {
+  total += p.getVmem();
+}
   }
 }
 return total;
   }
+  
+  @Override
+  @SuppressWarnings("deprecation")
+  public long getCumulativeVmem(int olderThanAge) {
+return getVirtualMemorySize(olderThanAge);
+  }
 
   @Override
   public long getRssMemorySize(int olderThanAge) {
@@ -365,13 +373,21 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
 boolean isAvailable = false;
 long totalPages = 0;
 for (ProcessInfo p : processTree.values()) {
-  if ((p != null) && (p.getAge() > olderThanAge)) {
-totalPages += p.getRssmemPage();
+  if ((p != null) ) {
+if (p.getAge() > olderThanAge) {
+  totalPages += p.getRssmemPage();
+}
 isAvailable = true;
   }
 }
 return isAvailable ? totalPages * PAGE_SIZE : UNAVAILABLE; // convert # 
pages to byte
   }
+  
+  @Override
+  @SuppressWarnings("deprecation")
+  public long getCumulativeRssmem(int olderThanAge) {
+return getRssMemorySize(olderThanAge);
+  }
 
   /**
* Get the resident set size (RSS) memory used by all the processes
@@ -388,36 +404,42 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   private long getSmapBasedRssMemorySize(int olderThanAge) {
 long total = UNAVAILABLE;
 for (ProcessInfo p : processTree.values()) {
-  if ((p != null) && (p.getAge() > olderThanAge)) {
-ProcessTreeSmapMemInfo procMemInfo = processSMAPTree.get(p.getPid());
-if (procMemInfo != null) {
-  for (ProcessSmapMemoryInfo info : procMemInfo.getMemoryInfoList()) {
-// Do not account for r--s or r-xs mappings
-if (info.getPermission().trim()
-  .equalsIgnoreCase(READ_ONLY_WITH_SHARED_PERMISSION)
-|| info.getPermission().trim()
-  .equalsIgnoreCase(READ_EXECUTE_WITH_SHARED_PERMISSION)) {
-  continue;
-}
-if (total == UNAVAILABLE){
-  total = 0;
-}
-total +=
-Math.min(info.sharedDirty, info.pss) + info.privateDirty
-+ info.privateClean;
-if (LOG.isDebugEnabled()) {
-  LOG.debug(" total(" + olderThanAge + "): PID : " +

hadoop git commit: YARN-3304. Addendum patch. Cleaning up ResourceCalculatorProcessTree APIs for public use and removing inconsistencies in the default values. (Junping Du and Karthik Kambatla via vin

2015-03-31 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 ec123abdc -> 3acfde6d8


YARN-3304. Addendum patch. Cleaning up ResourceCalculatorProcessTree APIs for 
public use and removing inconsistencies in the default values. (Junping Du and 
Karthik Kambatla via vinodkv)

(cherry picked from commit 7610925e90155dfe5edce05da31574e4fb81b948)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3acfde6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3acfde6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3acfde6d

Branch: refs/heads/branch-2.7
Commit: 3acfde6d850ba98768baac99a7ec2c2b8899ec68
Parents: ec123ab
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Tue Mar 31 17:27:46 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Tue Mar 31 17:29:18 2015 -0700

--
 .../yarn/util/ProcfsBasedProcessTree.java   | 82 +++--
 .../util/ResourceCalculatorProcessTree.java | 55 ++-
 .../yarn/util/WindowsBasedProcessTree.java  | 24 -
 .../yarn/util/TestProcfsBasedProcessTree.java   | 96 ++--
 .../util/TestResourceCalculatorProcessTree.java | 10 ++
 .../yarn/util/TestWindowsBasedProcessTree.java  | 15 ++-
 6 files changed, 236 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3acfde6d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 9996a79..df9d28a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -344,15 +344,23 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   public long getVirtualMemorySize(int olderThanAge) {
 long total = UNAVAILABLE;
 for (ProcessInfo p : processTree.values()) {
-  if ((p != null) && (p.getAge() > olderThanAge)) {
+  if (p != null) {
 if (total == UNAVAILABLE ) {
   total = 0;
 }
-total += p.getVmem();
+if (p.getAge() > olderThanAge) {
+  total += p.getVmem();
+}
   }
 }
 return total;
   }
+  
+  @Override
+  @SuppressWarnings("deprecation")
+  public long getCumulativeVmem(int olderThanAge) {
+return getVirtualMemorySize(olderThanAge);
+  }
 
   @Override
   public long getRssMemorySize(int olderThanAge) {
@@ -365,13 +373,21 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
 boolean isAvailable = false;
 long totalPages = 0;
 for (ProcessInfo p : processTree.values()) {
-  if ((p != null) && (p.getAge() > olderThanAge)) {
-totalPages += p.getRssmemPage();
+  if ((p != null) ) {
+if (p.getAge() > olderThanAge) {
+  totalPages += p.getRssmemPage();
+}
 isAvailable = true;
   }
 }
 return isAvailable ? totalPages * PAGE_SIZE : UNAVAILABLE; // convert # 
pages to byte
   }
+  
+  @Override
+  @SuppressWarnings("deprecation")
+  public long getCumulativeRssmem(int olderThanAge) {
+return getRssMemorySize(olderThanAge);
+  }
 
   /**
* Get the resident set size (RSS) memory used by all the processes
@@ -388,36 +404,42 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   private long getSmapBasedRssMemorySize(int olderThanAge) {
 long total = UNAVAILABLE;
 for (ProcessInfo p : processTree.values()) {
-  if ((p != null) && (p.getAge() > olderThanAge)) {
-ProcessTreeSmapMemInfo procMemInfo = processSMAPTree.get(p.getPid());
-if (procMemInfo != null) {
-  for (ProcessSmapMemoryInfo info : procMemInfo.getMemoryInfoList()) {
-// Do not account for r--s or r-xs mappings
-if (info.getPermission().trim()
-  .equalsIgnoreCase(READ_ONLY_WITH_SHARED_PERMISSION)
-|| info.getPermission().trim()
-  .equalsIgnoreCase(READ_EXECUTE_WITH_SHARED_PERMISSION)) {
-  continue;
-}
-if (total == UNAVAILABLE){
-  total = 0;
-}
-total +=
-Math.min(info.sharedDirty, info.pss) + info.privateDirty
-+ info.privateClean;
-if (LOG.isDebugEnabled()) {
-  LOG.debug(" total(" + olderThanAge + "): PID : " +

hadoop git commit: YARN-3304. Addendum patch. Cleaning up ResourceCalculatorProcessTree APIs for public use and removing inconsistencies in the default values. (Junping Du and Karthik Kambatla via vin

2015-03-31 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 18a91fe4d -> 7610925e9


YARN-3304. Addendum patch. Cleaning up ResourceCalculatorProcessTree APIs for 
public use and removing inconsistencies in the default values. (Junping Du and 
Karthik Kambatla via vinodkv)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7610925e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7610925e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7610925e

Branch: refs/heads/trunk
Commit: 7610925e90155dfe5edce05da31574e4fb81b948
Parents: 18a91fe
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Tue Mar 31 17:27:46 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Tue Mar 31 17:27:46 2015 -0700

--
 .../yarn/util/ProcfsBasedProcessTree.java   | 82 +++--
 .../util/ResourceCalculatorProcessTree.java | 55 ++-
 .../yarn/util/WindowsBasedProcessTree.java  | 24 -
 .../yarn/util/TestProcfsBasedProcessTree.java   | 96 ++--
 .../util/TestResourceCalculatorProcessTree.java | 10 ++
 .../yarn/util/TestWindowsBasedProcessTree.java  | 15 ++-
 6 files changed, 236 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7610925e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 9996a79..df9d28a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -344,15 +344,23 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   public long getVirtualMemorySize(int olderThanAge) {
 long total = UNAVAILABLE;
 for (ProcessInfo p : processTree.values()) {
-  if ((p != null) && (p.getAge() > olderThanAge)) {
+  if (p != null) {
 if (total == UNAVAILABLE ) {
   total = 0;
 }
-total += p.getVmem();
+if (p.getAge() > olderThanAge) {
+  total += p.getVmem();
+}
   }
 }
 return total;
   }
+  
+  @Override
+  @SuppressWarnings("deprecation")
+  public long getCumulativeVmem(int olderThanAge) {
+return getVirtualMemorySize(olderThanAge);
+  }
 
   @Override
   public long getRssMemorySize(int olderThanAge) {
@@ -365,13 +373,21 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
 boolean isAvailable = false;
 long totalPages = 0;
 for (ProcessInfo p : processTree.values()) {
-  if ((p != null) && (p.getAge() > olderThanAge)) {
-totalPages += p.getRssmemPage();
+  if ((p != null) ) {
+if (p.getAge() > olderThanAge) {
+  totalPages += p.getRssmemPage();
+}
 isAvailable = true;
   }
 }
 return isAvailable ? totalPages * PAGE_SIZE : UNAVAILABLE; // convert # 
pages to byte
   }
+  
+  @Override
+  @SuppressWarnings("deprecation")
+  public long getCumulativeRssmem(int olderThanAge) {
+return getRssMemorySize(olderThanAge);
+  }
 
   /**
* Get the resident set size (RSS) memory used by all the processes
@@ -388,36 +404,42 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   private long getSmapBasedRssMemorySize(int olderThanAge) {
 long total = UNAVAILABLE;
 for (ProcessInfo p : processTree.values()) {
-  if ((p != null) && (p.getAge() > olderThanAge)) {
-ProcessTreeSmapMemInfo procMemInfo = processSMAPTree.get(p.getPid());
-if (procMemInfo != null) {
-  for (ProcessSmapMemoryInfo info : procMemInfo.getMemoryInfoList()) {
-// Do not account for r--s or r-xs mappings
-if (info.getPermission().trim()
-  .equalsIgnoreCase(READ_ONLY_WITH_SHARED_PERMISSION)
-|| info.getPermission().trim()
-  .equalsIgnoreCase(READ_EXECUTE_WITH_SHARED_PERMISSION)) {
-  continue;
-}
-if (total == UNAVAILABLE){
-  total = 0;
-}
-total +=
-Math.min(info.sharedDirty, info.pss) + info.privateDirty
-+ info.privateClean;
-if (LOG.isDebugEnabled()) {
-  LOG.debug(" total(" + olderThanAge + "): PID : " + p.getPid()
-  + ", SharedDirty : " + info.sharedDirty + ", PSS : "
- 

hadoop git commit: YARN-3428. Debug log resources to be localized for a container. (kasha)

2015-03-31 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c980e34bc -> 93b29d962


YARN-3428. Debug log resources to be localized for a container. (kasha)

(cherry picked from commit 2daa478a6420585dc13cea2111580ed5fe347bc1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93b29d96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93b29d96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93b29d96

Branch: refs/heads/branch-2
Commit: 93b29d962d75ce8a491c11dfadf906400458b0d4
Parents: c980e34
Author: Karthik Kambatla ka...@apache.org
Authored: Tue Mar 31 17:34:40 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Tue Mar 31 17:35:11 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  2 ++
 .../localizer/ResourceLocalizationService.java  | 12 ++--
 2 files changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93b29d96/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 273a788..30c1826 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -41,6 +41,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3258. FairScheduler: Need to add more logging to investigate
 allocations. (Anubhav Dhoot via ozawa)
 
+YARN-3428. Debug log resources to be localized for a container. (kasha)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93b29d96/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index dd50ead..4236392 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -448,6 +448,10 @@ public class ResourceLocalizationService extends 
CompositeService
   .getApplicationId());
   for (LocalResourceRequest req : e.getValue()) {
 tracker.handle(new ResourceRequestEvent(req, e.getKey(), ctxt));
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Localizing " + req.getPath() +
+   " for container " + c.getContainerId());
+}
   }
 }
   }
@@ -456,10 +460,14 @@ public class ResourceLocalizationService extends 
CompositeService
 ResourceRetentionSet retain =
   new ResourceRetentionSet(delService, cacheTargetSize);
 retain.addResources(publicRsrc);
-LOG.debug("Resource cleanup (public) " + retain);
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Resource cleanup (public) " + retain);
+}
 for (LocalResourcesTracker t : privateRsrc.values()) {
   retain.addResources(t);
-  LOG.debug("Resource cleanup " + t.getUser() + ":" + retain);
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Resource cleanup " + t.getUser() + ":" + retain);
+  }
 }
 //TODO Check if appRsrcs should also be added to the retention set.
   }