hadoop git commit: Fix minor issues when merging trunk (DFS-6407) to HDFS-7285 branch.

2015-08-24 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285-merge babc5182c - b2049f95a (forced update)


Fix minor issues when merging trunk (DFS-6407) to HDFS-7285 branch.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2049f95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2049f95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2049f95

Branch: refs/heads/HDFS-7285-merge
Commit: b2049f95a657333ab50431656d43927604c2c1d5
Parents: cc10933
Author: Zhe Zhang zhezh...@cloudera.com
Authored: Thu Aug 20 11:33:55 2015 -0700
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Aug 24 09:52:56 2015 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java| 1 -
 .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java   | 4 +---
 .../apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java| 2 +-
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java | 4 ++--
 .../org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java | 5 ++---
 .../apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java| 4 ++--
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 4 ++--
 .../org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java| 2 +-
 .../java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java | 3 ++-
 .../org/apache/hadoop/hdfs/server/balancer/TestBalancer.java| 1 +
 10 files changed, 14 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2049f95/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index abc37c9..35c4f9a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -44,7 +44,6 @@ import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import com.google.common.base.Preconditions;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ByteBufferReadable;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2049f95/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 5d03fd2..00f3a65 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -24,8 +24,6 @@ import java.nio.channels.ClosedChannelException;
 import java.util.EnumSet;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
@@ -358,7 +356,7 @@ public class DFSOutputStream extends FSOutputSummer
   String[] favoredNodes) throws IOException {
 TraceScope scope =
dfsClient.getPathTraceScope("newStreamForAppend", src);
-if(stat.getReplication() == 0) {
+if(stat.getErasureCodingPolicy() != null) {
   throw new IOException("Not support appending to a striping layout file yet.");
 }
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2049f95/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 9387176..bf11914 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.util.LightWeightGSet;
  * where the replicas of the block, or blocks belonging to the erasure coding
  * block group, are stored.
  */
-public 

hadoop git commit: HDFS-8930. Block report lease may leak if the 2nd full block report comes when NN is still in safemode (Colin P. McCabe via Jing Zhao)

2015-08-24 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk feaf03499 - b5ce87f84


HDFS-8930. Block report lease may leak if the 2nd full block report comes when 
NN is still in safemode (Colin P. McCabe via Jing Zhao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5ce87f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5ce87f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5ce87f8

Branch: refs/heads/trunk
Commit: b5ce87f84d9de0a5347ab38c0567a5a70d1fbfd7
Parents: feaf034
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Aug 24 11:31:56 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Mon Aug 24 11:31:56 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java   | 1 +
 .../hdfs/server/blockmanagement/TestBlockReportRateLimiting.java  | 2 --
 3 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5ce87f8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0b7bc90..c90c247 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1201,6 +1201,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8942. Update hyperlink to rack awareness page in HDFS Architecture
 documentation. (Masatake Iwasaki via aajisaka)
 
+HDFS-8930. Block report lease may leak if the 2nd full block report comes
+when NN is still in safemode (Colin P. McCabe via Jing Zhao)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5ce87f8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5a77ad4..7f02612 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1849,6 +1849,7 @@ public class BlockManager implements BlockStatsMXBean {
 blockLog.info("BLOCK* processReport: "
     + "discarded non-initial block report from {}"
     + " because namenode still in startup phase", nodeID);
+blockReportLeaseManager.removeLease(node);
 return !node.hasStaleStorages();
   }
   if (context != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5ce87f8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
index 86a7511..3cc1b02 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
@@ -29,7 +29,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -40,7 +39,6 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.util.HashSet;
-import java.util.List;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;



hadoop git commit: HDFS-8930. Block report lease may leak if the 2nd full block report comes when NN is still in safemode (Colin P. McCabe via Jing Zhao)

2015-08-24 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6dc732f2f - 87d013370


HDFS-8930. Block report lease may leak if the 2nd full block report comes when 
NN is still in safemode (Colin P. McCabe via Jing Zhao)

(cherry picked from commit b5ce87f84d9de0a5347ab38c0567a5a70d1fbfd7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87d01337
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87d01337
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87d01337

Branch: refs/heads/branch-2
Commit: 87d0133703ccfaa68dddbc78f026276c6cb389c8
Parents: 6dc732f
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Aug 24 11:31:56 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Mon Aug 24 11:33:21 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java   | 1 +
 .../hdfs/server/blockmanagement/TestBlockReportRateLimiting.java  | 2 --
 3 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87d01337/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 241540f..a52367b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -857,6 +857,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8942. Update hyperlink to rack awareness page in HDFS Architecture
 documentation. (Masatake Iwasaki via aajisaka)
 
+HDFS-8930. Block report lease may leak if the 2nd full block report comes
+when NN is still in safemode (Colin P. McCabe via Jing Zhao)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87d01337/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ce6bcc7..c45ecc1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1838,6 +1838,7 @@ public class BlockManager implements BlockStatsMXBean {
 blockLog.info("BLOCK* processReport: "
     + "discarded non-initial block report from {}"
     + " because namenode still in startup phase", nodeID);
+blockReportLeaseManager.removeLease(node);
 return !node.hasStaleStorages();
   }
   if (context != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87d01337/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
index 86a7511..3cc1b02 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
@@ -29,7 +29,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -40,7 +39,6 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.util.HashSet;
-import java.util.List;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;



hadoop git commit: YARN-3896. RMNode transitioned from RUNNING to REBOOTED because its response id has not been reset synchronously. (Jun Gong via rohithsharmaks)

2015-08-24 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f7ee22505 - 6dc732f2f


YARN-3896. RMNode transitioned from RUNNING to REBOOTED because its response id 
has not been reset synchronously. (Jun Gong via rohithsharmaks)

(cherry picked from commit feaf0349949e831ce3f25814c1bbff52f17bfe8f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6dc732f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6dc732f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6dc732f2

Branch: refs/heads/branch-2
Commit: 6dc732f2f79afe8438dfa2a2b20063671d08c705
Parents: f7ee225
Author: Rohith Sharma K S rohithsharm...@apache.org
Authored: Mon Aug 24 11:25:07 2015 +0530
Committer: Rohith Sharma K S rohithsharm...@apache.org
Committed: Mon Aug 24 11:30:46 2015 +0530

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |  3 ++
 .../yarn/sls/scheduler/RMNodeWrapper.java   |  5 +++
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../resourcemanager/ResourceTrackerService.java |  2 +
 .../server/resourcemanager/rmnode/RMNode.java   |  7 +++-
 .../resourcemanager/rmnode/RMNodeImpl.java  | 15 +---
 .../yarn/server/resourcemanager/MockNodes.java  |  4 ++
 .../resourcetracker/TestNMReconnect.java| 39 
 8 files changed, 72 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dc732f2/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 440779c..2d2c3e0 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -149,6 +149,9 @@ public class NodeInfo {
   return null;
 }
 
+public void resetLastNodeHeartBeatResponse() {
+}
+
 public List<UpdatedContainerInfo> pullContainerUpdates() {
   ArrayList<UpdatedContainerInfo> list =
       new ArrayList<UpdatedContainerInfo>();
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dc732f2/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index a6633ae..ecc4734 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -135,6 +135,11 @@ public class RMNodeWrapper implements RMNode {
   }
 
   @Override
+  public void resetLastNodeHeartBeatResponse() {
+node.getLastNodeHeartBeatResponse().setResponseId(0);
+  }
+
+  @Override
   @SuppressWarnings("unchecked")
   public List<UpdatedContainerInfo> pullContainerUpdates() {
     List<UpdatedContainerInfo> list = Collections.EMPTY_LIST;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dc732f2/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 66e08bf..8942d91 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -740,6 +740,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3986. getTransferredContainers in AbstractYarnScheduler should be 
present
 in YarnScheduler interface instead. (Varun Saxena via rohithsharmaks)
 
+YARN-3896. RMNode transitioned from RUNNING to REBOOTED because its 
response id 
+has not been reset synchronously. (Jun Gong via rohithsharmaks)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dc732f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index 3c2c09b..100e991 100644
--- 

hadoop git commit: Fix minor issues when merging trunk (DFS-6407) to HDFS-7285 branch.

2015-08-24 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285-merge 6fed3c5f0 - babc5182c (forced update)


Fix minor issues when merging trunk (DFS-6407) to HDFS-7285 branch.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/babc5182
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/babc5182
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/babc5182

Branch: refs/heads/HDFS-7285-merge
Commit: babc5182c2f646bdb69061e8e04c61cb2e149b18
Parents: cc10933
Author: Zhe Zhang zhezh...@cloudera.com
Authored: Thu Aug 20 11:33:55 2015 -0700
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Sun Aug 23 22:46:47 2015 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java| 1 -
 .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java   | 4 +---
 .../apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java| 2 +-
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java | 4 ++--
 .../org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java | 5 ++---
 .../apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java| 4 ++--
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 4 ++--
 .../org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java| 2 +-
 .../java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java | 3 ++-
 9 files changed, 13 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/babc5182/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index abc37c9..35c4f9a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -44,7 +44,6 @@ import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import com.google.common.base.Preconditions;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ByteBufferReadable;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/babc5182/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 5d03fd2..00f3a65 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -24,8 +24,6 @@ import java.nio.channels.ClosedChannelException;
 import java.util.EnumSet;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
@@ -358,7 +356,7 @@ public class DFSOutputStream extends FSOutputSummer
   String[] favoredNodes) throws IOException {
 TraceScope scope =
dfsClient.getPathTraceScope("newStreamForAppend", src);
-if(stat.getReplication() == 0) {
+if(stat.getErasureCodingPolicy() != null) {
   throw new IOException("Not support appending to a striping layout file yet.");
 }
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/babc5182/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 9387176..bf11914 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.util.LightWeightGSet;
  * where the replicas of the block, or blocks belonging to the erasure coding
  * block group, are stored.
  */
-public abstract class  BlockInfo extends Block
+public abstract class BlockInfo 

[48/50] [abbrv] hadoop git commit: Merge commit '456e901a4c5c639267ee87b8e5f1319f256d20c2' (HDFS-6407. Add sorting and pagination in the datanode tab of the NN Web UI. Contributed by Haohui Mai.) into

2015-08-24 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b6a63bb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 000,7d32568..fb10e9c
mode 00,100644..100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@@ -1,0 -1,873 +1,880 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ /**
+  * These .proto interfaces are private and stable.
+  * Please see http://wiki.apache.org/hadoop/Compatibility
+  * for what changes are allowed for a *stable* .proto interface.
+  */
+ 
+ option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+ option java_outer_classname = "ClientNamenodeProtocolProtos";
+ option java_generic_services = true;
+ option java_generate_equals_and_hash = true;
+ package hadoop.hdfs;
+ 
+ import "Security.proto";
+ import "hdfs.proto";
+ import "acl.proto";
+ import "xattr.proto";
+ import "encryption.proto";
+ import "inotify.proto";
++import "erasurecoding.proto";
+ 
+ /**
+  * The ClientNamenodeProtocol Service defines the interface between a client 
+  * (as running inside a MR Task) and the Namenode.
+  * See org.apache.hadoop.hdfs.protocol.ClientProtocol for the javadoc 
+  * for each of the methods.
+  * The exceptions declared in the above class also apply to this protocol.
+  * Exceptions are unwrapped and thrown by the  PB libraries.
+  */
+ 
+ message GetBlockLocationsRequestProto {
+   required string src = 1; // file name
+   required uint64 offset = 2;  // range start offset
+   required uint64 length = 3;  // range length
+ }
+ 
+ message GetBlockLocationsResponseProto {
+   optional LocatedBlocksProto locations = 1;
+ }
+ 
+ message GetServerDefaultsRequestProto { // No parameters
+ }
+ 
+ message GetServerDefaultsResponseProto {
+   required FsServerDefaultsProto serverDefaults = 1;
+ }
+ 
+ enum CreateFlagProto {
+   CREATE = 0x01;// Create a file
+   OVERWRITE = 0x02; // Truncate/overwrite a file. Same as POSIX O_TRUNC
+   APPEND = 0x04;// Append to a file
+   LAZY_PERSIST = 0x10; // File with reduced durability guarantees.
+   NEW_BLOCK = 0x20; // Write data to a new block when appending
+ }
+ 
+ message CreateRequestProto {
+   required string src = 1;
+   required FsPermissionProto masked = 2;
+   required string clientName = 3;
+   required uint32 createFlag = 4;  // bits set using CreateFlag
+   required bool createParent = 5;
+   required uint32 replication = 6; // Short: Only 16 bits used
+   required uint64 blockSize = 7;
+   repeated CryptoProtocolVersionProto cryptoProtocolVersion = 8;
+ }
+ 
+ message CreateResponseProto {
+   optional HdfsFileStatusProto fs = 1;
+ }
+ 
+ message AppendRequestProto {
+   required string src = 1;
+   required string clientName = 2;
+   optional uint32 flag = 3; // bits set using CreateFlag
+ }
+ 
+ message AppendResponseProto {
+   optional LocatedBlockProto block = 1;
+   optional HdfsFileStatusProto stat = 2;
+ }
+ 
+ message SetReplicationRequestProto {
+   required string src = 1;
+   required uint32 replication = 2; // Short: Only 16 bits used
+ }
+ 
+ message SetReplicationResponseProto {
+   required bool result = 1;
+ }
+ 
+ message SetStoragePolicyRequestProto {
+   required string src = 1;
+   required string policyName = 2;
+ }
+ 
+ message SetStoragePolicyResponseProto { // void response
+ }
+ 
+ message GetStoragePolicyRequestProto {
+   required string path = 1;
+ }
+ 
+ message GetStoragePolicyResponseProto {
+   required BlockStoragePolicyProto storagePolicy = 1;
+ }
+ 
+ message GetStoragePoliciesRequestProto { // void request
+ }
+ 
+ message GetStoragePoliciesResponseProto {
+   repeated BlockStoragePolicyProto policies = 1;
+ }
+ 
+ message SetPermissionRequestProto {
+   required string src = 1;
+   required FsPermissionProto permission = 2;
+ }
+ 
+ message SetPermissionResponseProto { // void response
+ }
+ 
+ message SetOwnerRequestProto {
+   required string src = 1;
+   optional string 

[36/50] [abbrv] hadoop git commit: HDFS-8824. Do not use small blocks for balancing the cluster.

2015-08-24 Thread zhz
HDFS-8824. Do not use small blocks for balancing the cluster.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bc0a4f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bc0a4f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bc0a4f2

Branch: refs/heads/HDFS-7285
Commit: 2bc0a4f299fbd8035e29f62ce9cd22e209a62805
Parents: 1569228
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Fri Aug 14 13:03:19 2015 -0700
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Fri Aug 14 13:03:19 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 +++
 .../hadoop/hdfs/server/balancer/Balancer.java   |  9 -
 .../hadoop/hdfs/server/balancer/Dispatcher.java | 38 
 .../hdfs/server/balancer/TestBalancer.java  | 11 --
 5 files changed, 47 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc0a4f2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 880284c..be799af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -786,6 +786,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-7649. Multihoming docs should emphasize using hostnames in
 configurations. (Brahma Reddy Battula via Arpit Agarwal)
 
+HDFS-8824. Do not use small blocks for balancing the cluster.  (szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc0a4f2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 4ef7a4d..1af3a49 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -353,6 +353,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int DFS_BALANCER_DISPATCHERTHREADS_DEFAULT = 200;
   public static final String  DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY =
       "dfs.balancer.max-size-to-move";
   public static final long    DFS_BALANCER_MAX_SIZE_TO_MOVE_DEFAULT =
       10L*1024*1024*1024;
+  public static final String  DFS_BALANCER_GETBLOCKS_SIZE_KEY =
+      "dfs.balancer.getBlocks.size";
+  public static final long    DFS_BALANCER_GETBLOCKS_SIZE_DEFAULT =
+      2L*1024*1024*1024; // 2GB
+  public static final String  DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY =
+      "dfs.balancer.getBlocks.min-block-size";
+  public static final long    DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_DEFAULT =
+      10L*1024*1024; // 10MB
 
 
   public static final String  DFS_MOVER_MOVEDWINWIDTH_KEY =
       "dfs.mover.movedWinWidth";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc0a4f2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 742a300..6fc024e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -251,10 +251,17 @@ public class Balancer {
 DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
 DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
 
+    final long getBlocksSize = getLong(conf,
+        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_KEY,
+        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_DEFAULT);
+    final long getBlocksMinBlockSize = getLong(conf,
+        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY,
+        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_DEFAULT);
+
     this.nnc = theblockpool;
     this.dispatcher = new Dispatcher(theblockpool, p.nodesToBeIncluded,
         p.nodesToBeExcluded, movedWinWidth, moverThreads, dispatcherThreads,
-        maxConcurrentMovesPerNode, conf);
+        maxConcurrentMovesPerNode, getBlocksSize, getBlocksMinBlockSize, conf);
 this.threshold = p.threshold;
 this.policy = p.policy;
 

[33/50] [abbrv] hadoop git commit: HDFS-8270. create() always retried with hardcoded timeout when file already exists with open lease (Contributed by J.Andreina) Moved to 2.6.1

2015-08-24 Thread zhz
HDFS-8270. create() always retried with hardcoded timeout when file already 
exists with open lease (Contributed by J.Andreina)
Moved to 2.6.1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84bf7129
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84bf7129
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84bf7129

Branch: refs/heads/HDFS-7285
Commit: 84bf71295a5e52b2a7bb69440a885a25bc75f544
Parents: fc508b4
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Aug 14 16:13:30 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri Aug 14 16:13:30 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84bf7129/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dba4535..0b28709 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1265,9 +1265,6 @@ Release 2.7.1 - 2015-07-06
 HDFS-8451. DFSClient probe for encryption testing interprets empty URI
 property for enabled. (Steve Loughran via xyao)
 
-HDFS-8270. create() always retried with hardcoded timeout when file already
-exists with open lease (J.Andreina via vinayakumarb)
-
 HDFS-8523. Remove usage information on unsupported operation
 fsck -showprogress from branch-2 (J.Andreina via vinayakumarb)
 
@@ -2339,6 +2336,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7225. Remove stale block invalidation work when DN re-registers with
 different UUID. (Zhe Zhang and Andrew Wang)
 
+HDFS-8270. create() always retried with hardcoded timeout when file already
+exists with open lease (J.Andreina via vinayakumarb)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



[22/50] [abbrv] hadoop git commit: YARN-4047. ClientRMService getApplications has high scheduler lock contention. Contributed by Jason Lowe

2015-08-24 Thread zhz
YARN-4047. ClientRMService getApplications has high scheduler lock contention. 
Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a445fcf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a445fcf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a445fcf

Branch: refs/heads/HDFS-7285
Commit: 7a445fcfabcf9c6aae219051f65d3f6cb8feb87c
Parents: 38aed1a
Author: Jian He jia...@apache.org
Authored: Thu Aug 13 16:02:57 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Thu Aug 13 16:02:57 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  |  3 +++
 .../yarn/server/resourcemanager/ClientRMService.java | 11 +++
 2 files changed, 10 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a445fcf/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3d19734..a4c16b1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -766,6 +766,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4005. Completed container whose app is finished is possibly not
 removed from NMStateStore. (Jun Gong via jianhe)
 
+YARN-4047. ClientRMService getApplications has high scheduler lock 
contention.
+(Jason Lowe via jianhe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a445fcf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index e4199be..2dcfe9a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -752,13 +752,9 @@ public class ClientRMService extends AbstractService 
implements
   RMApp application = appsIter.next();
 
   // Check if current application falls under the specified scope
-  boolean allowAccess = checkAccess(callerUGI, application.getUser(),
-  ApplicationAccessType.VIEW_APP, application);
   if (scope == ApplicationsRequestScope.OWN &&
       !callerUGI.getUserName().equals(application.getUser())) {
 continue;
-  } else if (scope == ApplicationsRequestScope.VIEWABLE && !allowAccess) {
-    continue;
   }
 
   if (applicationTypes != null && !applicationTypes.isEmpty()) {
@@ -807,6 +803,13 @@ public class ClientRMService extends AbstractService 
implements
 }
   }
 
+  // checkAccess can grab the scheduler lock so call it last
+  boolean allowAccess = checkAccess(callerUGI, application.getUser(),
+  ApplicationAccessType.VIEW_APP, application);
+  if (scope == ApplicationsRequestScope.VIEWABLE && !allowAccess) {
+continue;
+  }
+
   reports.add(application.createAndGetApplicationReport(
   callerUGI.getUserName(), allowAccess));
 }



[20/50] [abbrv] hadoop git commit: HDFS-7649. Multihoming docs should emphasize using hostnames in configurations. (Contributed by Brahma Reddy Battula)

2015-08-24 Thread zhz
HDFS-7649. Multihoming docs should emphasize using hostnames in configurations. 
(Contributed by Brahma Reddy Battula)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae57d60d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae57d60d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae57d60d

Branch: refs/heads/HDFS-7285
Commit: ae57d60d8239916312bca7149e2285b2ed3b123a
Parents: b73181f
Author: Arpit Agarwal a...@apache.org
Authored: Thu Aug 13 13:38:24 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Thu Aug 13 14:26:40 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/site/markdown/HdfsMultihoming.md  | 2 ++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae57d60d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c88368e..ce9a3f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -783,6 +783,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8622. Implement GETCONTENTSUMMARY operation for WebImageViewer.
 (Jagadesh Kiran N via aajisaka)
 
+HDFS-7649. Multihoming docs should emphasize using hostnames in
+configurations. (Brahma Reddy Battula via Arpit Agarwal)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae57d60d/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsMultihoming.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsMultihoming.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsMultihoming.md
index fc7cc3d..0c98d29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsMultihoming.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsMultihoming.md
@@ -54,6 +54,8 @@ By default `HDFS` endpoints are specified as either hostnames 
or IP addresses. I
 
 The solution is to have separate setting for server endpoints to force binding 
the wildcard IP address `INADDR_ANY` i.e. `0.0.0.0`. Do NOT supply a port 
number with any of these settings.
 
+**NOTE:** Prefer using hostnames over IP addresses in master/slave 
configuration files.
+
<property>
  <name>dfs.namenode.rpc-bind-host</name>
  <value>0.0.0.0</value>



[26/50] [abbrv] hadoop git commit: HDFS-7235. DataNode#transferBlock should report blocks that don't exist using reportBadBlock (yzhang via cmccabe) Moved CHANGES.txt entry to 2.6.1

2015-08-24 Thread zhz
HDFS-7235. DataNode#transferBlock should report blocks that don't exist using 
reportBadBlock (yzhang via cmccabe)
Moved CHANGES.txt entry to 2.6.1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2b4bc9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2b4bc9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2b4bc9b

Branch: refs/heads/HDFS-7285
Commit: f2b4bc9b6a1bd3f9dbfc4e85c1b9bde238da3627
Parents: d25cb8f
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Aug 14 11:37:39 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri Aug 14 11:37:39 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2b4bc9b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1f72264..e4e2896 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1819,9 +1819,6 @@ Release 2.7.0 - 2015-04-20
 HDFS-7301. TestMissingBlocksAlert should use MXBeans instead of old web UI.
 (Zhe Zhang via wheat9)
 
-HDFS-7235. DataNode#transferBlock should report blocks that don't exist
-using reportBadBlock (yzhang via cmccabe)
-
 HDFS-7263. Snapshot read can reveal future bytes for appended files.
 (Tao Luo via shv)
 
@@ -2339,6 +2336,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7213. processIncrementalBlockReport performance degradation.
 (Eric Payne via kihwal)
 
+HDFS-7235. DataNode#transferBlock should report blocks that don't exist
+using reportBadBlock (yzhang via cmccabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



[32/50] [abbrv] hadoop git commit: HADOOP-7139. Allow appending to existing SequenceFiles (Contributed by kanaka kumar avvaru) Moved to 2.6.1

2015-08-24 Thread zhz
HADOOP-7139. Allow appending to existing SequenceFiles (Contributed by kanaka 
kumar avvaru)
Moved to 2.6.1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc508b41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc508b41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc508b41

Branch: refs/heads/HDFS-7285
Commit: fc508b41a90a193f06a0e28ee40862bf9ee0f0c7
Parents: 05ed690
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Aug 14 15:55:29 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri Aug 14 15:55:29 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc508b41/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 57ef1c5..d07adcb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -678,9 +678,6 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11965. determine-flaky-tests needs a summary mode.
 (Yufei Gu via Yongjun Zhang)
 
-HADOOP-7139. Allow appending to existing SequenceFiles
-(kanaka kumar avvaru via vinayakumarb)
-
 HADOOP-11958. MetricsSystemImpl fails to show backtrace when an error
 occurs (Jason Lowe via jeagles)
 
@@ -1872,6 +1869,9 @@ Release 2.6.1 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-7139. Allow appending to existing SequenceFiles
+(kanaka kumar avvaru via vinayakumarb)
+
   OPTIMIZATIONS
 
   BUG FIXES



[06/50] [abbrv] hadoop git commit: YARN-3887. Support changing Application priority during runtime. Contributed by Sunil G

2015-08-24 Thread zhz
YARN-3887. Support changing Application priority during runtime. Contributed by 
Sunil G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa1d84ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa1d84ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa1d84ae

Branch: refs/heads/HDFS-7285
Commit: fa1d84ae2739a1e76f58b9c96d1378f9453cc0d2
Parents: b56daff
Author: Jian He jia...@apache.org
Authored: Mon Aug 10 20:51:54 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Mon Aug 10 20:51:54 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../resourcemanager/recovery/RMStateStore.java  |   5 +
 .../scheduler/AbstractYarnScheduler.java|   7 +
 .../scheduler/SchedulerApplicationAttempt.java  |   2 +-
 .../scheduler/YarnScheduler.java|  11 +
 .../scheduler/capacity/CapacityScheduler.java   |  49 
 .../AbstractComparatorOrderingPolicy.java   |   6 +
 .../capacity/TestApplicationPriority.java   | 260 +++
 8 files changed, 342 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa1d84ae/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5e27a2f..ada1056 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -164,6 +164,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3873. PendingApplications in LeafQueue should also use 
OrderingPolicy. 
 (Sunil G via wangda)
 
+YARN-3887. Support changing Application priority during runtime. (Sunil G
+via jianhe)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa1d84ae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index 5036450..affbee1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -706,6 +706,11 @@ public abstract class RMStateStore extends AbstractService 
{
 dispatcher.getEventHandler().handle(new RMStateUpdateAppEvent(appState));
   }
 
+  public void updateApplicationStateSynchronously(
+  ApplicationStateData appState) {
+handleStoreEvent(new RMStateUpdateAppEvent(appState));
+  }
+
   public void updateFencedState() {
 handleStoreEvent(new RMStateStoreEvent(RMStateStoreEventType.FENCED));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa1d84ae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index d69600a..ed05189 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -701,4 +701,11 @@ public abstract class AbstractYarnScheduler
 // specific scheduler.
 return Priority.newInstance(0);
   }
+
+  @Override
+  public void updateApplicationPriority(Priority newPriority,
+  ApplicationId applicationId) throws YarnException {
+// Dummy Implementation till Application Priority changes are done in
+// specific scheduler.
+  }
 }


[47/50] [abbrv] hadoop git commit: Merge commit '456e901a4c5c639267ee87b8e5f1319f256d20c2' (HDFS-6407. Add sorting and pagination in the datanode tab of the NN Web UI. Contributed by Haohui Mai.) into

2015-08-24 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b6a63bb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index b71e59e,000..4ca8fe6
mode 100644,00..100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@@ -1,653 -1,0 +1,653 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.hadoop.hdfs;
 +
 +import java.io.IOException;
 +import java.io.InterruptedIOException;
 +import java.nio.ByteBuffer;
 +import java.nio.channels.ClosedChannelException;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.EnumSet;
 +import java.util.List;
 +import java.util.concurrent.BlockingQueue;
 +import java.util.concurrent.LinkedBlockingQueue;
 +
 +import org.apache.hadoop.HadoopIllegalArgumentException;
 +import org.apache.hadoop.classification.InterfaceAudience;
 +import org.apache.hadoop.fs.CreateFlag;
 +import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 +import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 +import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 +import org.apache.hadoop.io.MultipleIOException;
 +import org.apache.hadoop.io.erasurecode.CodecUtil;
 +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 +import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 +import org.apache.hadoop.util.DataChecksum;
 +import org.apache.hadoop.util.Progressable;
 +import org.apache.htrace.Sampler;
 +import org.apache.htrace.Trace;
 +import org.apache.htrace.TraceScope;
 +
 +import com.google.common.base.Preconditions;
 +
 +
 +/**
 + * This class supports writing files in striped layout and erasure coded 
format.
 + * Each stripe contains a sequence of cells.
 + */
 +@InterfaceAudience.Private
 +public class DFSStripedOutputStream extends DFSOutputStream {
 +  static class MultipleBlockingQueue<T> {
 +    private final List<BlockingQueue<T>> queues;
 +
 +    MultipleBlockingQueue(int numQueue, int queueSize) {
 +      queues = new ArrayList<>(numQueue);
 +      for (int i = 0; i < numQueue; i++) {
 +        queues.add(new LinkedBlockingQueue<T>(queueSize));
 +      }
 +    }
 +
 +    boolean isEmpty() {
 +      for(int i = 0; i < queues.size(); i++) {
 +        if (!queues.get(i).isEmpty()) {
 +          return false;
 +        }
 +      }
 +      return true;
 +    }
 +
 +    int numQueues() {
 +      return queues.size();
 +    }
 +
 +    void offer(int i, T object) {
 +      final boolean b = queues.get(i).offer(object);
 +      Preconditions.checkState(b, "Failed to offer " + object
 +          + " to queue, i=" + i);
 +    }
 +
 +    T take(int i) throws InterruptedIOException {
 +      try {
 +        return queues.get(i).take();
 +      } catch(InterruptedException ie) {
 +        throw DFSUtil.toInterruptedIOException("take interrupted, i=" + i, ie);
 +      }
 +    }
 +
 +    T poll(int i) {
 +      return queues.get(i).poll();
 +    }
 +
 +    T peek(int i) {
 +      return queues.get(i).peek();
 +    }
 +  }
 +
 +  /** Coordinate the communication between the streamers. */
 +  class Coordinator {
 +    private final MultipleBlockingQueue<LocatedBlock> followingBlocks;
 +    private final MultipleBlockingQueue<ExtendedBlock> endBlocks;
 +
 +    private final MultipleBlockingQueue<LocatedBlock> newBlocks;
 +    private final MultipleBlockingQueue<ExtendedBlock> updateBlocks;
 +
 +    Coordinator(final DfsClientConf conf, final int numDataBlocks,
 +        final int numAllBlocks) {
 +      followingBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
 +      endBlocks = new MultipleBlockingQueue<>(numDataBlocks, 1);
 +
 +      newBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
 +      updateBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
 +    }
 +
 +    MultipleBlockingQueue<LocatedBlock> getFollowingBlocks() {
 

[43/50] [abbrv] hadoop git commit: HDFS-6407. Add sorting and pagination in the datanode tab of the NN Web UI. Contributed by Haohui Mai.

2015-08-24 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/456e901a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery.dataTables.min.js
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery.dataTables.min.js
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery.dataTables.min.js
new file mode 100644
index 000..85dd817
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery.dataTables.min.js
@@ -0,0 +1,160 @@
+/*! DataTables 1.10.7
+ * ©2008-2015 SpryMedia Ltd - datatables.net/license
+ */
+(function(Ea,Q,k){var P=function(h){function W(a){var 
b,c,e={};h.each(a,function(d){if((b=d.match(/^([^A-Z]+?)([A-Z])/))-1!==a aa 
ai ao as b fn i m o s .indexOf(b[1]+ 
))c=d.replace(b[0],b[2].toLowerCase()),e[c]=d,o===b[1]W(a[d])});a._hungarianMap=e}function
 H(a,b,c){a._hungarianMap||W(a);var 
e;h.each(b,function(d){e=a._hungarianMap[d];if(e!==k(c||b[e]===k))o===e.charAt(0)?(b[e]||(b[e]={}),h.extend(!0,b[e],b[d]),H(a[e],b[e],c)):b[e]=b[d]})}function
 P(a){var b=m.defaults.oLanguage,c=a.sZeroRecords;
+!a.sEmptyTable(cNo data available in 
table===b.sEmptyTable)E(a,a,sZeroRecords,sEmptyTable);!a.sLoadingRecords(cLoading...===b.sLoadingRecords)E(a,a,sZeroRecords,sLoadingRecords);a.sInfoThousands(a.sThousands=a.sInfoThousands);(a=a.sDecimal)db(a)}function
 
eb(a){A(a,ordering,bSort);A(a,orderMulti,bSortMulti);A(a,orderClasses,bSortClasses);A(a,orderCellsTop,bSortCellsTop);A(a,order,aaSorting);A(a,orderFixed,aaSortingFixed);A(a,paging,bPaginate);
+A(a,pagingType,sPaginationType);A(a,pageLength,iDisplayLength);A(a,searching,bFilter);if(a=a.aoSearchCols)for(var
 b=0,c=a.length;bc;b++)a[b]H(m.models.oSearch,a[b])}function 
fb(a){A(a,orderable,bSortable);A(a,orderData,aDataSort);A(a,orderSequence,asSorting);A(a,orderDataType,sortDataType);var
 b=a.aDataSort;b!h.isArray(b)(a.aDataSort=[b])}function gb(a){var 
a=a.oBrowser,b=h(div/).css({position:absolute,top:0,left:0,height:1,width:1,overflow:hidden}).append(h(div/).css({position:absolute,
+top:1,left:1,width:100,overflow:scroll}).append(h('div 
class=test/').css({width:100%,height:10}))).appendTo(body),c=b.find(.test);a.bScrollOversize=100===c[0].offsetWidth;a.bScrollbarLeft=1!==Math.round(c.offset().left);b.remove()}function
 hb(a,b,c,e,d,f){var 
g,j=!1;c!==k(g=c,j=!0);for(;e!==d;)a.hasOwnProperty(e)(g=j?b(g,a[e],e,a):a[e],j=!0,e+=f);return
 g}function Fa(a,b){var 
c=m.defaults.column,e=a.aoColumns.length,c=h.extend({},m.models.oColumn,c,{nTh:b?b:Q.createElement(th),sTitle:c.sTitle?
+c.sTitle:b?b.innerHTML:,aDataSort:c.aDataSort?c.aDataSort:[e],mData:c.mData?c.mData:e,idx:e});a.aoColumns.push(c);c=a.aoPreSearchCols;c[e]=h.extend({},m.models.oSearch,c[e]);ka(a,e,h(b).data())}function
 ka(a,b,c){var 
b=a.aoColumns[b],e=a.oClasses,d=h(b.nTh);if(!b.sWidthOrig){b.sWidthOrig=d.attr(width)||null;var
 
f=(d.attr(style)||).match(/width:\s*(\d+[pxem%]+)/);f(b.sWidthOrig=f[1])}c!==knull!==c(fb(c),H(m.defaults.column,c),c.mDataProp!==k!c.mData(c.mData=c.mDataProp),c.sType
+(b._sManualType=c.sType),c.className!c.sClass(c.sClass=c.className),h.extend(b,c),E(b,c,sWidth,sWidthOrig),c.iDataSort!==k(b.aDataSort=[c.iDataSort]),E(b,c,aDataSort));var
 
g=b.mData,j=R(g),i=b.mRender?R(b.mRender):null,c=function(a){returnstring===typeof
 
a-1!==a.indexOf(@)};b._bAttrSrc=h.isPlainObject(g)(c(g.sort)||c(g.type)||c(g.filter));b.fnGetData=function(a,b,c){var
 e=j(a,b,k,c);return ib?i(e,b,a,c):e};b.fnSetData=function(a,b,c){return 
S(g)(a,b,c)};number!==typeof g
+(a._rowReadObject=!0);a.oFeatures.bSort||(b.bSortable=!1,d.addClass(e.sSortableNone));a=-1!==h.inArray(asc,b.asSorting);c=-1!==h.inArray(desc,b.asSorting);!b.bSortable||!a!c?(b.sSortingClass=e.sSortableNone,b.sSortingClassJUI=):a!c?(b.sSortingClass=e.sSortableAsc,b.sSortingClassJUI=e.sSortJUIAscAllowed):!ac?(b.sSortingClass=e.sSortableDesc,b.sSortingClassJUI=e.sSortJUIDescAllowed):(b.sSortingClass=e.sSortable,b.sSortingClassJUI=e.sSortJUI)}function
 X(a){if(!1!==a.oFeatures.bAutoWidth){var b=
+a.aoColumns;Ga(a);for(var 
c=0,e=b.length;ce;c++)b[c].nTh.style.width=b[c].sWidth}b=a.oScroll;(!==b.sY||!==b.sX)Y(a);w(a,null,column-sizing,[a])}function
 la(a,b){var c=Z(a,bVisible);returnnumber===typeof c[b]?c[b]:null}function 
$(a,b){var c=Z(a,bVisible),c=h.inArray(b,c);return-1!==c?c:null}function 
aa(a){return Z(a,bVisible).length}function Z(a,b){var 
c=[];h.map(a.aoColumns,function(a,d){a[b]c.push(d)});return c}function 
Ha(a){var b=a.aoColumns,c=a.aoData,e=m.ext.type.detect,d,
+f,g,j,i,h,l,q,n;d=0;for(f=b.length;df;d++)if(l=b[d],n=[],!l.sTypel._sManualType)l.sType=l._sManualType;else
 
if(!l.sType){g=0;for(j=e.length;gj;g++){i=0;for(h=c.length;ih;i++){n[i]===k(n[i]=x(a,i,d,type));q=e[g](n[i],a);if(!qg!==e.length-1)break;if(html===q)break}if(q){l.sType=q;break}}l.sType||(l.sType=string)}}function
 ib(a,b,c,e){var 
d,f,g,j,i,o,l=a.aoColumns;if(b)for(d=b.length-1;0=d;d--){o=b[d];var 

[21/50] [abbrv] hadoop git commit: YARN-4005. Completed container whose app is finished is possibly not removed from NMStateStore. Contributed by Jun Gong

2015-08-24 Thread zhz
YARN-4005. Completed container whose app is finished is possibly not removed 
from NMStateStore. Contributed by Jun Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38aed1a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38aed1a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38aed1a9

Branch: refs/heads/HDFS-7285
Commit: 38aed1a94ed7b6da62e2445b5610bc02b1cddeeb
Parents: ae57d60
Author: Jian He jia...@apache.org
Authored: Thu Aug 13 14:46:08 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Thu Aug 13 14:46:08 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../nodemanager/NodeStatusUpdaterImpl.java  |  8 ++---
 .../nodemanager/TestNodeStatusUpdater.java  | 34 
 3 files changed, 41 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38aed1a9/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 9745d9d..3d19734 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -763,6 +763,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3992. TestApplicationPriority.testApplicationPriorityAllocation fails 
 intermittently. (Contributed by Sunil G)
 
+YARN-4005. Completed container whose app is finished is possibly not
+removed from NMStateStore. (Jun Gong via jianhe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38aed1a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 30a2bd5..7c5c28b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -474,12 +474,12 @@ public class NodeStatusUpdaterImpl extends 
AbstractService implements
 } else {
   if (!isContainerRecentlyStopped(containerId)) {
 pendingCompletedContainers.put(containerId, containerStatus);
-// Adding to finished containers cache. Cache will keep it around 
at
-// least for #durationToTrackStoppedContainers duration. In the
-// subsequent call to stop container it will get removed from 
cache.
-addCompletedContainer(containerId);
   }
 }
+// Adding to finished containers cache. Cache will keep it around at
+// least for #durationToTrackStoppedContainers duration. In the
+// subsequent call to stop container it will get removed from cache.
+addCompletedContainer(containerId);
   } else {
 containerStatuses.add(containerStatus);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38aed1a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
index bc48adf..a9ef72f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -994,6 +994,40 @@ public class TestNodeStatusUpdater {
 Assert.assertTrue(containerIdSet.contains(runningContainerId));
   }
 
+  @Test(timeout = 1)
+  public void testCompletedContainersIsRecentlyStopped() throws Exception {
+NodeManager nm = new NodeManager();
+nm.init(conf);
+

[18/50] [abbrv] hadoop git commit: HADOOP-12295. Improve NetworkTopology#InnerNode#remove logic. (yliu)

2015-08-24 Thread zhz
HADOOP-12295. Improve NetworkTopology#InnerNode#remove logic. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53bef9c5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53bef9c5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53bef9c5

Branch: refs/heads/HDFS-7285
Commit: 53bef9c5b98dee87d4ffaf35415bc38e2f876ed8
Parents: 40f8151
Author: yliu y...@apache.org
Authored: Thu Aug 13 16:45:20 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Aug 13 16:45:20 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../org/apache/hadoop/net/NetworkTopology.java  | 38 ++--
 .../apache/hadoop/net/TestNetworkTopology.java  |  1 +
 3 files changed, 21 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53bef9c5/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 78f12e4..c80be05 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -749,6 +749,8 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12318. Expose underlying LDAP exceptions in SaslPlainServer. (Mike
 Yoder via atm)
 
+HADOOP-12295. Improve NetworkTopology#InnerNode#remove logic. (yliu)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53bef9c5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 970ad40..fe6e439 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -166,10 +166,11 @@ public class NetworkTopology {
  * @return true if the node is added; false otherwise
  */
 boolean add(Node n) {
-  if (!isAncestor(n))
-throw new IllegalArgumentException(n.getName()+", which is located at "
-+n.getNetworkLocation()+", is not a decendent of "
-+getPath(this));
+  if (!isAncestor(n)) {
+throw new IllegalArgumentException(n.getName()
++ ", which is located at " + n.getNetworkLocation()
++ ", is not a descendent of " + getPath(this));
+  }
   if (isParent(n)) {
 // this node is the parent of n; add n directly
 n.setParent(this);
@@ -227,12 +228,11 @@ public class NetworkTopology {
  * @return true if the node is deleted; false otherwise
  */
 boolean remove(Node n) {
-  String parent = n.getNetworkLocation();
-  String currentPath = getPath(this);
-  if (!isAncestor(n))
+  if (!isAncestor(n)) {
 throw new IllegalArgumentException(n.getName()
-   +, which is located at 
-   +parent+, is not a descendent of 
+currentPath);
++ ", which is located at " + n.getNetworkLocation()
++ ", is not a descendent of " + getPath(this));
+  }
   if (isParent(n)) {
 // this node is the parent of n; remove n directly
 if (childrenMap.containsKey(n.getName())) {
@@ -250,15 +250,8 @@ public class NetworkTopology {
   } else {
 // find the next ancestor node: the parent node
 String parentName = getNextAncestorName(n);
-InnerNode parentNode = null;
-int i;
-for(i=0; i<children.size(); i++) {
-  if (children.get(i).getName().equals(parentName)) {
-parentNode = (InnerNode)children.get(i);
-break;
-  }
-}
-if (parentNode==null) {
+InnerNode parentNode = (InnerNode)childrenMap.get(parentName);
+if (parentNode == null) {
   return false;
 }
 // remove n from the parent node
@@ -266,8 +259,13 @@ public class NetworkTopology {
 // if the parent node has no children, remove the parent node too
 if (isRemoved) {
   if (parentNode.getNumOfChildren() == 0) {
-Node prev = children.remove(i);
-childrenMap.remove(prev.getName());
+for(int i=0; i < children.size(); i++) {
+  if (children.get(i).getName().equals(parentName)) {
+children.remove(i);
+childrenMap.remove(parentName);
+   

[14/50] [abbrv] hadoop git commit: HADOOP-12318. Expose underlying LDAP exceptions in SaslPlainServer. Contributed by Mike Yoder.

2015-08-24 Thread zhz
HADOOP-12318. Expose underlying LDAP exceptions in SaslPlainServer. Contributed 
by Mike Yoder.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/820f864a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/820f864a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/820f864a

Branch: refs/heads/HDFS-7285
Commit: 820f864a26d90e9f4a3584577df581dcac20f9b6
Parents: 3e715a4
Author: Aaron T. Myers a...@apache.org
Authored: Wed Aug 12 15:16:05 2015 -0700
Committer: Aaron T. Myers a...@apache.org
Committed: Wed Aug 12 15:24:16 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/security/SaslPlainServer.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/820f864a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7d7982f..e9be2e0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -746,6 +746,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12280. Skip unit tests based on maven profile rather than
 NativeCodeLoader.isNativeCodeLoaded (Masatake Iwasaki via Colin P. McCabe)
 
+HADOOP-12318. Expose underlying LDAP exceptions in SaslPlainServer. (Mike
+Yoder via atm)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/820f864a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java
index 7d1b980..7c74f4a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java
@@ -105,7 +105,7 @@ public class SaslPlainServer implements SaslServer {
 authz = ac.getAuthorizedID();
   }
 } catch (Exception e) {
-  throw new SaslException("PLAIN auth failed: " + e.getMessage());
+  throw new SaslException("PLAIN auth failed: " + e.getMessage(), e);
 } finally {
   completed = true;
 }



[07/50] [abbrv] hadoop git commit: HDFS-8805. Archival Storage: getStoragePolicy should not need superuser privilege. Contributed by Brahma Reddy Battula.

2015-08-24 Thread zhz
HDFS-8805. Archival Storage: getStoragePolicy should not need superuser 
privilege. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fc3c779
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fc3c779
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fc3c779

Branch: refs/heads/HDFS-7285
Commit: 1fc3c779a422bafdb86ad1a5b2349802dda1cb62
Parents: fa1d84a
Author: Jing Zhao ji...@apache.org
Authored: Tue Aug 11 10:28:18 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Tue Aug 11 10:28:18 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java   | 2 +-
 .../hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java  | 9 +++--
 .../hadoop/hdfs/server/namenode/FSDirWriteFileOp.java   | 2 +-
 4 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fc3c779/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 15c9df5..61def02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -775,6 +775,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8818. Changes the global moveExecutor to per datanode executors and
 changes MAX_SIZE_TO_MOVE to be configurable.  (szetszwo)
 
+HDFS-8805. Archival Storage: getStoragePolicy should not need superuser 
privilege.
+(Brahma Reddy Battula via jing9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fc3c779/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
index abb2dc8..3d79d09 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
@@ -135,7 +135,7 @@ final class FSDirAppendOp {
 }
 
 HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, src, false,
-FSDirectory.isReservedRawName(srcArg), true);
+FSDirectory.isReservedRawName(srcArg));
 if (lb != null) {
   NameNode.stateChangeLog.debug(
   DIR* NameSystem.appendFile: file {} for {} at {} block {} block

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fc3c779/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 14f4d66..4a45074 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -105,13 +105,11 @@ class FSDirStatAndListingOp {
 byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
 src = fsd.resolvePath(pc, src, pathComponents);
 final INodesInPath iip = fsd.getINodesInPath(src, resolveLink);
-boolean isSuperUser = true;
 if (fsd.isPermissionEnabled()) {
   fsd.checkPermission(pc, iip, false, null, null, null, null, false);
-  isSuperUser = pc.isSuperUser();
 }
 return getFileInfo(fsd, src, resolveLink,
-FSDirectory.isReservedRawName(srcArg), isSuperUser);
+FSDirectory.isReservedRawName(srcArg));
   }
 
   /**
@@ -369,8 +367,7 @@ class FSDirStatAndListingOp {
   }
 
   static HdfsFileStatus getFileInfo(
-  FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath,
-  boolean includeStoragePolicy)
+  FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath)
 throws IOException {
 String srcs = FSDirectory.normalizePath(src);
 if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
@@ -385,7 +382,7 @@ class FSDirStatAndListingOp {
 fsd.readLock();
 try {
   final INodesInPath iip = fsd.getINodesInPath(srcs, resolveLink);
-  return 

[46/50] [abbrv] hadoop git commit: Merge commit '456e901a4c5c639267ee87b8e5f1319f256d20c2' (HDFS-6407. Add sorting and pagination in the datanode tab of the NN Web UI. Contributed by Haohui Mai.) into

2015-08-24 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b6a63bb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 36ce133,508da85..dfea5f3
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@@ -42,7 -44,8 +44,9 @@@ import javax.management.ObjectName
  import org.apache.hadoop.HadoopIllegalArgumentException;
  import org.apache.hadoop.classification.InterfaceAudience;
  import org.apache.hadoop.conf.Configuration;
++import org.apache.hadoop.fs.FileEncryptionInfo;
  import org.apache.hadoop.fs.StorageType;
+ import org.apache.hadoop.hdfs.DFSUtilClient;
  import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
  import org.apache.hadoop.hdfs.DFSConfigKeys;
  import org.apache.hadoop.hdfs.DFSUtil;
@@@ -52,12 -55,10 +56,11 @@@ import org.apache.hadoop.hdfs.protocol.
  import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
  import org.apache.hadoop.hdfs.protocol.DatanodeID;
  import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 +import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
  import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
--import org.apache.hadoop.fs.FileEncryptionInfo;
  import org.apache.hadoop.hdfs.protocol.LocatedBlock;
  import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 +import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
  import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
  import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
  import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
@@@ -77,22 -79,17 +81,24 @@@ import org.apache.hadoop.hdfs.server.pr
  import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
  import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
  import 
org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 +import 
org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;
  import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
  import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
  import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
  import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
  import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
  import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
  import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 +
 +import static 
org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
 +import static 
org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
 +
+ import org.apache.hadoop.metrics2.util.MBeans;
  import org.apache.hadoop.net.Node;
  import org.apache.hadoop.security.UserGroupInformation;
 +import org.apache.hadoop.security.token.Token;
  import org.apache.hadoop.util.Daemon;
  import org.apache.hadoop.util.LightWeightGSet;
  import org.apache.hadoop.util.Time;
@@@ -818,11 -786,12 +835,11 @@@ public class BlockManager implements Bl
  }
  return locations;
}
--  
 -  private List<LocatedBlock> createLocatedBlockList(
 -  final BlockInfo[] blocks,
++
 +  private List<LocatedBlock> createLocatedBlockList(final BlockInfo[] blocks,
final long offset, final long length, final int nrBlocksToReturn,
final AccessMode mode) throws IOException {
 -int curBlk = 0;
 +int curBlk;
  long curPos = 0, blkSize = 0;
  int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
 for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
@@@ -875,25 -844,19 +892,26 @@@
}
  
/** @return a LocatedBlock for the given block */
-   private LocatedBlock createLocatedBlock(final BlockInfo blk, final long 
pos) {
 -  private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos
 -  ) throws IOException {
 -if (blk instanceof BlockInfoContiguousUnderConstruction) {
 -  if (blk.isComplete()) {
 -throw new IOException(
 -"blk instanceof BlockInfoUnderConstruction && blk.isComplete()"
 -+ ", blk=" + blk);
++  private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos)
++  throws IOException {
 +if (!blk.isComplete()) {
 +  if (blk.isStriped()) {
- final BlockInfoUnderConstructionStriped uc =
- (BlockInfoUnderConstructionStriped) blk;
++final BlockInfoStripedUnderConstruction uc =
++(BlockInfoStripedUnderConstruction) blk;
 +

[29/50] [abbrv] hadoop git commit: HADOOP-10786. Fix UGI#reloginFromKeytab on Java 8. Contributed by Stephen Chu. Moved CHANGES.txt entry to 2.6.1

2015-08-24 Thread zhz
HADOOP-10786. Fix UGI#reloginFromKeytab on Java 8. Contributed by Stephen Chu.
Moved CHANGES.txt entry to 2.6.1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7aa8139
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7aa8139
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7aa8139

Branch: refs/heads/HDFS-7285
Commit: e7aa81394dce61cc96d480e21204263a5f2ed153
Parents: 24a11e3
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Aug 14 12:23:51 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri Aug 14 12:23:51 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7aa8139/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index c84af6a..6e48c20 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1199,8 +1199,6 @@ Release 2.7.0 - 2015-04-20
 
 HADOOP-10563. Remove the dependency of jsp in trunk. (wheat9)
 
-HADOOP-10786. Fix UGI#reloginFromKeytab on Java 8. (Stephen Chu via wheat9)
-
 HADOOP-11291. Log the cause of SASL connection failures.
 (Stephen Chu via cnauroth)
 



[42/50] [abbrv] hadoop git commit: HDFS-8713. Convert DatanodeDescriptor to use SLF4J logging.

2015-08-24 Thread zhz
HDFS-8713. Convert DatanodeDescriptor to use SLF4J logging.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e7b7e2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e7b7e2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e7b7e2c

Branch: refs/heads/HDFS-7285
Commit: 2e7b7e2cda67eba4c03e0a2c7892d868d235b0cf
Parents: a7862d5
Author: Andrew Wang w...@apache.org
Authored: Mon Aug 17 10:16:26 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Mon Aug 17 10:17:06 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../blockmanagement/DatanodeDescriptor.java | 40 ++--
 2 files changed, 23 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e7b7e2c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1f9aab4..bfd95f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -791,6 +791,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8883. NameNode Metrics : Add FSNameSystem lock Queue Length.
 (Anu Engineer via xyao)
 
+HDFS-8713. Convert DatanodeDescriptor to use SLF4J logging. (wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e7b7e2c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 7e12a99..87ce753 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -33,8 +33,6 @@ import java.util.Set;
 import com.google.common.annotations.VisibleForTesting;
 
 import com.google.common.collect.ImmutableList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.StorageType;
@@ -50,6 +48,8 @@ import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.util.IntrusiveCollection;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class extends the DatanodeInfo class with ephemeral information (eg
@@ -59,7 +59,8 @@ import org.apache.hadoop.util.Time;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeDescriptor extends DatanodeInfo {
-  public static final Log LOG = LogFactory.getLog(DatanodeDescriptor.class);
+  public static final Logger LOG =
+  LoggerFactory.getLogger(DatanodeDescriptor.class);
   public static final DatanodeDescriptor[] EMPTY_ARRAY = {};
 
   // Stores status of decommissioning.
@@ -319,9 +320,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
 Map.Entry<String, DatanodeStorageInfo> entry = iter.next();
 DatanodeStorageInfo storageInfo = entry.getValue();
 if (storageInfo.getLastBlockReportId() != curBlockReportId) {
-  LOG.info(storageInfo.getStorageID() + " had lastBlockReportId 0x" +
-  Long.toHexString(storageInfo.getLastBlockReportId()) +
-  ", but curBlockReportId = 0x" +
+  LOG.info("{} had lastBlockReportId 0x{} but curBlockReportId = 0x{}",
+  storageInfo.getStorageID(),
+  Long.toHexString(storageInfo.getLastBlockReportId()),
   Long.toHexString(curBlockReportId));
   iter.remove();
   if (zombies == null) {
@@ -446,8 +447,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
 }
 
 if (checkFailedStorages) {
-  LOG.info("Number of failed storage changes from "
-  + this.volumeFailures + " to " + volFailures);
+  if (this.volumeFailures != volFailures) {
+LOG.info("Number of failed storages changes from {} to {}",
+this.volumeFailures, volFailures);
+  }
   synchronized (storageMap) {
 failedStorageInfos =
 new HashSet<>(storageMap.values());
@@ -498,10 +501,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
*/
   private void 

[49/50] [abbrv] hadoop git commit: Merge commit '456e901a4c5c639267ee87b8e5f1319f256d20c2' (HDFS-6407. Add sorting and pagination in the datanode tab of the NN Web UI. Contributed by Haohui Mai.) into

2015-08-24 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b6a63bb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 000,8528999..7756bb9
mode 00,100644..100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@@ -1,0 -1,1486 +1,1511 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hdfs.protocol;
+ 
+ import java.io.IOException;
+ import java.util.EnumSet;
+ import java.util.List;
+ 
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.crypto.CryptoProtocolVersion;
+ import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
+ import org.apache.hadoop.fs.CacheFlag;
+ import org.apache.hadoop.fs.ContentSummary;
+ import org.apache.hadoop.fs.CreateFlag;
+ import org.apache.hadoop.fs.FsServerDefaults;
+ import org.apache.hadoop.fs.Options;
+ import org.apache.hadoop.fs.StorageType;
+ import org.apache.hadoop.fs.XAttr;
+ import org.apache.hadoop.fs.XAttrSetFlag;
+ import org.apache.hadoop.fs.permission.AclEntry;
+ import org.apache.hadoop.fs.permission.AclStatus;
+ import org.apache.hadoop.fs.permission.FsAction;
+ import org.apache.hadoop.fs.permission.FsPermission;
+ import org.apache.hadoop.hdfs.inotify.EventBatchList;
+ import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+ import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+ import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
+ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
+ import org.apache.hadoop.io.EnumSetWritable;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.io.retry.AtMostOnce;
+ import org.apache.hadoop.io.retry.Idempotent;
+ import org.apache.hadoop.security.KerberosInfo;
+ import org.apache.hadoop.security.token.Token;
+ import org.apache.hadoop.security.token.TokenInfo;
+ 
+ import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
+ 
+ /**
+  * ClientProtocol is used by user code via the DistributedFileSystem class to
+  * communicate with the NameNode.  User code can manipulate the directory
+  * namespace, as well as open/close file streams, etc.
+  *
+  **/
+ @InterfaceAudience.Private
+ @InterfaceStability.Evolving
+ @KerberosInfo(
+ serverPrincipal = DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
+ @TokenInfo(DelegationTokenSelector.class)
+ public interface ClientProtocol {
+ 
+   /**
+* Until version 69, this class ClientProtocol served as both
+* the client interface to the NN AND the RPC protocol used to
+* communicate with the NN.
+*
+* This class is used by both the DFSClient and the
+* NN server side to insulate from the protocol serialization.
+*
+* If you are adding/changing this interface then you need to
+* change both this class and ALSO related protocol buffer
+* wire protocol definition in ClientNamenodeProtocol.proto.
+*
+* For more details on protocol buffer wire protocol, please see
+* .../org/apache/hadoop/hdfs/protocolPB/overview.html
+*
+* The log of historical changes can be retrieved from the svn).
+* 69: Eliminate overloaded method names.
+*
+* 69L is the last version id when this class was used for protocols
+*  serialization. DO not update this version any further.
+*/
+   long versionID = 69L;
+ 
+   ///
+   // File contents
+   ///
+   /**
+* Get locations of the blocks 

[10/50] [abbrv] hadoop git commit: HDFS-8887. Expose storage type and storage ID in BlockLocation.

2015-08-24 Thread zhz
HDFS-8887. Expose storage type and storage ID in BlockLocation.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ea1a833
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ea1a833
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ea1a833

Branch: refs/heads/HDFS-7285
Commit: 1ea1a8334ea01814121490a5bfd2a0205c66d6e4
Parents: 3ae716f
Author: Andrew Wang w...@apache.org
Authored: Tue Aug 11 23:25:33 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Tue Aug 11 23:25:33 2015 -0700

--
 .../org/apache/hadoop/fs/BlockLocation.java | 55 -
 .../org/apache/hadoop/fs/TestBlockLocation.java | 23 ++--
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  2 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../apache/hadoop/fs/BlockStorageLocation.java  |  1 +
 .../hadoop/hdfs/DistributedFileSystem.java  |  6 ++
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 62 +++-
 7 files changed, 145 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea1a833/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
index 286d851..7811ef5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
@@ -34,11 +34,15 @@ public class BlockLocation {
   private String[] cachedHosts; // Datanode hostnames with a cached replica
   private String[] names; // Datanode IP:xferPort for accessing the block
   private String[] topologyPaths; // Full path name in network topology
+  private String[] storageIds; // Storage ID of each replica
+  private StorageType[] storageTypes; // Storage type of each replica
   private long offset;  // Offset of the block in the file
   private long length;
   private boolean corrupt;
 
   private static final String[] EMPTY_STR_ARRAY = new String[0];
+  private static final StorageType[] EMPTY_STORAGE_TYPE_ARRAY =
+  new StorageType[0];
 
   /**
* Default Constructor
@@ -58,6 +62,8 @@ public class BlockLocation {
 this.offset = that.offset;
 this.length = that.length;
 this.corrupt = that.corrupt;
+this.storageIds = that.storageIds;
+this.storageTypes = that.storageTypes;
   }
 
   /**
@@ -95,6 +101,13 @@ public class BlockLocation {
 
   public BlockLocation(String[] names, String[] hosts, String[] cachedHosts,
   String[] topologyPaths, long offset, long length, boolean corrupt) {
+this(names, hosts, cachedHosts, topologyPaths, null, null, offset, length,
+corrupt);
+  }
+
+  public BlockLocation(String[] names, String[] hosts, String[] cachedHosts,
+  String[] topologyPaths, String[] storageIds, StorageType[] storageTypes,
+  long offset, long length, boolean corrupt) {
 if (names == null) {
   this.names = EMPTY_STR_ARRAY;
 } else {
@@ -115,6 +128,16 @@ public class BlockLocation {
 } else {
   this.topologyPaths = topologyPaths;
 }
+if (storageIds == null) {
+  this.storageIds = EMPTY_STR_ARRAY;
+} else {
+  this.storageIds = storageIds;
+}
+if (storageTypes == null) {
+  this.storageTypes = EMPTY_STORAGE_TYPE_ARRAY;
+} else {
+  this.storageTypes = storageTypes;
+}
 this.offset = offset;
 this.length = length;
 this.corrupt = corrupt;
@@ -148,7 +171,21 @@ public class BlockLocation {
   public String[] getTopologyPaths() throws IOException {
 return topologyPaths;
   }
-  
+
+  /**
+   * Get the storageID of each replica of the block.
+   */
+  public String[] getStorageIds() {
+return storageIds;
+  }
+
+  /**
+   * Get the storage type of each replica of the block.
+   */
+  public StorageType[] getStorageTypes() {
+return storageTypes;
+  }
+
   /**
* Get the start offset of file associated with this block
*/
@@ -235,6 +272,22 @@ public class BlockLocation {
 }
   }
 
+  public void setStorageIds(String[] storageIds) {
+if (storageIds == null) {
+  this.storageIds = EMPTY_STR_ARRAY;
+} else {
+  this.storageIds = storageIds;
+}
+  }
+
+  public void setStorageTypes(StorageType[] storageTypes) {
+if (storageTypes == null) {
+  this.storageTypes = EMPTY_STORAGE_TYPE_ARRAY;
+} else {
+  this.storageTypes = storageTypes;
+}
+  }
+
   @Override
   public String toString() {
 StringBuilder result = new StringBuilder();


[16/50] [abbrv] hadoop git commit: HADOOP-12258. Need translate java.nio.file.NoSuchFileException to FileNotFoundException to avoid regression. Contributed by Zhihai Xu.

2015-08-24 Thread zhz
HADOOP-12258. Need translate java.nio.file.NoSuchFileException to 
FileNotFoundException to avoid regression. Contributed by Zhihai Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6cc8e38d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6cc8e38d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6cc8e38d

Branch: refs/heads/HDFS-7285
Commit: 6cc8e38db5b26bdd02bc6bc1c9684db2593eec25
Parents: dc2340c
Author: cnauroth cnaur...@apache.org
Authored: Wed Aug 12 16:44:53 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Wed Aug 12 16:44:53 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../apache/hadoop/fs/RawLocalFileSystem.java| 36 
 .../AbstractContractGetFileStatusTest.java  | 61 
 .../contract/AbstractContractSetTimesTest.java  | 61 
 .../hadoop/fs/contract/ContractOptions.java | 12 
 .../TestLocalFSContractGetFileStatus.java   | 33 +++
 .../localfs/TestLocalFSContractSetTimes.java| 33 +++
 .../TestRawlocalContractGetFileStatus.java  | 33 +++
 .../rawlocal/TestRawlocalContractSetTimes.java  | 33 +++
 .../src/test/resources/contract/localfs.xml | 10 
 .../src/test/resources/contract/rawlocal.xml| 10 
 .../hdfs/TestHDFSContractGetFileStatus.java | 46 +++
 .../contract/hdfs/TestHDFSContractSetTimes.java | 45 +++
 .../src/test/resources/contract/hdfs.xml| 10 
 14 files changed, 415 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cc8e38d/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index e9be2e0..78f12e4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1056,6 +1056,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12302. Fix native compilation on Windows after HADOOP-7824
 (Vinayakumar B via Colin P. McCabe)
 
+HADOOP-12258. Need translate java.nio.file.NoSuchFileException to
+FileNotFoundException to avoid regression. (Zhihai Xu via cnauroth)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cc8e38d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 4728dbe..8ff65fa 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -34,6 +34,7 @@ import java.io.FileDescriptor;
 import java.net.URI;
 import java.nio.ByteBuffer;
 import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
 import java.nio.file.attribute.BasicFileAttributes;
 import java.nio.file.attribute.BasicFileAttributeView;
 import java.nio.file.attribute.FileTime;
@@ -650,13 +651,22 @@ public class RawLocalFileSystem extends FileSystem {
 private boolean isPermissionLoaded() {
   return !super.getOwner().isEmpty(); 
 }
-
+
+private static long getLastAccessTime(File f) throws IOException {
+  long accessTime;
+  try {
+accessTime = Files.readAttributes(f.toPath(),
+BasicFileAttributes.class).lastAccessTime().toMillis();
+  } catch (NoSuchFileException e) {
+throw new FileNotFoundException("File " + f + " does not exist");
+  }
+  return accessTime;
+}
+
 DeprecatedRawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs)
   throws IOException {
   super(f.length(), f.isDirectory(), 1, defaultBlockSize,
-  f.lastModified(),
-  Files.readAttributes(f.toPath(),
-BasicFileAttributes.class).lastAccessTime().toMillis(),
+  f.lastModified(), getLastAccessTime(f),
   null, null, null,
   new Path(f.getPath()).makeQualified(fs.getUri(),
 fs.getWorkingDirectory()));
@@ -773,17 +783,21 @@ public class RawLocalFileSystem extends FileSystem {
* Sets the {@link Path}'s last modified time and last access time to
* the given valid times.
*
-   * @param mtime the modification time to set (only if greater than zero).
-   * @param atime the access time to set (only if greater than zero).
+   * @param mtime the modification time to set (only 

[04/50] [abbrv] hadoop git commit: YARN-3873. PendingApplications in LeafQueue should also use OrderingPolicy. (Sunil G via wangda)

2015-08-24 Thread zhz
YARN-3873. PendingApplications in LeafQueue should also use OrderingPolicy. 
(Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf9d3c92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf9d3c92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf9d3c92

Branch: refs/heads/HDFS-7285
Commit: cf9d3c925608e8bc650d43975382ed3014081057
Parents: 8f73bdd
Author: Wangda Tan wan...@apache.org
Authored: Mon Aug 10 14:54:55 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Mon Aug 10 14:54:55 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../scheduler/capacity/CapacityScheduler.java   | 18 +
 .../capacity/CapacitySchedulerContext.java  |  2 -
 .../scheduler/capacity/LeafQueue.java   | 55 -
 ...pacityPreemptionPolicyForNodePartitions.java | 10 ++-
 .../capacity/TestApplicationLimits.java | 14 +---
 .../capacity/TestCapacityScheduler.java | 83 +++-
 .../scheduler/capacity/TestChildQueueOrder.java |  2 -
 .../scheduler/capacity/TestLeafQueue.java   | 49 ++--
 .../scheduler/capacity/TestParentQueue.java |  2 -
 .../scheduler/capacity/TestReservations.java|  2 -
 11 files changed, 106 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf9d3c92/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7d34eeb..5e27a2f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -161,6 +161,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3948. Display Application Priority in RM Web UI.(Sunil G via 
rohithsharmaks)
 
+YARN-3873. PendingApplications in LeafQueue should also use 
OrderingPolicy. 
+(Sunil G via wangda)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf9d3c92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 1d353a6..b4d0095 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -156,17 +156,6 @@ public class CapacityScheduler extends
   static final PartitionedQueueComparator partitionedQueueComparator =
   new PartitionedQueueComparator();
 
-  public static final Comparator<FiCaSchedulerApp> applicationComparator =
-new Comparator<FiCaSchedulerApp>() {
-@Override
-public int compare(FiCaSchedulerApp a1, FiCaSchedulerApp a2) {
-  if (!a1.getPriority().equals(a2.getPriority())) {
-return a1.getPriority().compareTo(a2.getPriority());
-  }
-  return a1.getApplicationId().compareTo(a2.getApplicationId());
-}
-  };
-
   @Override
   public void setConf(Configuration conf) {
   yarnConf = conf;
@@ -275,11 +264,6 @@ public class CapacityScheduler extends
   }
 
   @Override
-  public Comparator<FiCaSchedulerApp> getApplicationComparator() {
-return applicationComparator;
-  }
-
-  @Override
   public ResourceCalculator getResourceCalculator() {
 return calculator;
   }
@@ -1633,7 +1617,7 @@ public class CapacityScheduler extends
if (disposableLeafQueue.getNumApplications() > 0) {
  throw new SchedulerDynamicEditException("The queue " + queueName
  + " is not empty " + disposableLeafQueue.getApplications().size()
-  + " active apps " + disposableLeafQueue.pendingApplications.size()
+  + " active apps " + 
disposableLeafQueue.getPendingApplications().size()
  + " pending apps");
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf9d3c92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java

[19/50] [abbrv] hadoop git commit: HADOOP-12244. recover broken rebase during precommit (aw)

2015-08-24 Thread zhz
HADOOP-12244. recover broken rebase during precommit (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b73181f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b73181f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b73181f1

Branch: refs/heads/HDFS-7285
Commit: b73181f18702f9dc2dfc9d3cdb415b510261e74c
Parents: 53bef9c
Author: Allen Wittenauer a...@apache.org
Authored: Thu Aug 13 12:29:19 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Thu Aug 13 12:29:19 2015 -0700

--
 dev-support/test-patch.sh   | 6 ++
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 2 files changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b73181f1/dev-support/test-patch.sh
--
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index efcd614..a3cdc85 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -947,6 +947,12 @@ function git_checkout
 # we need to explicitly fetch in case the
 # git ref hasn't been brought in tree yet
 if [[ ${OFFLINE} == false ]]; then
+
+  if [[ -f .git/rebase-apply ]]; then
+hadoop_error "ERROR: previous rebase failed. Aborting it."
+${GIT} rebase --abort
+  fi
+
   ${GIT} pull --rebase
   if [[ $? != 0 ]]; then
hadoop_error "ERROR: git pull is failing"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b73181f1/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index c80be05..5d8d20d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -504,6 +504,8 @@ Trunk (Unreleased)
 HADOOP-12009. Clarify FileSystem.listStatus() sorting order  fix
 FileSystemContractBaseTest:testListStatus. (J.Andreina via jghoman)
 
+HADOOP-12244. recover broken rebase during precommit (aw)
+
   OPTIMIZATIONS
 
 HADOOP-7761. Improve the performance of raw comparisons. (todd)



[02/50] [abbrv] hadoop git commit: HDFS-8772. Fix TestStandbyIsHot#testDatanodeRestarts which occasionally fails. Contributed by Walter Su.

2015-08-24 Thread zhz
HDFS-8772. Fix TestStandbyIsHot#testDatanodeRestarts which occasionally fails. 
Contributed by Walter Su.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98a27d11
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98a27d11
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98a27d11

Branch: refs/heads/HDFS-7285
Commit: 98a27d110129c7b32455035831480f1c6197260b
Parents: 4bc42d7
Author: Andrew Wang w...@apache.org
Authored: Wed Aug 5 16:35:41 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Fri Aug 7 09:51:46 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  | 30 +++-
 .../server/namenode/ha/TestStandbyIsHot.java|  2 ++
 3 files changed, 34 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/98a27d11/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 051dc8a..4e97b6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -769,6 +769,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8856. Make LeaseManager#countPath O(1). (Arpit Agarwal)
 
+HDFS-8772. Fix TestStandbyIsHot#testDatanodeRestarts which occasionally 
fails.
+(Walter Su via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98a27d11/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 0a21886..7052321 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -61,7 +61,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.TimeoutException;
 
+import com.google.common.base.Supplier;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
 import org.apache.commons.logging.Log;
@@ -86,6 +88,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Util;
@@ -114,6 +117,7 @@ import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
@@ -2386,7 +2390,31 @@ public class MiniDFSCluster {
 
 client.close();
   }
-  
+
+  /** Wait until the given namenode gets first block reports from all the 
datanodes */
+  public void waitFirstBRCompleted(int nnIndex, int timeout) throws
+  IOException, TimeoutException, InterruptedException {
+if (namenodes.size() == 0 || getNN(nnIndex) == null || 
getNN(nnIndex).nameNode == null) {
+  return;
+}
+
+final FSNamesystem ns = getNamesystem(nnIndex);
+final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+  @Override
+  public Boolean get() {
+List<DatanodeDescriptor> nodes = dm.getDatanodeListForReport
+(DatanodeReportType.LIVE);
+for (DatanodeDescriptor node : nodes) {
+  if (!node.checkBlockReportReceived()) {
+return false;
+  }
+}
+return true;
+  }
+}, 100, timeout);
+  }
+
   /**
* Wait until the cluster is active and running.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98a27d11/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
--
diff --git 

[23/50] [abbrv] hadoop git commit: YARN-3987. Send AM container completed msg to NM once AM finishes. Contributed by sandflee

2015-08-24 Thread zhz
YARN-3987. Send AM container completed msg to NM once AM finishes. Contributed 
by sandflee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a030546
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a030546
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a030546

Branch: refs/heads/HDFS-7285
Commit: 0a030546e24c55662a603bb63c9029ad0ccf43fc
Parents: 7a445fc
Author: Jian He jia...@apache.org
Authored: Thu Aug 13 16:20:36 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Thu Aug 13 16:22:53 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../rmapp/attempt/RMAppAttemptImpl.java   | 14 ++
 2 files changed, 17 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a030546/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a4c16b1..c451320 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -769,6 +769,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4047. ClientRMService getApplications has high scheduler lock 
contention.
 (Jason Lowe via jianhe)
 
+YARN-3987. Send AM container completed msg to NM once AM finishes.
+(sandflee via jianhe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a030546/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 0914022..80f5eb0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -1658,6 +1658,16 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 }
   }
 
+  // Ack NM to remove finished AM container, not waiting for
+  // new appattempt to pull am container complete msg, new  appattempt
+  // may launch fail and leaves too many completed container in NM
+  private void sendFinishedAMContainerToNM(NodeId nodeId,
+  ContainerId containerId) {
+List<ContainerId> containerIdList = new ArrayList<ContainerId>();
+containerIdList.add(containerId);
+eventHandler.handle(new RMNodeFinishedContainersPulledByAMEvent(
+nodeId, containerIdList));
+  }
 
   // Ack NM to remove finished containers from context.
   private void sendFinishedContainersToNM() {
@@ -1686,9 +1696,13 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
  new ArrayList<ContainerStatus>());
 appAttempt.finishedContainersSentToAM.get(nodeId).add(
   containerFinishedEvent.getContainerStatus());
+
 if (!appAttempt.getSubmissionContext()
   .getKeepContainersAcrossApplicationAttempts()) {
   appAttempt.sendFinishedContainersToNM();
+} else {
+  appAttempt.sendFinishedAMContainerToNM(nodeId,
+  containerFinishedEvent.getContainerStatus().getContainerId());
 }
   }
 



[08/50] [abbrv] hadoop git commit: Adding release 2.6.1 to CHANGES.txt

2015-08-24 Thread zhz
Adding release 2.6.1 to CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c796fd1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c796fd1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c796fd1

Branch: refs/heads/HDFS-7285
Commit: 7c796fd12ca5092d74e6cc7044f28496f9884983
Parents: 1fc3c77
Author: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 
vino...@apache.org
Authored: Tue Aug 11 13:20:23 2015 -0700
Committer: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 
vino...@apache.org
Committed: Tue Aug 11 13:20:23 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c796fd1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ada1056..a1f6861 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1712,6 +1712,18 @@ Release 2.7.0 - 2015-04-20
 YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token
 renewal of applications part of a bigger workflow. (Daryn Sharp via 
vinodkv)
 
+Release 2.6.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



[03/50] [abbrv] hadoop git commit: HDFS-8866. Typo in docs: Rumtime - Runtime. Contributed by Gabor Liptak.

2015-08-24 Thread zhz
HDFS-8866. Typo in docs: Rumtime -> Runtime. Contributed by Gabor Liptak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f73bdd0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f73bdd0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f73bdd0

Branch: refs/heads/HDFS-7285
Commit: 8f73bdd06b16d5048ffb6071bbcecf849c6225db
Parents: 98a27d1
Author: Jakob Homan jgho...@gmail.com
Authored: Fri Aug 7 11:38:31 2015 -0700
Committer: Jakob Homan jgho...@gmail.com
Committed: Fri Aug 7 11:38:31 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f73bdd0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4e97b6b..565e469 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1118,6 +1118,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8844. TestHDFSCLI does not cleanup the test directory (Masatake
 Iwasaki via Colin P. McCabe)
 
+HDFS-8866. Typo in docs: Rumtime -> Runtime. (Gabor Liptak via jghoman)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f73bdd0/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index e8f5fee..f4f79b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -1002,7 +1002,7 @@ When an operation fails, the server may throw an 
exception. The JSON schema of e
 | `SecurityException ` | `401 Unauthorized ` |
 | `IOException ` | `403 Forbidden ` |
 | `FileNotFoundException ` | `404 Not Found ` |
-| `RumtimeException ` | `500 Internal Server Error` |
+| `RuntimeException ` | `500 Internal Server Error` |
 
 Below are examples of exception responses.
 



[05/50] [abbrv] hadoop git commit: HDFS-8818. Changes the global moveExecutor to per datanode executors and changes MAX_SIZE_TO_MOVE to be configurable.

2015-08-24 Thread zhz
HDFS-8818. Changes the global moveExecutor to per datanode executors and 
changes MAX_SIZE_TO_MOVE to be configurable.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b56daff6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b56daff6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b56daff6

Branch: refs/heads/HDFS-7285
Commit: b56daff6a186599764b046248565918b894ec116
Parents: cf9d3c9
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Mon Aug 10 16:52:02 2015 -0700
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Mon Aug 10 16:52:02 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   3 +
 .../hadoop/hdfs/server/balancer/Balancer.java   |  44 ++--
 .../hadoop/hdfs/server/balancer/Dispatcher.java | 101 +++
 .../hdfs/server/balancer/MovedBlocks.java   |   5 +
 .../hdfs/server/balancer/TestBalancer.java  |  21 +++-
 6 files changed, 144 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b56daff6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 565e469..15c9df5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -772,6 +772,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8772. Fix TestStandbyIsHot#testDatanodeRestarts which occasionally 
fails.
 (Walter Su via wang)
 
+HDFS-8818. Changes the global moveExecutor to per datanode executors and
+changes MAX_SIZE_TO_MOVE to be configurable.  (szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b56daff6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1e5bf0d..4ef7a4d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -351,6 +351,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_BALANCER_MOVERTHREADS_DEFAULT = 1000;
  public static final String  DFS_BALANCER_DISPATCHERTHREADS_KEY = 
"dfs.balancer.dispatcherThreads";
  public static final int DFS_BALANCER_DISPATCHERTHREADS_DEFAULT = 200;
+  public static final String  DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY = 
"dfs.balancer.max-size-to-move";
+  public static final long    DFS_BALANCER_MAX_SIZE_TO_MOVE_DEFAULT = 
10L*1024*1024*1024;
+
 
  public static final String  DFS_MOVER_MOVEDWINWIDTH_KEY = 
"dfs.mover.movedWinWidth";
  public static final long    DFS_MOVER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b56daff6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 8b7d802..742a300 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -34,6 +34,7 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -167,9 +168,6 @@ public class Balancer {
 
   static final Path BALANCER_ID_PATH = new Path(/system/balancer.id);
 
-  private static final long GB = 1L << 30; //1GB
-  private static final long MAX_SIZE_TO_MOVE = 10*GB;
-
  private static final String USAGE = "Usage: hdfs balancer"
  + "\n\t[-policy <policy>]\tthe balancing policy: "
  + BalancingPolicy.Node.INSTANCE.getName() + " or "
@@ -192,6 +190,7 @@ public class Balancer {
   private final BalancingPolicy policy;
   private final boolean runDuringUpgrade;
   private final double threshold;
+  private final long maxSizeToMove;
 
   // all data 

[01/50] [abbrv] hadoop git commit: YARN-3966. Fix excessive loggings in CapacityScheduler. (Jian He via wangda)

2015-08-24 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 b57c9a35f - 6b6a63bbb


YARN-3966. Fix excessive loggings in CapacityScheduler. (Jian He via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bc42d76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bc42d76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bc42d76

Branch: refs/heads/HDFS-7285
Commit: 4bc42d76e7fa53cb268cab0f9fe1fd8d8dbb17fd
Parents: b6265d3
Author: Wangda Tan wan...@apache.org
Authored: Fri Aug 7 09:46:57 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Fri Aug 7 09:46:57 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  2 ++
 .../scheduler/AbstractYarnScheduler.java| 10 +++---
 .../scheduler/capacity/CapacityScheduler.java   | 36 ++--
 .../scheduler/capacity/LeafQueue.java   | 18 +-
 .../scheduler/capacity/ParentQueue.java | 14 
 .../scheduler/common/fica/FiCaSchedulerApp.java |  8 +
 6 files changed, 33 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bc42d76/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index fa04337..7d34eeb 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -382,6 +382,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3974. Refactor the reservation system test cases to use parameterized 
 base test. (subru via curino)
 
+YARN-3966. Fix excessive loggings in CapacityScheduler. (Jian He via 
wangda)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bc42d76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 094f77d..d69600a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -215,12 +215,12 @@ public abstract class AbstractYarnScheduler
   protected synchronized void containerLaunchedOnNode(
   ContainerId containerId, SchedulerNode node) {
 // Get the application for the finished container
-SchedulerApplicationAttempt application = getCurrentAttemptForContainer
-(containerId);
+SchedulerApplicationAttempt application =
+getCurrentAttemptForContainer(containerId);
 if (application == null) {
-  LOG.info(Unknown application 
-  + containerId.getApplicationAttemptId().getApplicationId()
-  +  launched container  + containerId +  on node:  + node);
+  LOG.info(Unknown application  + containerId.getApplicationAttemptId()
+  .getApplicationId() +  launched container  + containerId
+  +  on node:  + node);
   this.rmContext.getDispatcher().getEventHandler()
 .handle(new RMNodeCleanContainerEvent(node.getNodeID(), containerId));
   return;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bc42d76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 0b39d35..1d353a6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 

[24/50] [abbrv] hadoop git commit: HADOOP-12322. Typos in rpcmetrics.java. (Contributed by Anu Engineer)

2015-08-24 Thread zhz
HADOOP-12322. Typos in rpcmetrics.java. (Contributed by Anu Engineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b1cefc5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b1cefc5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b1cefc5

Branch: refs/heads/HDFS-7285
Commit: 6b1cefc561bf407daf745606275c03b9cda5ef4d
Parents: 0a03054
Author: Arpit Agarwal a...@apache.org
Authored: Thu Aug 13 21:17:30 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Thu Aug 13 21:17:30 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt| 3 +++
 .../main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java| 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b1cefc5/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5d8d20d..e458042 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1063,6 +1063,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12258. Need translate java.nio.file.NoSuchFileException to
 FileNotFoundException to avoid regression. (Zhihai Xu via cnauroth)
 
+HADOOP-12322. typos in rpcmetrics.java. (Anu Engineer via
+Arpit Agarwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b1cefc5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
index e90e516..bc9aa89 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
@@ -85,7 +85,7 @@ public class RpcMetrics {
   @Metric(Number of sent bytes) MutableCounterLong sentBytes;
   @Metric(Queue time) MutableRate rpcQueueTime;
   MutableQuantiles[] rpcQueueTimeMillisQuantiles;
-  @Metric(Processsing time) MutableRate rpcProcessingTime;
+  @Metric(Processing time) MutableRate rpcProcessingTime;
   MutableQuantiles[] rpcProcessingTimeMillisQuantiles;
   @Metric(Number of authentication failures)
   MutableCounterLong rpcAuthenticationFailures;
@@ -93,7 +93,7 @@ public class RpcMetrics {
   MutableCounterLong rpcAuthenticationSuccesses;
   @Metric(Number of authorization failures)
   MutableCounterLong rpcAuthorizationFailures;
-  @Metric(Number of authorization sucesses)
+  @Metric(Number of authorization successes)
   MutableCounterLong rpcAuthorizationSuccesses;
   @Metric(Number of client backoff requests)
   MutableCounterLong rpcClientBackoff;
@@ -108,7 +108,7 @@ public class RpcMetrics {
 
   // Public instrumentation methods that could be extracted to an
   // abstract class if we decide to do custom instrumentation classes a la
-  // JobTrackerInstrumenation. The methods with //@Override comment are
+  // JobTrackerInstrumentation. The methods with //@Override comment are
   // candidates for abstract methods in a abstract instrumentation class.
 
   /**



[13/50] [abbrv] hadoop git commit: HDFS-8879. Quota by storage type usage incorrectly initialized upon namenode restart. Contributed by Xiaoyu Yao.

2015-08-24 Thread zhz
HDFS-8879. Quota by storage type usage incorrectly initialized upon namenode 
restart. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e715a4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e715a4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e715a4f

Branch: refs/heads/HDFS-7285
Commit: 3e715a4f4c46bcd8b3054cb0566e526c46bd5d66
Parents: e5003be
Author: Xiaoyu Yao x...@apache.org
Authored: Tue Aug 11 21:42:53 2015 -0700
Committer: Xiaoyu Yao x...@apache.org
Committed: Wed Aug 12 15:20:54 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../apache/hadoop/hdfs/server/namenode/FSImage.java   |  6 ++
 .../hdfs/server/namenode/TestQuotaByStorageType.java  | 14 ++
 3 files changed, 15 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e715a4f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6c2e0f9..df9b742 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1128,6 +1128,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8866. Typo in docs: Rumtime - Runtime. (Gabor Liptak via jghoman)
 
+HDFS-8879. Quota by storage type usage incorrectly initialized upon 
namenode
+restart. (xyao)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e715a4f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 0dd7855..1f8cea3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -907,11 +907,9 @@ public class FSImage implements Closeable {
 +  quota =  + ssQuota +   consumed =  + ssConsumed);
   }
 
-  final EnumCountersStorageType typeSpaces =
-  new EnumCountersStorageType(StorageType.class);
+  final EnumCountersStorageType typeSpaces = counts.getTypeSpaces();
   for (StorageType t : StorageType.getTypesSupportingQuota()) {
-final long typeSpace = counts.getTypeSpaces().get(t) -
-parentTypeSpaces.get(t);
+final long typeSpace = typeSpaces.get(t) - parentTypeSpaces.get(t);
 final long typeQuota = q.getTypeSpaces().get(t);
 if (Quota.isViolated(typeQuota, typeSpace)) {
   LOG.warn(Storage type quota violation in image for 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e715a4f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
index 6703066..f56c5a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
@@ -67,10 +67,7 @@ public class TestQuotaByStorageType {
 .storageTypes(new StorageType[]{StorageType.SSD, StorageType.DEFAULT})
 .build();
 cluster.waitActive();
-
-fsdir = cluster.getNamesystem().getFSDirectory();
-dfs = cluster.getFileSystem();
-fsn = cluster.getNamesystem();
+refreshClusterState();
   }
 
   @After
@@ -80,6 +77,13 @@ public class TestQuotaByStorageType {
 }
   }
 
+  // Cluster state must be refreshed after each start/restart in the test
+  private void refreshClusterState() throws IOException{
+fsdir = cluster.getNamesystem().getFSDirectory();
+dfs = cluster.getFileSystem();
+fsn = cluster.getNamesystem();
+  }
+
   @Test(timeout = 6)
   public void testQuotaByStorageTypeWithFileCreateOneSSD() throws Exception {
 testQuotaByStorageTypeWithFileCreateCase(
@@ -662,6 +666,7 @@ public class TestQuotaByStorageType {
 
 // Restart namenode to make sure the editlog is correct
 cluster.restartNameNode(true);
+refreshClusterState();
 
 INode testDirNodeAfterNNRestart = 

[25/50] [abbrv] hadoop git commit: HDFS-7213. processIncrementalBlockReport performance degradation. Contributed by Eric Payne. Moved CHANGES.TXT entry to 2.6.1

2015-08-24 Thread zhz
HDFS-7213. processIncrementalBlockReport performance degradation. Contributed 
by Eric Payne.
Moved CHANGES.TXT entry to 2.6.1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d25cb8fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d25cb8fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d25cb8fe

Branch: refs/heads/HDFS-7285
Commit: d25cb8fe12d00faf3e8f3bfd23fd1b01981a340f
Parents: 6b1cefc
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Aug 14 11:23:51 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri Aug 14 11:23:51 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d25cb8fe/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ce9a3f1..1f72264 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1265,9 +1265,6 @@ Release 2.7.1 - 2015-07-06
 HDFS-8451. DFSClient probe for encryption testing interprets empty URI
 property for enabled. (Steve Loughran via xyao)
 
-HDFS-8486. DN startup may cause severe data loss (Daryn Sharp via Colin P.
-McCabe)
-
 HDFS-8270. create() always retried with hardcoded timeout when file already
 exists with open lease (J.Andreina via vinayakumarb)
 
@@ -1407,9 +1404,6 @@ Release 2.7.0 - 2015-04-20
 HDFS-5928. Show namespace and namenode ID on NN dfshealth page.
 (Siqi Li via wheat9)
 
-HDFS-7213. processIncrementalBlockReport performance degradation.
-(Eric Payne via kihwal)
-
 HDFS-7280. Use netty 4 in WebImageViewer. (wheat9)
 
 HDFS-3342. SocketTimeoutException in BlockSender.sendChunks could
@@ -2339,6 +2333,12 @@ Release 2.6.1 - UNRELEASED
 HDFS-7733. NFS: readdir/readdirplus return null directory
 attribute on failure. (Arpit Agarwal)
 
+HDFS-8486. DN startup may cause severe data loss (Daryn Sharp via Colin P.
+McCabe)
+
+HDFS-7213. processIncrementalBlockReport performance degradation.
+(Eric Payne via kihwal)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



[27/50] [abbrv] hadoop git commit: HDFS-7263. Snapshot read can reveal future bytes for appended files. Contributed by Tao Luo. Moved CHANGES.txt entry to 2.6.1

2015-08-24 Thread zhz
HDFS-7263. Snapshot read can reveal future bytes for appended files. 
Contributed by Tao Luo.
Moved CHANGES.txt entry to 2.6.1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa264114
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa264114
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa264114

Branch: refs/heads/HDFS-7285
Commit: fa2641143c0d74c4fef122d79f27791e15d3b43f
Parents: f2b4bc9
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Aug 14 11:45:43 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri Aug 14 11:45:43 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa264114/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e4e2896..1507cbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1819,9 +1819,6 @@ Release 2.7.0 - 2015-04-20
 HDFS-7301. TestMissingBlocksAlert should use MXBeans instead of old web UI.
 (Zhe Zhang via wheat9)
 
-HDFS-7263. Snapshot read can reveal future bytes for appended files.
-(Tao Luo via shv)
-
 HDFS-7315. DFSTestUtil.readFileBuffer opens extra FSDataInputStream.
 (Plamen Jeliazkov via wheat9)
 
@@ -2339,6 +2336,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7235. DataNode#transferBlock should report blocks that don't exist
 using reportBadBlock (yzhang via cmccabe)
 
+HDFS-7263. Snapshot read can reveal future bytes for appended files.
+(Tao Luo via shv)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



Git Push Summary

2015-08-24 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285-backup [created] b57c9a35f


hadoop git commit: HADOOP-12325. RPC Metrics : Add the ability to track and log slow RPCs. Contributed by Anu Engineer

2015-08-24 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 95f8e9369 - 7a0a31586


HADOOP-12325. RPC Metrics : Add the ability to track and log slow RPCs. 
Contributed by Anu Engineer

(cherry picked from commit 48774d0a45d95557affbd6bbaf8035cc9575ef36)

Conflicts:
hadoop-common-project/hadoop-common/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a0a3158
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a0a3158
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a0a3158

Branch: refs/heads/branch-2
Commit: 7a0a31586a2af12dd62f3df783fe087f2eb66a26
Parents: 95f8e93
Author: Xiaoyu Yao x...@apache.org
Authored: Mon Aug 24 14:31:24 2015 -0700
Committer: Xiaoyu Yao x...@apache.org
Committed: Mon Aug 24 14:33:37 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../fs/CommonConfigurationKeysPublic.java   |  5 ++
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  5 +-
 .../main/java/org/apache/hadoop/ipc/Server.java | 60 +++
 .../apache/hadoop/ipc/WritableRpcEngine.java|  3 +
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   | 48 
 .../apache/hadoop/metrics2/lib/MutableStat.java |  7 +-
 .../src/main/resources/core-default.xml |  9 +++
 .../org/apache/hadoop/ipc/TestProtoBufRpc.java  | 77 +++-
 .../org/apache/hadoop/test/MetricsAsserts.java  |  2 +-
 .../hadoop-common/src/test/proto/test.proto |  7 ++
 .../src/test/proto/test_rpc_service.proto   |  1 +
 12 files changed, 223 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a0a3158/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 837b84b..6c27abe 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -243,6 +243,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-9891. CLIMiniCluster instructions fail with MiniYarnCluster
 ClassNotFoundException (Darrell Taylor via aw)
 
+HADOOP-12325. RPC Metrics : Add the ability track and log slow RPCs.
+(Anu Engineer via xyao)
+
  OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a0a3158/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index c3190b2..3c603d9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -240,6 +240,11 @@ public class CommonConfigurationKeysPublic {
   /** Default value for IPC_SERVER_MAX_CONNECTIONS_KEY */
   public static final int IPC_SERVER_MAX_CONNECTIONS_DEFAULT = 0;
 
+  /** Logs if a RPC is really slow compared to rest of RPCs. */
+  public static final String IPC_SERVER_LOG_SLOW_RPC =
+ipc.server.log.slow.rpc;
+  public static final boolean IPC_SERVER_LOG_SLOW_RPC_DEFAULT = false;
+
   /** See a href={@docRoot}/../core-default.htmlcore-default.xml/a */
   public static final String  HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY =
 hadoop.rpc.socket.factory.class.default;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a0a3158/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index abb494a..df5ce5f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -567,7 +567,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   /**
* This is a server side method, which is invoked over RPC. On success
* the return response has protobuf response payload. On failure, the
-   * exception name and the stack trace are return in the resposne.
+   * exception name and the stack trace are returned in the response.
  

[3/4] hadoop git commit: HDFS-8934. Move ShortCircuitShm to hdfs-client. Contributed by Mingliang Liu.

2015-08-24 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/95f8e936/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
new file mode 100644
index 000..78325a3
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
@@ -0,0 +1,647 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.shortcircuit;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.BitSet;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Random;
+
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.fs.InvalidRequestException;
+import org.apache.hadoop.hdfs.ExtendedBlockId;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.io.nativeio.NativeIO.POSIX;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import sun.misc.Unsafe;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ComparisonChain;
+import com.google.common.primitives.Ints;
+
+/**
+ * A shared memory segment used to implement short-circuit reads.
+ */
+public class ShortCircuitShm {
+  private static final Logger LOG = 
LoggerFactory.getLogger(ShortCircuitShm.class);
+
+  protected static final int BYTES_PER_SLOT = 64;
+
+  private static final Unsafe unsafe = safetyDance();
+
+  private static Unsafe safetyDance() {
+try {
+  Field f = Unsafe.class.getDeclaredField(theUnsafe);
+  f.setAccessible(true);
+  return (Unsafe)f.get(null);
+} catch (Throwable e) {
+  LOG.error(failed to load misc.Unsafe, e);
+}
+return null;
+  }
+
+  /**
+   * Calculate the usable size of a shared memory segment.
+   * We round down to a multiple of the slot size and do some validation.
+   *
+   * @param stream The stream we're using.
+   * @return   The usable size of the shared memory segment.
+   */
+  private static int getUsableLength(FileInputStream stream)
+  throws IOException {
+int intSize = Ints.checkedCast(stream.getChannel().size());
+int slots = intSize / BYTES_PER_SLOT;
+if (slots == 0) {
+  throw new IOException(size of shared memory segment was  +
+  intSize + , but that is not enough to hold even one slot.);
+}
+return slots * BYTES_PER_SLOT;
+  }
+
+  /**
+   * Identifies a DfsClientShm.
+   */
+  public static class ShmId implements ComparableShmId {
+private static final Random random = new Random();
+private final long hi;
+private final long lo;
+
+/**
+ * Generate a random ShmId.
+ * 
+ * We generate ShmIds randomly to prevent a malicious client from
+ * successfully guessing one and using that to interfere with another
+ * client.
+ */
+public static ShmId createRandom() {
+  return new ShmId(random.nextLong(), random.nextLong());
+}
+
+public ShmId(long hi, long lo) {
+  this.hi = hi;
+  this.lo = lo;
+}
+
+public long getHi() {
+  return hi;
+}
+
+public long getLo() {
+  return lo;
+}
+
+@Override
+public boolean equals(Object o) {
+  if ((o == null) || (o.getClass() != this.getClass())) {
+return false;
+  }
+  ShmId other = (ShmId)o;
+  return new EqualsBuilder().
+  append(hi, other.hi).
+  append(lo, other.lo).
+  isEquals();
+}
+
+@Override
+public int hashCode() {
+  return new HashCodeBuilder().
+  append(this.hi).
+  append(this.lo).
+  toHashCode();
+}
+
+@Override
+public String toString() {
+  return String.format(%016x%016x, hi, lo);
+   

[1/4] hadoop git commit: HDFS-8934. Move ShortCircuitShm to hdfs-client. Contributed by Mingliang Liu.

2015-08-24 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a727c6db0 - 95f8e9369


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95f8e936/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
deleted file mode 100644
index 17365fb..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.util;
-
-import java.io.EOFException;
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-import com.google.common.base.Preconditions;
-
-/**
- * An InputStream implementations which reads from some other InputStream
- * but expects an exact number of bytes. Any attempts to read past the
- * specified number of bytes will return as if the end of the stream
- * was reached. If the end of the underlying stream is reached prior to
- * the specified number of bytes, an EOFException is thrown.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class ExactSizeInputStream extends FilterInputStream {
-  private int remaining;
-
-  /**
-   * Construct an input stream that will read no more than
-   * 'numBytes' bytes.
-   * 
-   * If an EOF occurs on the underlying stream before numBytes
-   * bytes have been read, an EOFException will be thrown.
-   * 
-   * @param in the inputstream to wrap
-   * @param numBytes the number of bytes to read
-   */
-  public ExactSizeInputStream(InputStream in, int numBytes) {
-super(in);
-Preconditions.checkArgument(numBytes = 0,
-Negative expected bytes: , numBytes);
-this.remaining = numBytes;
-  }
-
-  @Override
-  public int available() throws IOException {
-return Math.min(super.available(), remaining);
-  }
-
-  @Override
-  public int read() throws IOException {
-// EOF if we reached our limit
-if (remaining = 0) {
-  return -1;
-}
-final int result = super.read();
-if (result = 0) {
-  --remaining;
-} else if (remaining  0) {
-  // Underlying stream reached EOF but we haven't read the expected
-  // number of bytes.
-  throw new EOFException(
-  Premature EOF. Expected  + remaining + more bytes);
-}
-return result;
-  }
-
-  @Override
-  public int read(final byte[] b, final int off, int len)
-  throws IOException {
-if (remaining = 0) {
-  return -1;
-}
-len = Math.min(len, remaining);
-final int result = super.read(b, off, len);
-if (result = 0) {
-  remaining -= result;
-} else if (remaining  0) {
-  // Underlying stream reached EOF but we haven't read the expected
-  // number of bytes.
-  throw new EOFException(
-  Premature EOF. Expected  + remaining + more bytes);
-}
-return result;
-  }
-
-  @Override
-  public long skip(final long n) throws IOException {
-final long result = super.skip(Math.min(n, remaining));
-if (result  0) {
-  remaining -= result;
-} else if (remaining  0) {
-  // Underlying stream reached EOF but we haven't read the expected
-  // number of bytes.
-  throw new EOFException(
-  Premature EOF. Expected  + remaining + more bytes);
-}
-return result;
-  }
-  
-  @Override
-  public boolean markSupported() {
-return false;
-  }
-
-  @Override
-  public void mark(int readlimit) {
-throw new UnsupportedOperationException();
-  }
-  
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95f8e936/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
--
diff --git 

[2/4] hadoop git commit: HDFS-8934. Move ShortCircuitShm to hdfs-client. Contributed by Mingliang Liu.

2015-08-24 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/95f8e936/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 02b20d6..a95f397 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -698,7 +698,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB 
implements
   RpcController controller, GetDatanodeReportRequestProto req)
   throws ServiceException {
 try {
-  List? extends DatanodeInfoProto result = PBHelper.convert(server
+  List? extends DatanodeInfoProto result = PBHelperClient.convert(server
   .getDatanodeReport(PBHelper.convert(req.getType(;
   return GetDatanodeReportResponseProto.newBuilder()
   .addAllDi(result).build();
@@ -892,7 +892,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB 
implements
   server.setQuota(req.getPath(), req.getNamespaceQuota(),
   req.getStoragespaceQuota(),
   req.hasStorageType() ?
-  PBHelper.convertStorageType(req.getStorageType()): null);
+  PBHelperClient.convertStorageType(req.getStorageType()): null);
   return VOID_SETQUOTA_RESPONSE;
 } catch (IOException e) {
   throw new ServiceException(e);
@@ -992,7 +992,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB 
implements
   GetDelegationTokenResponseProto.Builder rspBuilder = 
   GetDelegationTokenResponseProto.newBuilder();
   if (token != null) {
-rspBuilder.setToken(PBHelper.convert(token));
+rspBuilder.setToken(PBHelperClient.convert(token));
   }
   return rspBuilder.build();
 } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95f8e936/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 7e57b97..a0431b1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -390,7 +390,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   String holder) throws AccessControlException, FileNotFoundException,
 UnresolvedLinkException, IOException {
 AbandonBlockRequestProto req = AbandonBlockRequestProto.newBuilder()
-.setB(PBHelper.convert(b)).setSrc(src).setHolder(holder)
+.setB(PBHelperClient.convert(b)).setSrc(src).setHolder(holder)
 .setFileId(fileId).build();
 try {
   rpcProxy.abandonBlock(null, req);
@@ -409,9 +409,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
 AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
 .setSrc(src).setClientName(clientName).setFileId(fileId);
 if (previous != null) 
-  req.setPrevious(PBHelper.convert(previous)); 
-if (excludeNodes != null) 
-  req.addAllExcludeNodes(PBHelper.convert(excludeNodes));
+  req.setPrevious(PBHelperClient.convert(previous));
+if (excludeNodes != null)
+  req.addAllExcludeNodes(PBHelperClient.convert(excludeNodes));
 if (favoredNodes != null) {
   req.addAllFavoredNodes(Arrays.asList(favoredNodes));
 }
@@ -433,10 +433,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
 .newBuilder()
 .setSrc(src)
 .setFileId(fileId)
-.setBlk(PBHelper.convert(blk))
-.addAllExistings(PBHelper.convert(existings))
+.setBlk(PBHelperClient.convert(blk))
+.addAllExistings(PBHelperClient.convert(existings))
 .addAllExistingStorageUuids(Arrays.asList(existingStorageIDs))
-.addAllExcludes(PBHelper.convert(excludes))
+.addAllExcludes(PBHelperClient.convert(excludes))
 .setNumAdditionalNodes(numAdditionalNodes)
 .setClientName(clientName)
 .build();
@@ -458,7 +458,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
 .setClientName(clientName)
 

hadoop git commit: HADOOP-12325. RPC Metrics : Add the ability track and log slow RPCs. Contributed by Anu Engineer

2015-08-24 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk b5ce87f84 - 48774d0a4


HADOOP-12325. RPC Metrics : Add the ability track and log slow RPCs. 
Contributed by Anu Engineer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48774d0a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48774d0a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48774d0a

Branch: refs/heads/trunk
Commit: 48774d0a45d95557affbd6bbaf8035cc9575ef36
Parents: b5ce87f
Author: Xiaoyu Yao x...@apache.org
Authored: Mon Aug 24 14:31:24 2015 -0700
Committer: Xiaoyu Yao x...@apache.org
Committed: Mon Aug 24 14:31:24 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../fs/CommonConfigurationKeysPublic.java   |  5 ++
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  5 +-
 .../main/java/org/apache/hadoop/ipc/Server.java | 60 +++
 .../apache/hadoop/ipc/WritableRpcEngine.java|  3 +
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   | 48 
 .../apache/hadoop/metrics2/lib/MutableStat.java |  7 +-
 .../src/main/resources/core-default.xml |  9 +++
 .../org/apache/hadoop/ipc/TestProtoBufRpc.java  | 77 +++-
 .../org/apache/hadoop/test/MetricsAsserts.java  |  2 +-
 .../hadoop-common/src/test/proto/test.proto |  7 ++
 .../src/test/proto/test_rpc_service.proto   |  1 +
 12 files changed, 223 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48774d0a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index b4445fa..4250fc3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -753,6 +753,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12050. Enable MaxInactiveInterval for hadoop http auth token
 (hzlu via benoyantony)
 
+HADOOP-12325. RPC Metrics : Add the ability track and log slow RPCs.
+(Anu Engineer via xyao)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48774d0a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 7231d59..24d648f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -235,6 +235,11 @@ public class CommonConfigurationKeysPublic {
   /** Default value for IPC_SERVER_MAX_CONNECTIONS_KEY */
   public static final int IPC_SERVER_MAX_CONNECTIONS_DEFAULT = 0;
 
+  /** Logs if a RPC is really slow compared to rest of RPCs. */
+  public static final String IPC_SERVER_LOG_SLOW_RPC =
+      "ipc.server.log.slow.rpc";
+  public static final boolean IPC_SERVER_LOG_SLOW_RPC_DEFAULT = false;
+
   /** See a href={@docRoot}/../core-default.htmlcore-default.xml/a */
   public static final String  HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY =
  "hadoop.rpc.socket.factory.class.default";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48774d0a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index cc75f5c..532246d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -567,7 +567,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   /**
* This is a server side method, which is invoked over RPC. On success
* the return response has protobuf response payload. On failure, the
-   * exception name and the stack trace are return in the resposne.
+   * exception name and the stack trace are returned in the response.
* See {@link HadoopRpcResponseProto}
* 
* In this method there three types of exceptions possible and they are
@@ -657,6 +657,9 @@ public class 

[2/4] hadoop git commit: Revert HDFS-8934. Move ShortCircuitShm to hdfs-client. Contributed by Mingliang Liu.

2015-08-24 Thread wang
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a727c6db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index a95f397..02b20d6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -698,7 +698,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB 
implements
   RpcController controller, GetDatanodeReportRequestProto req)
   throws ServiceException {
 try {
-  List<? extends DatanodeInfoProto> result = PBHelperClient.convert(server
+  List<? extends DatanodeInfoProto> result = PBHelper.convert(server
   .getDatanodeReport(PBHelper.convert(req.getType())));
   return GetDatanodeReportResponseProto.newBuilder()
   .addAllDi(result).build();
@@ -892,7 +892,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB 
implements
   server.setQuota(req.getPath(), req.getNamespaceQuota(),
   req.getStoragespaceQuota(),
   req.hasStorageType() ?
-  PBHelperClient.convertStorageType(req.getStorageType()): null);
+  PBHelper.convertStorageType(req.getStorageType()): null);
   return VOID_SETQUOTA_RESPONSE;
 } catch (IOException e) {
   throw new ServiceException(e);
@@ -992,7 +992,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB 
implements
   GetDelegationTokenResponseProto.Builder rspBuilder = 
   GetDelegationTokenResponseProto.newBuilder();
   if (token != null) {
-rspBuilder.setToken(PBHelperClient.convert(token));
+rspBuilder.setToken(PBHelper.convert(token));
   }
   return rspBuilder.build();
 } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a727c6db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index a0431b1..7e57b97 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -390,7 +390,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   String holder) throws AccessControlException, FileNotFoundException,
 UnresolvedLinkException, IOException {
 AbandonBlockRequestProto req = AbandonBlockRequestProto.newBuilder()
-.setB(PBHelperClient.convert(b)).setSrc(src).setHolder(holder)
+.setB(PBHelper.convert(b)).setSrc(src).setHolder(holder)
 .setFileId(fileId).build();
 try {
   rpcProxy.abandonBlock(null, req);
@@ -409,9 +409,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
 AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
 .setSrc(src).setClientName(clientName).setFileId(fileId);
 if (previous != null) 
-  req.setPrevious(PBHelperClient.convert(previous));
-if (excludeNodes != null)
-  req.addAllExcludeNodes(PBHelperClient.convert(excludeNodes));
+  req.setPrevious(PBHelper.convert(previous)); 
+if (excludeNodes != null) 
+  req.addAllExcludeNodes(PBHelper.convert(excludeNodes));
 if (favoredNodes != null) {
   req.addAllFavoredNodes(Arrays.asList(favoredNodes));
 }
@@ -433,10 +433,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
 .newBuilder()
 .setSrc(src)
 .setFileId(fileId)
-.setBlk(PBHelperClient.convert(blk))
-.addAllExistings(PBHelperClient.convert(existings))
+.setBlk(PBHelper.convert(blk))
+.addAllExistings(PBHelper.convert(existings))
 .addAllExistingStorageUuids(Arrays.asList(existingStorageIDs))
-.addAllExcludes(PBHelperClient.convert(excludes))
+.addAllExcludes(PBHelper.convert(excludes))
 .setNumAdditionalNodes(numAdditionalNodes)
 .setClientName(clientName)
 .build();
@@ -458,7 +458,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
 .setClientName(clientName)
 

[3/4] hadoop git commit: Revert HDFS-8934. Move ShortCircuitShm to hdfs-client. Contributed by Mingliang Liu.

2015-08-24 Thread wang
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a727c6db/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
deleted file mode 100644
index 78325a3..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
+++ /dev/null
@@ -1,647 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.shortcircuit;
-
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.util.BitSet;
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-import java.util.Random;
-
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
-import org.apache.hadoop.fs.InvalidRequestException;
-import org.apache.hadoop.hdfs.ExtendedBlockId;
-import org.apache.hadoop.io.nativeio.NativeIO;
-import org.apache.hadoop.io.nativeio.NativeIO.POSIX;
-import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.util.StringUtils;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import sun.misc.Unsafe;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ComparisonChain;
-import com.google.common.primitives.Ints;
-
-/**
- * A shared memory segment used to implement short-circuit reads.
- */
-public class ShortCircuitShm {
-  private static final Logger LOG = 
LoggerFactory.getLogger(ShortCircuitShm.class);
-
-  protected static final int BYTES_PER_SLOT = 64;
-
-  private static final Unsafe unsafe = safetyDance();
-
-  private static Unsafe safetyDance() {
-try {
-  Field f = Unsafe.class.getDeclaredField("theUnsafe");
-  f.setAccessible(true);
-  return (Unsafe)f.get(null);
-} catch (Throwable e) {
-  LOG.error("failed to load misc.Unsafe", e);
-}
-return null;
-  }
-
-  /**
-   * Calculate the usable size of a shared memory segment.
-   * We round down to a multiple of the slot size and do some validation.
-   *
-   * @param stream The stream we're using.
-   * @return   The usable size of the shared memory segment.
-   */
-  private static int getUsableLength(FileInputStream stream)
-  throws IOException {
-int intSize = Ints.checkedCast(stream.getChannel().size());
-int slots = intSize / BYTES_PER_SLOT;
-if (slots == 0) {
-  throw new IOException("size of shared memory segment was " +
-  intSize + ", but that is not enough to hold even one slot.");
-}
-return slots * BYTES_PER_SLOT;
-  }
-
-  /**
-   * Identifies a DfsClientShm.
-   */
-  public static class ShmId implements Comparable<ShmId> {
-private static final Random random = new Random();
-private final long hi;
-private final long lo;
-
-/**
- * Generate a random ShmId.
- * 
- * We generate ShmIds randomly to prevent a malicious client from
- * successfully guessing one and using that to interfere with another
- * client.
- */
-public static ShmId createRandom() {
-  return new ShmId(random.nextLong(), random.nextLong());
-}
-
-public ShmId(long hi, long lo) {
-  this.hi = hi;
-  this.lo = lo;
-}
-
-public long getHi() {
-  return hi;
-}
-
-public long getLo() {
-  return lo;
-}
-
-@Override
-public boolean equals(Object o) {
-  if ((o == null) || (o.getClass() != this.getClass())) {
-return false;
-  }
-  ShmId other = (ShmId)o;
-  return new EqualsBuilder().
-  append(hi, other.hi).
-  append(lo, other.lo).
-  isEquals();
-}
-
-@Override
-public int hashCode() {
-  return new HashCodeBuilder().
-  append(this.hi).
-  append(this.lo).
-  toHashCode();
-}
-
-@Override
-public String toString() {
-  return String.format("%016x%016x", hi, lo);

[4/4] hadoop git commit: Revert HDFS-8934. Move ShortCircuitShm to hdfs-client. Contributed by Mingliang Liu.

2015-08-24 Thread wang
Revert HDFS-8934. Move ShortCircuitShm to hdfs-client. Contributed by 
Mingliang Liu.

This reverts commit 8e4afa3a671583c95263218b85cf6bfbc1e43635.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a727c6db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a727c6db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a727c6db

Branch: refs/heads/branch-2
Commit: a727c6db0530aff5dcccb4181ba83e93e543ac5c
Parents: 87d0133
Author: Andrew Wang w...@apache.org
Authored: Mon Aug 24 11:51:46 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Mon Aug 24 11:51:46 2015 -0700

--
 .../org/apache/hadoop/hdfs/ExtendedBlockId.java |  82 ---
 .../org/apache/hadoop/hdfs/net/DomainPeer.java  | 132 
 .../java/org/apache/hadoop/hdfs/net/Peer.java   | 123 
 .../datatransfer/BlockConstructionStage.java|  62 --
 .../datatransfer/DataTransferProtoUtil.java | 146 -
 .../datatransfer/DataTransferProtocol.java  | 202 --
 .../hadoop/hdfs/protocol/datatransfer/Op.java   |  66 --
 .../hdfs/protocol/datatransfer/Sender.java  | 261 
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 254 
 .../token/block/InvalidBlockTokenException.java |  41 --
 .../hdfs/server/datanode/CachingStrategy.java   |  76 ---
 .../hadoop/hdfs/shortcircuit/DfsClientShm.java  | 119 
 .../hdfs/shortcircuit/DfsClientShmManager.java  | 522 ---
 .../hdfs/shortcircuit/ShortCircuitShm.java  | 647 ---
 .../hadoop/hdfs/util/ExactSizeInputStream.java  | 125 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 -
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |   4 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  10 +-
 .../org/apache/hadoop/hdfs/DataStreamer.java|   6 +-
 .../org/apache/hadoop/hdfs/ExtendedBlockId.java |  82 +++
 .../apache/hadoop/hdfs/RemoteBlockReader.java   |   4 +-
 .../apache/hadoop/hdfs/RemoteBlockReader2.java  |   4 +-
 .../org/apache/hadoop/hdfs/net/DomainPeer.java  | 132 
 .../java/org/apache/hadoop/hdfs/net/Peer.java   | 123 
 .../datatransfer/BlockConstructionStage.java|  62 ++
 .../datatransfer/DataTransferProtoUtil.java | 148 +
 .../datatransfer/DataTransferProtocol.java  | 201 ++
 .../hadoop/hdfs/protocol/datatransfer/Op.java   |  66 ++
 .../hdfs/protocol/datatransfer/PipelineAck.java |   2 +-
 .../hdfs/protocol/datatransfer/Receiver.java|   7 +-
 .../hdfs/protocol/datatransfer/Sender.java  | 261 
 .../datatransfer/sasl/DataTransferSaslUtil.java |   2 +-
 ...tDatanodeProtocolServerSideTranslatorPB.java |   2 +-
 .../ClientDatanodeProtocolTranslatorPB.java |   6 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java |   6 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  28 +-
 .../DatanodeProtocolClientSideTranslatorPB.java |   4 +-
 .../InterDatanodeProtocolTranslatorPB.java  |   2 +-
 .../NamenodeProtocolTranslatorPB.java   |   2 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 228 ++-
 .../token/block/InvalidBlockTokenException.java |  41 ++
 .../hadoop/hdfs/server/balancer/Dispatcher.java |   2 +-
 .../hdfs/server/datanode/CachingStrategy.java   |  76 +++
 .../hadoop/hdfs/server/datanode/DataNode.java   |   4 +-
 .../hdfs/server/datanode/DataXceiver.java   |  14 +-
 .../server/namenode/FSImageFormatPBINode.java   |   5 +-
 .../hadoop/hdfs/shortcircuit/DfsClientShm.java  | 119 
 .../hdfs/shortcircuit/DfsClientShmManager.java  | 514 +++
 .../hdfs/shortcircuit/ShortCircuitCache.java|   4 +-
 .../hdfs/shortcircuit/ShortCircuitShm.java  | 646 ++
 .../hadoop/hdfs/util/ExactSizeInputStream.java  | 125 
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|  20 +-
 52 files changed, 2873 insertions(+), 2949 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a727c6db/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
deleted file mode 100644
index 7b9e8e3..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * 

[1/4] hadoop git commit: Revert HDFS-8934. Move ShortCircuitShm to hdfs-client. Contributed by Mingliang Liu.

2015-08-24 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 87d013370 - a727c6db0


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a727c6db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
new file mode 100644
index 000..17365fb
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.io.EOFException;
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * An InputStream implementations which reads from some other InputStream
+ * but expects an exact number of bytes. Any attempts to read past the
+ * specified number of bytes will return as if the end of the stream
+ * was reached. If the end of the underlying stream is reached prior to
+ * the specified number of bytes, an EOFException is thrown.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class ExactSizeInputStream extends FilterInputStream {
+  private int remaining;
+
+  /**
+   * Construct an input stream that will read no more than
+   * 'numBytes' bytes.
+   * 
+   * If an EOF occurs on the underlying stream before numBytes
+   * bytes have been read, an EOFException will be thrown.
+   * 
+   * @param in the inputstream to wrap
+   * @param numBytes the number of bytes to read
+   */
+  public ExactSizeInputStream(InputStream in, int numBytes) {
+super(in);
+Preconditions.checkArgument(numBytes >= 0,
+    "Negative expected bytes: %s", numBytes);
+this.remaining = numBytes;
+  }
+
+  @Override
+  public int available() throws IOException {
+return Math.min(super.available(), remaining);
+  }
+
+  @Override
+  public int read() throws IOException {
+// EOF if we reached our limit
+if (remaining <= 0) {
+  return -1;
+}
+final int result = super.read();
+if (result >= 0) {
+  --remaining;
+} else if (remaining > 0) {
+  // Underlying stream reached EOF but we haven't read the expected
+  // number of bytes.
+  throw new EOFException(
+      "Premature EOF. Expected " + remaining + " more bytes");
+}
+return result;
+  }
+
+  @Override
+  public int read(final byte[] b, final int off, int len)
+  throws IOException {
+if (remaining <= 0) {
+  return -1;
+}
+len = Math.min(len, remaining);
+final int result = super.read(b, off, len);
+if (result >= 0) {
+  remaining -= result;
+} else if (remaining > 0) {
+  // Underlying stream reached EOF but we haven't read the expected
+  // number of bytes.
+  throw new EOFException(
+      "Premature EOF. Expected " + remaining + " more bytes");
+}
+return result;
+  }
+
+  @Override
+  public long skip(final long n) throws IOException {
+final long result = super.skip(Math.min(n, remaining));
+if (result > 0) {
+  remaining -= result;
+} else if (remaining > 0) {
+  // Underlying stream reached EOF but we haven't read the expected
+  // number of bytes.
+  throw new EOFException(
+      "Premature EOF. Expected " + remaining + " more bytes");
+}
+return result;
+  }
+  
+  @Override
+  public boolean markSupported() {
+return false;
+  }
+
+  @Override
+  public void mark(int readlimit) {
+throw new UnsupportedOperationException();
+  }
+  
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a727c6db/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
--
diff --git 

[28/50] [abbrv] hadoop git commit: HADOOP-10786. Fix UGI#reloginFromKeytab on Java 8. Contributed by Stephen Chu. Moved CHANGES.txt entry to 2.6.1

2015-08-24 Thread zhz
HADOOP-10786. Fix UGI#reloginFromKeytab on Java 8. Contributed by Stephen Chu.
Moved CHANGES.txt entry to 2.6.1

(cherry picked from commit d6050f06a3b7e049541b1cb4597c388abf00a5be)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24a11e39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24a11e39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24a11e39

Branch: refs/heads/HDFS-7285
Commit: 24a11e39960696d75e58df912ec6aa7283be194d
Parents: fa26411
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Aug 14 12:09:10 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri Aug 14 12:09:56 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24a11e39/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index e458042..c84af6a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1885,6 +1885,8 @@ Release 2.6.1 - UNRELEASED
 architecture because it is slower there (Suman Somasundar via Colin P.
 McCabe)
 
+HADOOP-10786. Fix UGI#reloginFromKeytab on Java 8. (Stephen Chu via wheat9)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



[38/50] [abbrv] hadoop git commit: MAPREDUCE-6439. AM may fail instead of retrying if RM shuts down during the allocate call. (Anubhav Dhoot via kasha)

2015-08-24 Thread zhz
MAPREDUCE-6439. AM may fail instead of retrying if RM shuts down during the 
allocate call. (Anubhav Dhoot via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8dfec7a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8dfec7a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8dfec7a1

Branch: refs/heads/HDFS-7285
Commit: 8dfec7a1979e8f70f8355c096874921d368342ef
Parents: dc7a061
Author: Karthik Kambatla ka...@apache.org
Authored: Sat Aug 15 00:52:11 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Sat Aug 15 00:52:11 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../mapreduce/v2/app/rm/RMCommunicator.java | 51 +-
 .../app/rm/RMContainerAllocationException.java  | 31 ++
 .../v2/app/rm/RMContainerAllocator.java |  4 +-
 .../mapreduce/v2/app/rm/TestRMCommunicator.java | 99 
 .../v2/app/rm/TestRMContainerAllocator.java | 48 --
 6 files changed, 202 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dfec7a1/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 19bd697..d2eef32 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -573,6 +573,9 @@ Release 2.7.2 - UNRELEASED
 MAPREDUCE-6426. TestShuffleHandler#testGetMapOutputInfo is failing.
 (zhihai xu via devaraj)
 
+MAPREDUCE-6439. AM may fail instead of retrying if RM shuts down during the
+allocate call. (Anubhav Dhoot via kasha)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dfec7a1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
index 5d4fa12..6cec2f3 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
@@ -270,35 +270,38 @@ public abstract class RMCommunicator extends 
AbstractService
 super.serviceStop();
   }
 
-  protected void startAllocatorThread() {
-allocatorThread = new Thread(new Runnable() {
-  @Override
-  public void run() {
-while (!stopped.get()  !Thread.currentThread().isInterrupted()) {
+  @VisibleForTesting
+  public class AllocatorRunnable implements Runnable {
+@Override
+public void run() {
+  while (!stopped.get()  !Thread.currentThread().isInterrupted()) {
+try {
+  Thread.sleep(rmPollInterval);
   try {
-Thread.sleep(rmPollInterval);
-try {
-  heartbeat();
-} catch (YarnRuntimeException e) {
-  LOG.error(Error communicating with RM:  + e.getMessage() , e);
-  return;
-} catch (Exception e) {
-  LOG.error(ERROR IN CONTACTING RM. , e);
-  continue;
-  // TODO: for other exceptions
-}
-
-lastHeartbeatTime = context.getClock().getTime();
-executeHeartbeatCallbacks();
-  } catch (InterruptedException e) {
-if (!stopped.get()) {
-  LOG.warn(Allocated thread interrupted. Returning.);
-}
+heartbeat();
+  } catch (RMContainerAllocationException e) {
+LOG.error("Error communicating with RM: " + e.getMessage(), e);
 return;
+  } catch (Exception e) {
+LOG.error("ERROR IN CONTACTING RM. ", e);
+continue;
+// TODO: for other exceptions
+  }
+
+  lastHeartbeatTime = context.getClock().getTime();
+  executeHeartbeatCallbacks();
+} catch (InterruptedException e) {
+  if (!stopped.get()) {
+LOG.warn("Allocated thread interrupted. Returning.");
   }
+  return;
 }
   }
-});
+}
+  }
+
+  protected void startAllocatorThread() {
+allocatorThread = new Thread(new AllocatorRunnable());
 allocatorThread.setName(RMCommunicator Allocator);
 

[09/50] [abbrv] hadoop git commit: YARN-3999. RM hangs on draing events. Contributed by Jian He

2015-08-24 Thread zhz
YARN-3999. RM hangs on draing events. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ae716fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ae716fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ae716fa

Branch: refs/heads/HDFS-7285
Commit: 3ae716fa696b87e849dae40225dc59fb5ed114cb
Parents: 7c796fd
Author: Xuan xg...@apache.org
Authored: Tue Aug 11 18:25:11 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Tue Aug 11 18:25:11 2015 -0700

--
 .../org/apache/hadoop/util/JvmPauseMonitor.java | 12 +---
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../hadoop/yarn/conf/YarnConfiguration.java |  5 +++
 .../hadoop/yarn/event/AsyncDispatcher.java  |  8 -
 .../src/main/resources/yarn-default.xml |  9 ++
 .../hadoop/yarn/event/TestAsyncDispatcher.java  | 25 ---
 .../resourcemanager/RMActiveServiceContext.java | 30 --
 .../server/resourcemanager/RMContextImpl.java   | 23 +++---
 .../server/resourcemanager/ResourceManager.java | 32 ++--
 .../server/resourcemanager/TestAppManager.java  |  5 +--
 .../resourcemanager/TestResourceManager.java|  1 +
 .../TestRMAppLogAggregationStatus.java  |  4 +--
 .../rmapp/TestRMAppTransitions.java |  6 ++--
 .../attempt/TestRMAppAttemptTransitions.java|  8 ++---
 .../scheduler/capacity/TestUtils.java   |  9 +++---
 .../scheduler/fifo/TestFifoScheduler.java   | 17 +--
 16 files changed, 104 insertions(+), 93 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ae716fa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
index 1fe7796..cd5afc6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
@@ -83,11 +83,13 @@ public class JvmPauseMonitor {
   
   public void stop() {
 shouldRun = false;
-monitorThread.interrupt();
-try {
-  monitorThread.join();
-} catch (InterruptedException e) {
-  Thread.currentThread().interrupt();
+if (monitorThread != null) {
+  monitorThread.interrupt();
+  try {
+monitorThread.join();
+  } catch (InterruptedException e) {
+Thread.currentThread().interrupt();
+  }
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ae716fa/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a1f6861..f31dd4b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -771,6 +771,7 @@ Release 2.7.2 - UNRELEASED
 YARN-3978. Configurably turn off the saving of container info in Generic 
AHS
 (Eric Payne via jeagles)
 
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -801,6 +802,8 @@ Release 2.7.2 - UNRELEASED
 YARN-3990. AsyncDispatcher may overloaded with RMAppNodeUpdateEvent when
 Node is connected/disconnected (Bibin A Chundatt via jlowe)
 
+YARN-3999. RM hangs on draining events. (Jian He via xgong)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ae716fa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f1baf5c..93f7ed6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1300,6 +1300,11 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS
   = 30 * 1000;
 
+  public static final String DISPATCHER_DRAIN_EVENTS_TIMEOUT =
+  YARN_PREFIX + dispatcher.drain-events.timeout;
+
+  public static final long DEFAULT_DISPATCHER_DRAIN_EVENTS_TIMEOUT = 30;
+
   /**
* CLASSPATH for YARN applications. A comma-separated list of CLASSPATH
* 

[50/50] [abbrv] hadoop git commit: Merge commit '456e901a4c5c639267ee87b8e5f1319f256d20c2' (HDFS-6407. Add sorting and pagination in the datanode tab of the NN Web UI. Contributed by Haohui Mai.) into

2015-08-24 Thread zhz
Merge commit '456e901a4c5c639267ee87b8e5f1319f256d20c2' (HDFS-6407. Add sorting 
and pagination in the datanode tab of the NN Web UI. Contributed by Haohui 
Mai.) into HDFS-7285-merge


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b6a63bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b6a63bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b6a63bb

Branch: refs/heads/HDFS-7285
Commit: 6b6a63bbbda920315d3d24b61ed3344a78a981b6
Parents: b57c9a3 456e901
Author: Zhe Zhang zhezh...@cloudera.com
Authored: Wed Aug 19 22:52:32 2015 -0700
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Aug 24 12:59:26 2015 -0700

--
 .gitignore  | 1 +
 BUILDING.txt|39 +-
 dev-support/determine-flaky-tests-hadoop.py |57 +-
 dev-support/docker/Dockerfile   | 8 +
 dev-support/releasedocmaker.py  |   405 +-
 dev-support/smart-apply-patch.sh|52 +-
 dev-support/test-patch.d/shellcheck.sh  | 2 +-
 dev-support/test-patch.sh   |   310 +-
 hadoop-client/pom.xml   | 8 +
 .../server/AuthenticationFilter.java|28 +-
 .../src/site/markdown/Configuration.md  |18 +-
 hadoop-common-project/hadoop-common/CHANGES.txt |   368 +-
 .../hadoop-common/HadoopCommon.cmake|   207 +
 .../hadoop-common/HadoopJNI.cmake   |97 +
 hadoop-common-project/hadoop-common/pom.xml |41 +-
 .../hadoop-common/src/CMakeLists.txt|   366 +-
 .../hadoop-common/src/JNIFlags.cmake|   124 -
 .../hadoop-common/src/main/bin/hadoop   |62 +-
 .../hadoop-common/src/main/bin/hadoop-config.sh |73 +-
 .../src/main/bin/hadoop-functions.sh|   423 +-
 .../hadoop-common/src/main/bin/slaves.sh| 3 +-
 .../main/conf/hadoop-user-functions.sh.example  |29 +-
 .../src/main/conf/log4j.properties  |21 +-
 .../src/main/docs/changes/ChangesFancyStyle.css |   170 -
 .../main/docs/changes/ChangesSimpleStyle.css|49 -
 .../src/main/docs/changes/changes2html.pl   |   286 -
 .../src/main/docs/releasenotes.html | 29099 -
 .../src/main/docs/src/documentation/README.txt  | 7 -
 .../classes/CatalogManager.properties   |40 -
 .../main/docs/src/documentation/conf/cli.xconf  |   327 -
 .../src/documentation/content/xdocs/index.xml   |48 -
 .../src/documentation/content/xdocs/site.xml|   263 -
 .../src/documentation/content/xdocs/tabs.xml|37 -
 .../resources/images/architecture.gif   |   Bin 15461 - 0 bytes
 .../resources/images/common-logo.jpg|   Bin 5887 - 0 bytes
 .../resources/images/core-logo.gif  |   Bin 6665 - 0 bytes
 .../documentation/resources/images/favicon.ico  |   Bin 766 - 0 bytes
 .../resources/images/hadoop-logo-big.jpg|   Bin 127869 - 0 bytes
 .../resources/images/hadoop-logo.jpg|   Bin 9443 - 0 bytes
 .../resources/images/hdfsarchitecture.gif   |   Bin 17653 - 0 bytes
 .../resources/images/hdfsarchitecture.odg   |   Bin 41298 - 0 bytes
 .../resources/images/hdfsarchitecture.png   |   Bin 40571 - 0 bytes
 .../resources/images/hdfsdatanodes.gif  |   Bin 16060 - 0 bytes
 .../resources/images/hdfsdatanodes.odg  |   Bin 37296 - 0 bytes
 .../resources/images/hdfsdatanodes.png  |   Bin 30012 - 0 bytes
 .../main/docs/src/documentation/skinconf.xml|   366 -
 .../hadoop-common/src/main/docs/status.xml  |75 -
 .../org/apache/hadoop/conf/Configuration.java   | 8 +-
 .../apache/hadoop/conf/ReconfigurableBase.java  |14 +-
 .../crypto/key/kms/KMSClientProvider.java   | 4 +-
 .../apache/hadoop/fs/AbstractFileSystem.java|13 +
 .../org/apache/hadoop/fs/BlockLocation.java |55 +-
 .../apache/hadoop/fs/ByteBufferReadable.java| 4 +
 .../fs/CommonConfigurationKeysPublic.java   | 6 +
 .../apache/hadoop/fs/DelegateToFileSystem.java  |19 +-
 .../java/org/apache/hadoop/fs/FileContext.java  |23 +
 .../java/org/apache/hadoop/fs/FileStatus.java   |15 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |   196 +-
 .../java/org/apache/hadoop/fs/FileUtil.java |   206 +-
 .../org/apache/hadoop/fs/FilterFileSystem.java  | 6 +
 .../java/org/apache/hadoop/fs/FilterFs.java | 6 +
 .../main/java/org/apache/hadoop/fs/FsShell.java |33 +-
 .../main/java/org/apache/hadoop/fs/Globber.java |28 +-
 .../org/apache/hadoop/fs/LocatedFileStatus.java |10 +-
 .../apache/hadoop/fs/RawLocalFileSystem.java|   156 +-
 .../java/org/apache/hadoop/fs/StorageType.java  | 7 +-
 .../apache/hadoop/fs/TrashPolicyDefault.java| 7 +-
 .../hadoop/fs/permission/FsPermission.java  

[39/50] [abbrv] hadoop git commit: YARN-3534. Collect memory/cpu usage on the node. (Inigo Goiri via kasha)

2015-08-24 Thread zhz
YARN-3534. Collect memory/cpu usage on the node. (Inigo Goiri via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/def12933
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/def12933
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/def12933

Branch: refs/heads/HDFS-7285
Commit: def12933b38efd5e47c5144b729c1a1496f09229
Parents: 8dfec7a
Author: Karthik Kambatla ka...@apache.org
Authored: Sun Aug 16 06:24:16 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Sun Aug 16 06:24:16 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  11 +-
 .../src/main/resources/yarn-default.xml |  19 ++-
 .../server/nodemanager/NodeResourceMonitor.java |  10 +-
 .../nodemanager/NodeResourceMonitorImpl.java| 140 +++
 .../monitor/ContainersMonitorImpl.java  |   8 +-
 .../nodemanager/TestNodeResourceMonitor.java|  35 +
 .../launcher/TestContainerLaunch.java   |   2 +-
 .../monitor/TestContainersMonitor.java  |   2 +-
 9 files changed, 220 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/def12933/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c451320..287a913 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -170,6 +170,8 @@ Release 2.8.0 - UNRELEASED
 YARN-4023. Publish Application Priority to TimelineServer. (Sunil G 
 via rohithsharmaks)
 
+YARN-3534. Collect memory/cpu usage on the node. (Inigo Goiri via kasha)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/def12933/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 93f7ed6..6c438f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -960,12 +960,21 @@ public class YarnConfiguration extends Configuration {
   public static final int DEFAULT_NM_WEBAPP_HTTPS_PORT = 8044;
   public static final String DEFAULT_NM_WEBAPP_HTTPS_ADDRESS = 0.0.0.0:
   + DEFAULT_NM_WEBAPP_HTTPS_PORT; 
-  
+
+  /** How often to monitor resource in a node.*/
+  public static final String NM_RESOURCE_MON_INTERVAL_MS =
+  NM_PREFIX + resource-monitor.interval-ms;
+  public static final int DEFAULT_NM_RESOURCE_MON_INTERVAL_MS = 3000;
+
   /** How often to monitor containers.*/
   public final static String NM_CONTAINER_MON_INTERVAL_MS =
 NM_PREFIX + container-monitor.interval-ms;
+  @Deprecated
   public final static int DEFAULT_NM_CONTAINER_MON_INTERVAL_MS = 3000;
 
+  /** Class that calculates current resource utilization.*/
+  public static final String NM_MON_RESOURCE_CALCULATOR =
+  NM_PREFIX + resource-calculator.class;
   /** Class that calculates containers current resource utilization.*/
   public static final String NM_CONTAINER_MON_RESOURCE_CALCULATOR =
 NM_PREFIX + container-monitor.resource-calculator.class;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/def12933/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 402377d..53face0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1235,13 +1235,26 @@
   /property
 
   property
-descriptionHow often to monitor containers./description
-nameyarn.nodemanager.container-monitor.interval-ms/name
+descriptionHow often to monitor the node and the 
containers./description
+nameyarn.nodemanager.resource-monitor.interval-ms/name
 value3000/value
   /property
 
   property
-descriptionClass that calculates containers current resource 
utilization./description
+descriptionClass that calculates 

[31/50] [abbrv] hadoop git commit: HADOOP-11333. Fix deadlock in DomainSocketWatcher when the notification pipe is full (zhaoyunjiong via cmccabe) Moved to 2.6.1

2015-08-24 Thread zhz
HADOOP-11333. Fix deadlock in DomainSocketWatcher when the notification pipe is 
full (zhaoyunjiong via cmccabe)
Moved to 2.6.1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05ed6905
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05ed6905
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05ed6905

Branch: refs/heads/HDFS-7285
Commit: 05ed69058f22ebeccc58faf0be491c269e950526
Parents: 08bd4ed
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Aug 14 12:53:46 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri Aug 14 12:53:46 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05ed6905/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6e48c20..57ef1c5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1499,9 +1499,6 @@ Release 2.7.0 - 2015-04-20
 HADOOP-11300. KMS startup scripts must not display the keystore /
 truststore passwords. (Arun Suresh via wang)
 
-HADOOP-11333. Fix deadlock in DomainSocketWatcher when the notification
-pipe is full (zhaoyunjiong via cmccabe)
-
 HADOOP-11337. KeyAuthorizationKeyProvider access checks need to be done
 atomically. (Dian Fu via wang)
 
@@ -1885,6 +1882,9 @@ Release 2.6.1 - UNRELEASED
 
 HADOOP-10786. Fix UGI#reloginFromKeytab on Java 8. (Stephen Chu via wheat9)
 
+HADOOP-11333. Fix deadlock in DomainSocketWatcher when the notification
+pipe is full (zhaoyunjiong via cmccabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



[44/50] [abbrv] hadoop git commit: HDFS-6407. Add sorting and pagination in the datanode tab of the NN Web UI. Contributed by Haohui Mai.

2015-08-24 Thread zhz
HDFS-6407. Add sorting and pagination in the datanode tab of the NN Web UI. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/456e901a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/456e901a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/456e901a

Branch: refs/heads/HDFS-7285
Commit: 456e901a4c5c639267ee87b8e5f1319f256d20c2
Parents: 2e7b7e2
Author: Haohui Mai whe...@apache.org
Authored: Mon Aug 17 11:04:00 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Mon Aug 17 11:04:00 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   3 +
 .../src/main/webapps/hdfs/dfshealth.html|  20 +-
 .../src/main/webapps/hdfs/dfshealth.js  |  17 +
 .../webapps/static/dataTables.bootstrap.css | 371 +++
 .../main/webapps/static/dataTables.bootstrap.js | 205 ++
 .../webapps/static/jquery.dataTables.min.js | 160 
 7 files changed, 771 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/456e901a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bfd95f7..c12e678 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -793,6 +793,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8713. Convert DatanodeDescriptor to use SLF4J logging. (wang)
 
+HDFS-6407. Add sorting and pagination in the datanode tab of the NN Web UI.
+(wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/456e901a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 145a8cd..1a29ad3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -390,6 +390,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;
 excludesrc/main/webapps/static/dust-full-2.0.0.min.js/exclude
 
excludesrc/main/webapps/static/dust-helpers-1.1.1.min.js/exclude
 excludesrc/main/webapps/static/jquery-1.10.2.min.js/exclude
+excludesrc/main/webapps/static/jquery.dataTables.min.js/exclude
+excludesrc/main/webapps/static/dataTables.bootstrap.css/exclude
+excludesrc/main/webapps/static/dataTables.bootstrap.js/exclude
   /excludes
 /configuration
   /plugin

http://git-wip-us.apache.org/repos/asf/hadoop/blob/456e901a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 7e7604a..38808ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -20,6 +20,7 @@
 head
 meta http-equiv=X-UA-Compatible content=IE=9 /
 link rel=stylesheet type=text/css 
href=/static/bootstrap-3.0.2/css/bootstrap.min.css /
+link rel=stylesheet type=text/css href=/static/dataTables.bootstrap.css 
/
 link rel=stylesheet type=text/css href=/static/hadoop.css /
 titleNamenode information/title
 /head
@@ -290,7 +291,7 @@
 /div
 div class=page-headerh1smallIn operation/small/h1/div
 small
-table class=table
+table class=table id=table-datanodes
   thead
 tr
   thNode/th
@@ -303,7 +304,7 @@
   /thead
   {#LiveNodes}
   tr
-td class=dfshealth-node-icon dfshealth-node-{state}{name} 
({xferaddr})/td
+td ng-value={state}-{name} class=dfshealth-node-icon 
dfshealth-node-{state}{name} ({xferaddr})/td
 td ng-value={lastContact}{#helper_relative_time 
value={lastContact}/}/td
 td ng-value={usedPercentage}
   div
@@ -315,18 +316,18 @@
   /div
 /td
 td{numBlocks}/td
-td{blockPoolUsed|fmt_bytes} ({blockPoolUsedPercent|fmt_percentage})/td
+td ng-value={blockPoolUsedPercent}{blockPoolUsed|fmt_bytes} 
({blockPoolUsedPercent|fmt_percentage})/td
 td{version}/td
   /tr
   {/LiveNodes}
   {#DeadNodes}
   tr class=danger
-td class=dfshealth-node-icon dfshealth-node-{state}{name} 
({xferaddr})/td
+td ng-value={state}-{name} class=dfshealth-node-icon 
dfshealth-node-{state}{name} ({xferaddr})/td
 td{#helper_relative_time value={lastContact}/}/td
-td-/td
-td-/td
-td-/td
-td-/td
+td/td
+td/td
+

[37/50] [abbrv] hadoop git commit: HDFS-8891. HDFS concat should keep srcs order. Contributed by Yong Zhang.

2015-08-24 Thread zhz
HDFS-8891. HDFS concat should keep srcs order. Contributed by Yong Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc7a0616
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc7a0616
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc7a0616

Branch: refs/heads/HDFS-7285
Commit: dc7a061668a3f4d86fe1b07a40d46774b5386938
Parents: 2bc0a4f
Author: Jing Zhao ji...@apache.org
Authored: Fri Aug 14 14:42:43 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Fri Aug 14 14:42:43 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../hdfs/server/namenode/FSDirConcatOp.java |  5 ++--
 .../hdfs/server/namenode/TestHDFSConcat.java| 24 
 3 files changed, 24 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc7a0616/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index be799af..20b5467 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1141,6 +1141,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8565. Typo in dfshealth.html - Decomissioning. (nijel via xyao)
 
+HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc7a0616/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index bb00130..786284d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
+
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.StorageType;
@@ -28,7 +29,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotException;
 
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.HashSet;
+import java.util.LinkedHashSet;
 import java.util.Set;
 import java.util.List;
 
@@ -103,7 +104,7 @@ class FSDirConcatOp {
   private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs,
   INodesInPath targetIIP, FSPermissionChecker pc) throws IOException {
 // to make sure no two files are the same
-SetINodeFile si = new HashSet();
+SetINodeFile si = new LinkedHashSet();
 final INodeFile targetINode = targetIIP.getLastINode().asFile();
 final INodeDirectory targetParent = targetINode.getParent();
 // now check the srcs

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc7a0616/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
index e1c3c0f..4685eb9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
@@ -111,18 +111,21 @@ public class TestHDFSConcat {
 long trgBlocks = nn.getBlockLocations(trg, 0, trgLen).locatedBlockCount();

 Path [] files = new Path[numFiles];
-byte [] [] bytes = new byte [numFiles][(int)fileLen];
+byte[][] bytes = new byte[numFiles + 1][(int) fileLen];
 LocatedBlocks [] lblocks = new LocatedBlocks[numFiles];
 long [] lens = new long [numFiles];
 
-
+stm = dfs.open(trgPath);
+stm.readFully(0, bytes[0]);
+stm.close();
 int i;
 for(i=0; ifiles.length; i++) {
   files[i] = new Path(/file+i);
   Path path = files[i];
   System.out.println(Creating file  + path);
-  DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
-
+
+  // make files with different content
+  DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, i);
   fStatus = nn.getFileInfo(path.toUri().getPath());
   lens[i] = 

[35/50] [abbrv] hadoop git commit: HDFS-8565. Typo in dfshealth.html - Decomissioning. (nijel via xyao)

2015-08-24 Thread zhz
HDFS-8565. Typo in dfshealth.html - Decomissioning. (nijel via xyao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1569228e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1569228e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1569228e

Branch: refs/heads/HDFS-7285
Commit: 1569228ec9090823186f062257fdf1beb5ee1781
Parents: 27d24f9
Author: Xiaoyu Yao x...@apache.org
Authored: Fri Aug 14 12:33:43 2015 -0700
Committer: Xiaoyu Yao x...@apache.org
Committed: Fri Aug 14 12:34:31 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 2 ++
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1569228e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0b28709..880284c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1137,6 +1137,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8879. Quota by storage type usage incorrectly initialized upon 
namenode
 restart. (xyao)
 
+HDFS-8565. Typo in dfshealth.html - Decomissioning. (nijel via xyao)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1569228e/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 6b48be7..7e7604a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -332,7 +332,7 @@
 /table
 /small
 
-div class=page-headerh1smallDecomissioning/small/h1/div
+div class=page-headerh1smallDecommissioning/small/h1/div
 small
 table class=table
   thead



[40/50] [abbrv] hadoop git commit: YARN-4055. Report node resource utilization in heartbeat. (Inigo Goiri via kasha)

2015-08-24 Thread zhz
YARN-4055. Report node resource utilization in heartbeat. (Inigo Goiri via 
kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13604bd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13604bd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13604bd5

Branch: refs/heads/HDFS-7285
Commit: 13604bd5f119fc81b9942190dfa366afad61bc92
Parents: def1293
Author: Karthik Kambatla ka...@apache.org
Authored: Sun Aug 16 15:08:53 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Sun Aug 16 15:08:53 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../yarn/client/TestResourceTrackerOnHA.java|  2 +-
 .../yarn/server/api/records/NodeStatus.java | 18 +++-
 .../api/records/impl/pb/NodeStatusPBImpl.java   | 22 
 .../main/proto/yarn_server_common_protos.proto  |  1 +
 .../hadoop/yarn/server/nodemanager/Context.java |  2 ++
 .../yarn/server/nodemanager/NodeManager.java| 16 --
 .../nodemanager/NodeStatusUpdaterImpl.java  | 13 +++-
 8 files changed, 72 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13604bd5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 287a913..66978a0 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -172,6 +172,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3534. Collect memory/cpu usage on the node. (Inigo Goiri via kasha)
 
+YARN-4055. Report node resource utilization in heartbeat. 
+(Inigo Goiri via kasha)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13604bd5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
index c51570c..6cdf87f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
@@ -68,7 +68,7 @@ public class TestResourceTrackerOnHA extends 
ProtocolHATestBase{
 failoverThread = createAndStartFailoverThread();
 NodeStatus status =
 NodeStatus.newInstance(NodeId.newInstance(localhost, 0), 0, null,
-null, null, null);
+null, null, null, null);
 NodeHeartbeatRequest request2 =
 NodeHeartbeatRequest.newInstance(status, null, null,null);
 resourceTracker.nodeHeartbeat(request2);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13604bd5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
index 38b0381..24391bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
@@ -47,13 +47,15 @@ public abstract class NodeStatus {
* @param keepAliveApplications Applications to keep alive.
* @param nodeHealthStatus Health status of the node.
* @param containersUtilizations Utilization of the containers in this node.
+   * @param nodeUtilization Utilization of the node.
* @return New {@code NodeStatus} with the provided information.
*/
   public static NodeStatus newInstance(NodeId nodeId, int responseId,
   ListContainerStatus containerStatuses,
   ListApplicationId keepAliveApplications,
   NodeHealthStatus nodeHealthStatus,
-  ResourceUtilization containersUtilization) {
+  ResourceUtilization containersUtilization,
+  ResourceUtilization nodeUtilization) {
 NodeStatus nodeStatus = 

[11/50] [abbrv] hadoop git commit: YARN-4023. Publish Application Priority to TimelineServer. (Sunil G via rohithsharmaks)

2015-08-24 Thread zhz
YARN-4023. Publish Application Priority to TimelineServer. (Sunil G via 
rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c12adb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c12adb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c12adb7

Branch: refs/heads/HDFS-7285
Commit: 1c12adb71f32f24ec525313ff8dfcd2b738fc8e2
Parents: 1ea1a83
Author: rohithsharmaks rohithsharm...@apache.org
Authored: Wed Aug 12 14:45:41 2015 +0530
Committer: rohithsharmaks rohithsharm...@apache.org
Committed: Wed Aug 12 14:45:41 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../yarn/api/records/ApplicationReport.java |  3 ++-
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  2 +-
 .../hadoop/yarn/api/TestApplicatonReport.java   |  3 ++-
 ...pplicationHistoryManagerOnTimelineStore.java | 21 ++--
 ...pplicationHistoryManagerOnTimelineStore.java |  2 ++
 .../metrics/ApplicationMetricsConstants.java|  3 +++
 .../hadoop/yarn/server/webapp/AppsBlock.java| 12 ++-
 .../metrics/ApplicationCreatedEvent.java| 10 +-
 .../metrics/SystemMetricsPublisher.java |  5 -
 .../metrics/TestSystemMetricsPublisher.java |  6 ++
 .../src/site/markdown/TimelineServer.md | 19 ++
 12 files changed, 69 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c12adb7/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f31dd4b..199a930 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -167,6 +167,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3887. Support changing Application priority during runtime. (Sunil G
 via jianhe)
 
+YARN-4023. Publish Application Priority to TimelineServer. (Sunil G 
+via rohithsharmaks)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c12adb7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
index 258b991..fa3b1e5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
@@ -92,7 +92,7 @@ public abstract class ApplicationReport {
   long startTime, long finishTime, FinalApplicationStatus finalStatus,
   ApplicationResourceUsageReport appResources, String origTrackingUrl,
   float progress, String applicationType, Token amRmToken,
-  Set<String> tags, boolean unmanagedApplication) {
+  Set<String> tags, boolean unmanagedApplication, Priority priority) {
 ApplicationReport report =
 newInstance(applicationId, applicationAttemptId, user, queue, name,
   host, rpcPort, clientToAMToken, state, diagnostics, url, startTime,
@@ -100,6 +100,7 @@ public abstract class ApplicationReport {
   applicationType, amRmToken);
 report.setApplicationTags(tags);
 report.setUnmanagedApp(unmanagedApplication);
+report.setPriority(priority);
 return report;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c12adb7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index eb50e00..8f17c8f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -104,7 +104,7 @@ public class TestYarnCLI {
  "user", "queue", "appname", "host", 124, null,
  YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
  FinalApplicationStatus.SUCCEEDED, usageReport, "N/A", 0.53789f, 
"YARN",
-  null, null, false);
+  null, null, false, 

[12/50] [abbrv] hadoop git commit: YARN-4026. Refactored ContainerAllocator to accept a list of priorites rather than a single priority. Contributed by Wangda Tan

2015-08-24 Thread zhz
YARN-4026. Refactored ContainerAllocator to accept a list of priorites rather 
than a single priority. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5003be9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5003be9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5003be9

Branch: refs/heads/HDFS-7285
Commit: e5003be907acef87c2770e3f2914953f62017b0e
Parents: 1c12adb
Author: Jian He jia...@apache.org
Authored: Wed Aug 12 15:07:50 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Wed Aug 12 15:07:50 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/capacity/LeafQueue.java   |   7 +-
 .../capacity/allocator/ContainerAllocation.java |  33 +++--
 .../capacity/allocator/ContainerAllocator.java  | 109 +-
 .../allocator/RegularContainerAllocator.java| 123 
 .../scheduler/common/fica/FiCaSchedulerApp.java | 103 +
 .../scheduler/capacity/TestLeafQueue.java   | 144 +++
 7 files changed, 315 insertions(+), 207 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5003be9/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 199a930..4c70a8a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -393,6 +393,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3966. Fix excessive loggings in CapacityScheduler. (Jian He via 
wangda)
 
+YARN-4026. Refactored ContainerAllocator to accept a list of priorites
+rather than a single priority. (Wangda Tan via jianhe)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5003be9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 5976f58..ff1baff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -763,8 +763,9 @@ public class LeafQueue extends AbstractCSQueue {
   FiCaSchedulerApp application =
   getApplication(reservedContainer.getApplicationAttemptId());
   synchronized (application) {
-CSAssignment assignment = application.assignReservedContainer(node, 
reservedContainer,
-clusterResource, schedulingMode);
+CSAssignment assignment =
+application.assignContainers(clusterResource, node,
+currentResourceLimits, schedulingMode, reservedContainer);
 handleExcessReservedContainer(clusterResource, assignment);
 return assignment;
   }
@@ -812,7 +813,7 @@ public class LeafQueue extends AbstractCSQueue {
   // Try to schedule
   CSAssignment assignment =
   application.assignContainers(clusterResource, node,
-  currentResourceLimits, schedulingMode);
+  currentResourceLimits, schedulingMode, null);
 
   if (LOG.isDebugEnabled()) {
 LOG.debug(post-assignContainers for application 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5003be9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java
index 00c1bb9..1df9410 100644
--- 

[34/50] [abbrv] hadoop git commit: MAPREDUCE-5817. Mappers get rescheduled on node transition even after all reducers are completed. (Sangjin Lee via kasha)

2015-08-24 Thread zhz
MAPREDUCE-5817. Mappers get rescheduled on node transition even after all 
reducers are completed. (Sangjin Lee via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27d24f96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27d24f96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27d24f96

Branch: refs/heads/HDFS-7285
Commit: 27d24f96ab8d17e839a1ef0d7076efc78d28724a
Parents: 84bf712
Author: Karthik Kambatla ka...@apache.org
Authored: Fri Aug 14 12:29:50 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Fri Aug 14 12:29:50 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../mapreduce/v2/app/job/impl/JobImpl.java  |  38 --
 .../mapreduce/v2/app/job/impl/TestJobImpl.java  | 130 ++-
 3 files changed, 156 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27d24f96/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index c424667..19bd697 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -552,6 +552,9 @@ Release 2.8.0 - UNRELEASED
 
 MAPREDUCE-6433. launchTime may be negative. (Zhihai Xu)
 
+MAPREDUCE-5817. Mappers get rescheduled on node transition even after all
+reducers are completed. (Sangjin Lee via kasha)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27d24f96/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index 9d141eb..fc9a3a5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -129,6 +129,7 @@ import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
 import org.apache.hadoop.yarn.util.Clock;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /** Implementation of Job interface. Maintains the state machines of Job.
@@ -1330,15 +1331,21 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
   
   private void actOnUnusableNode(NodeId nodeId, NodeState nodeState) {
 // rerun previously successful map tasks
-List<TaskAttemptId> taskAttemptIdList = 
nodesToSucceededTaskAttempts.get(nodeId);
-if(taskAttemptIdList != null) {
-  String mesg = "TaskAttempt killed because it ran on unusable node "
-  + nodeId;
-  for(TaskAttemptId id : taskAttemptIdList) {
-if(TaskType.MAP == id.getTaskId().getTaskType()) {
-  // reschedule only map tasks because their outputs maybe unusable
-  LOG.info(mesg + ". AttemptId:" + id);
-  eventHandler.handle(new TaskAttemptKillEvent(id, mesg));
+// do this only if the job is still in the running state and there are
+// running reducers
+if (getInternalState() == JobStateInternal.RUNNING &&
+!allReducersComplete()) {
+  List<TaskAttemptId> taskAttemptIdList =
+  nodesToSucceededTaskAttempts.get(nodeId);
+  if (taskAttemptIdList != null) {
+String mesg = "TaskAttempt killed because it ran on unusable node "
++ nodeId;
+for (TaskAttemptId id : taskAttemptIdList) {
+  if (TaskType.MAP == id.getTaskId().getTaskType()) {
+// reschedule only map tasks because their outputs maybe unusable
+LOG.info(mesg + ". AttemptId:" + id);
+eventHandler.handle(new TaskAttemptKillEvent(id, mesg));
+  }
 }
   }
 }
@@ -1346,6 +1353,10 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
 // RMContainerAllocator
   }
 
+  private boolean allReducersComplete() {
+return numReduceTasks == 0 || numReduceTasks == getCompletedReduces();
+  }
+
   /*
   private int getBlockSize() {
 String inputClassName = conf.get(MRJobConfig.INPUT_FORMAT_CLASS_ATTR);
@@ -2084,13 +2095,18 @@ public class JobImpl implements 

[30/50] [abbrv] hadoop git commit: HDFS-7225. Remove stale block invalidation work when DN re-registers with different UUID. (Zhe Zhang and Andrew Wang) Moved to 2.6.1

2015-08-24 Thread zhz
HDFS-7225. Remove stale block invalidation work when DN re-registers with 
different UUID. (Zhe Zhang and Andrew Wang)
Moved to 2.6.1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08bd4edf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08bd4edf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08bd4edf

Branch: refs/heads/HDFS-7285
Commit: 08bd4edf4092901273da0d73a5cc760fdc11052b
Parents: e7aa813
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Aug 14 12:38:00 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri Aug 14 12:38:00 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08bd4edf/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1507cbe..dba4535 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1848,9 +1848,6 @@ Release 2.7.0 - 2015-04-20
 HDFS-7406. SimpleHttpProxyHandler puts incorrect Connection: Close
 header. (wheat9)
 
-HDFS-7225. Remove stale block invalidation work when DN re-registers with
-different UUID. (Zhe Zhang and Andrew Wang)
-
 HDFS-7374. Allow decommissioning of dead DataNodes. (Zhe Zhang)
 
 HDFS-7403. Inaccurate javadoc of BlockUCState#COMPLETE state. (
@@ -2339,6 +2336,9 @@ Release 2.6.1 - UNRELEASED
 HDFS-7263. Snapshot read can reveal future bytes for appended files.
 (Tao Luo via shv)
 
+HDFS-7225. Remove stale block invalidation work when DN re-registers with
+different UUID. (Zhe Zhang and Andrew Wang)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



hadoop git commit: HDFS-8928. Improvements for BlockUnderConstructionFeature: ReplicaUnderConstruction as a separate class and replicas as an array. Contributed by Jing Zhao.

2015-08-24 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 48774d0a4 - bdd79388f


HDFS-8928. Improvements for BlockUnderConstructionFeature: 
ReplicaUnderConstruction as a separate class and replicas as an array. 
Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bdd79388
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bdd79388
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bdd79388

Branch: refs/heads/trunk
Commit: bdd79388f39f4f35af7decd5703eff587b0ddfb7
Parents: 48774d0
Author: Jing Zhao ji...@apache.org
Authored: Mon Aug 24 15:53:34 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Mon Aug 24 15:53:34 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   4 +
 .../hdfs/server/blockmanagement/BlockInfo.java  |   3 +-
 .../BlockUnderConstructionFeature.java  | 211 ++-
 .../ReplicaUnderConstruction.java   | 119 +++
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |   3 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   7 +-
 6 files changed, 195 insertions(+), 152 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdd79388/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c90c247..b17492d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -829,6 +829,10 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8934. Move ShortCircuitShm to hdfs-client. (Mingliang Liu via wheat9)
 
+HDFS-8928. Improvements for BlockUnderConstructionFeature:
+ReplicaUnderConstruction as a separate class and replicas as an array.
+(jing9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdd79388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 72fc005..a9dfdde 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -24,7 +24,6 @@ import java.util.List;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
-import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature.ReplicaUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.LightWeightGSet;
@@ -363,7 +362,7 @@ public abstract class  BlockInfo extends Block
 } else {
   // the block is already under construction
   uc.setBlockUCState(s);
-  uc.setExpectedLocations(this.getGenerationStamp(), targets);
+  uc.setExpectedLocations(this, targets);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdd79388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
index de51b2f..88cf06d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
@@ -17,28 +17,27 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
+import java.util.ArrayList;
+import java.util.List;
+
+import static 

hadoop git commit: HDFS-8948. Use GenericTestUtils to set log levels in TestPread and TestReplaceDatanodeOnFailure. Contributed by Mingliang Liu.

2015-08-24 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 eefc1c174 - ef8437a38


HDFS-8948. Use GenericTestUtils to set log levels in TestPread and 
TestReplaceDatanodeOnFailure. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef8437a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef8437a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef8437a3

Branch: refs/heads/branch-2
Commit: ef8437a3823558e5d16861710ca75f4e75226712
Parents: eefc1c1
Author: Haohui Mai whe...@apache.org
Authored: Mon Aug 24 16:16:10 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Mon Aug 24 16:16:33 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../src/test/java/org/apache/hadoop/hdfs/TestPread.java  | 4 ++--
 .../org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java | 4 ++--
 3 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef8437a3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b95b6ac..7e5adbc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -864,6 +864,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8930. Block report lease may leak if the 2nd full block report comes
 when NN is still in safemode (Colin P. McCabe via Jing Zhao)
 
+HDFS-8948. Use GenericTestUtils to set log levels in TestPread and
+TestReplaceDatanodeOnFailure. (Mingliang Liu via wheat9)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef8437a3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index 3c9582b..ee3ef89 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -30,7 +30,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -42,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Before;
 import org.junit.Test;
@@ -264,7 +264,7 @@ public class TestPread {
   @Test
   public void testPreadDFSNoChecksum() throws IOException {
 Configuration conf = new Configuration();
-((Log4JLogger)DataTransferProtocol.LOG).getLogger().setLevel(Level.ALL);
+GenericTestUtils.setLogLevel(DataTransferProtocol.LOG, Level.ALL);
 dfsPreadTest(conf, false, false);
 dfsPreadTest(conf, true, false);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef8437a3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
index 9161417..76d592c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,6 +32,7 @@ import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy;
 import org.apache.hadoop.io.IOUtils;
+import 

hadoop git commit: HDFS-8932. NPE thrown in NameNode when try to get TotalSyncCount metric before editLogStream initialization. Contributed by Surendra Singh Lilhore

2015-08-24 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ef8437a38 - 137bde075


HDFS-8932. NPE thrown in NameNode when try to get TotalSyncCount metric before 
editLogStream initialization. Contributed by Surendra Singh Lilhore

(cherry picked from commit 3b00eaea256d252be3361a7d9106b88756fcb9ba)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/137bde07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/137bde07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/137bde07

Branch: refs/heads/branch-2
Commit: 137bde0755f1dc67b3569f3f3967213fc8419d38
Parents: ef8437a
Author: Xiaoyu Yao x...@apache.org
Authored: Mon Aug 24 16:56:24 2015 -0700
Committer: Xiaoyu Yao x...@apache.org
Committed: Mon Aug 24 16:57:02 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../apache/hadoop/hdfs/server/namenode/FSEditLog.java   | 12 
 .../hadoop/hdfs/server/namenode/FSNamesystem.java   |  7 ++-
 3 files changed, 17 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/137bde07/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7e5adbc..3100fd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -867,6 +867,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8948. Use GenericTestUtils to set log levels in TestPread and
 TestReplaceDatanodeOnFailure. (Mingliang Liu via wheat9)
 
+HDFS-8932. NPE thrown in NameNode when try to get TotalSyncCount metric
+before editLogStream initialization. (Surendra Singh Lilhore via xyao)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/137bde07/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 81a0954..d7294d1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -1648,10 +1648,14 @@ public class FSEditLog implements LogsPurgeable {
   }
 
   /**
-   +   * Return total number of syncs happened on this edit log.
-   +   * @return long - count
-   +   */
+   * Return total number of syncs happened on this edit log.
+   * @return long - count
+   */
   public long getTotalSyncCount() {
-return editLogStream.getNumSync();
+if (editLogStream != null) {
+  return editLogStream.getNumSync();
+} else {
+  return 0;
+}
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/137bde07/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9299265..883ed08 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7291,7 +7291,12 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
  @Metric({"TotalSyncTimes",
  "Total time spend in sync operation on various edit logs"})
   public String getTotalSyncTimes() {
-return fsImage.editLog.getJournalSet().getSyncTimes();
+JournalSet journalSet = fsImage.editLog.getJournalSet();
+if (journalSet != null) {
+  return journalSet.getSyncTimes();
+} else {
+  return "";
+}
   }
 }
 



hadoop git commit: YARN-3058. Miscellaneous issues in NodeManager project (Naganarasimha G R via sjlee)

2015-08-24 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 9d1494733 - 3c36922d7


YARN-3058. Miscellaneous issues in NodeManager project (Naganarasimha G R via 
sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c36922d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c36922d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c36922d

Branch: refs/heads/YARN-2928
Commit: 3c36922d70987b7459c36bf4a61db768dade170f
Parents: 9d14947
Author: Sangjin Lee sj...@apache.org
Authored: Mon Aug 24 17:36:31 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Mon Aug 24 17:36:31 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../containermanager/ContainerManagerImpl.java  | 33 +++-
 .../metrics/TimelineServiceV2Publisher.java |  2 --
 .../TestSystemMetricsPublisherForV2.java|  8 -
 4 files changed, 28 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c36922d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a3c89e3..a974fff 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -144,6 +144,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 
 YARN-4064. build is broken at TestHBaseTimelineWriterImpl.java (sjlee)
 
+YARN-3058. Miscellaneous issues in NodeManager project (Naganarasimha G R
+via sjlee)
+
 Trunk - Unreleased
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c36922d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 2ea2ec1..50f2dfc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -896,21 +896,24 @@ public class ContainerManagerImpl extends 
CompositeService implements
if (flowRunIdStr != null && !flowRunIdStr.isEmpty()) {
   flowRunId = Long.parseLong(flowRunIdStr);
 }
-Application application = new ApplicationImpl(dispatcher, user,
-flowName, flowVersion, flowRunId, applicationID, credentials, 
context);
-if (null == context.getApplications().putIfAbsent(applicationID,
-  application)) {
-  LOG.info("Creating a new application reference for app " + 
applicationID);
-  LogAggregationContext logAggregationContext =
-  containerTokenIdentifier.getLogAggregationContext();
-  Map<ApplicationAccessType, String> appAcls =
-  container.getLaunchContext().getApplicationACLs();
-  context.getNMStateStore().storeApplication(applicationID,
-  buildAppProto(applicationID, user, credentials, appAcls,
-logAggregationContext));
-  dispatcher.getEventHandler().handle(
-new ApplicationInitEvent(applicationID, appAcls,
-  logAggregationContext));
+if (!context.getApplications().containsKey(applicationID)) {
+  Application application =
+  new ApplicationImpl(dispatcher, user, flowName, flowVersion,
+  flowRunId, applicationID, credentials, context);
+  if (context.getApplications().putIfAbsent(applicationID,
+  application) == null) {
+LOG.info("Creating a new application reference for app "
++ applicationID);
+LogAggregationContext logAggregationContext =
+containerTokenIdentifier.getLogAggregationContext();
+Map<ApplicationAccessType, String> appAcls =
+container.getLaunchContext().getApplicationACLs();
+context.getNMStateStore().storeApplication(applicationID,
+buildAppProto(applicationID, user, credentials, appAcls,
+logAggregationContext));
+dispatcher.getEventHandler().handle(new 

hadoop git commit: HDFS-8928. Improvements for BlockUnderConstructionFeature: ReplicaUnderConstruction as a separate class and replicas as an array. Contributed by Jing Zhao.

2015-08-24 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7a0a31586 - eefc1c174


HDFS-8928. Improvements for BlockUnderConstructionFeature: 
ReplicaUnderConstruction as a separate class and replicas as an array. 
Contributed by Jing Zhao.

(cherry picked from commit bdd79388f39f4f35af7decd5703eff587b0ddfb7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eefc1c17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eefc1c17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eefc1c17

Branch: refs/heads/branch-2
Commit: eefc1c174b8e780d48d5d6678f3b6f828dd8e631
Parents: 7a0a315
Author: Jing Zhao ji...@apache.org
Authored: Mon Aug 24 15:53:34 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Mon Aug 24 15:55:58 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   4 +
 .../hdfs/server/blockmanagement/BlockInfo.java  |   3 +-
 .../BlockUnderConstructionFeature.java  | 211 ++-
 .../ReplicaUnderConstruction.java   | 119 +++
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |   3 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   7 +-
 6 files changed, 195 insertions(+), 152 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eefc1c17/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a52367b..b95b6ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -484,6 +484,10 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8934. Move ShortCircuitShm to hdfs-client. (Mingliang Liu via wheat9)
 
+HDFS-8928. Improvements for BlockUnderConstructionFeature:
+ReplicaUnderConstruction as a separate class and replicas as an array.
+(jing9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eefc1c17/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index f3626ae..ad1813d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -24,7 +24,6 @@ import java.util.List;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
-import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature.ReplicaUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.LightWeightGSet;
@@ -366,7 +365,7 @@ public abstract class  BlockInfo extends Block
 } else {
   // the block is already under construction
   uc.setBlockUCState(s);
-  uc.setExpectedLocations(this.getGenerationStamp(), targets);
+  uc.setExpectedLocations(this, targets);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eefc1c17/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
index de51b2f..88cf06d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
@@ -17,28 +17,27 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
+import java.util.ArrayList;
+import 

hadoop git commit: HDFS-8948. Use GenericTestUtils to set log levels in TestPread and TestReplaceDatanodeOnFailure. Contributed by Mingliang Liu.

2015-08-24 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk bdd79388f -> 66d0c81d8


HDFS-8948. Use GenericTestUtils to set log levels in TestPread and 
TestReplaceDatanodeOnFailure. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66d0c81d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66d0c81d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66d0c81d

Branch: refs/heads/trunk
Commit: 66d0c81d8f4e200a5051c8df87be890c9ad8772e
Parents: bdd7938
Author: Haohui Mai whe...@apache.org
Authored: Mon Aug 24 16:16:10 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Mon Aug 24 16:18:15 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../src/test/java/org/apache/hadoop/hdfs/TestPread.java  | 4 ++--
 .../org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java | 4 ++--
 3 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66d0c81d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b17492d..1844357 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1208,6 +1208,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8930. Block report lease may leak if the 2nd full block report comes
 when NN is still in safemode (Colin P. McCabe via Jing Zhao)
 
+HDFS-8948. Use GenericTestUtils to set log levels in TestPread and
+TestReplaceDatanodeOnFailure. (Mingliang Liu via wheat9)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66d0c81d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index ed553f6..43650a8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -29,7 +29,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -41,6 +40,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Before;
 import org.junit.Test;
@@ -261,7 +261,7 @@ public class TestPread {
   @Test
   public void testPreadDFSNoChecksum() throws IOException {
 Configuration conf = new Configuration();
-((Log4JLogger)DataTransferProtocol.LOG).getLogger().setLevel(Level.ALL);
+GenericTestUtils.setLogLevel(DataTransferProtocol.LOG, Level.ALL);
 dfsPreadTest(conf, false, false);
 dfsPreadTest(conf, true, false);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66d0c81d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
index f92f287..d351020 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,6 +32,7 @@ import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy;
 import org.apache.hadoop.io.IOUtils;
+import 

Git Push Summary

2015-08-24 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285-merge [deleted] b2049f95a


hadoop git commit: YARN-4014. Support user cli interface in for Application Priority. Contributed by Rohith Sharma K S

2015-08-24 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3b00eaea2 -> 57c7ae1af


YARN-4014. Support user cli interface in for Application Priority. Contributed 
by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57c7ae1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57c7ae1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57c7ae1a

Branch: refs/heads/trunk
Commit: 57c7ae1affb2e1821fbdc3f47738d7e6fd83c7c1
Parents: 3b00eae
Author: Jian He jia...@apache.org
Authored: Mon Aug 24 20:36:08 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Mon Aug 24 20:36:44 2015 -0700

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |   7 +
 .../hadoop/mapred/TestClientRedirect.java   |   9 +
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/api/ApplicationClientProtocol.java |  18 ++
 .../UpdateApplicationPriorityRequest.java   |  80 +
 .../UpdateApplicationPriorityResponse.java  |  47 +
 .../main/proto/applicationclient_protocol.proto |   1 +
 .../src/main/proto/yarn_service_protos.proto|   8 +
 .../hadoop/yarn/client/api/YarnClient.java  |  17 ++
 .../yarn/client/api/impl/YarnClientImpl.java|  11 ++
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  29 
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  29 
 .../ApplicationClientProtocolPBClientImpl.java  |  20 +++
 .../ApplicationClientProtocolPBServiceImpl.java |  22 +++
 .../UpdateApplicationPriorityRequestPBImpl.java | 171 +++
 ...UpdateApplicationPriorityResponsePBImpl.java |  69 
 .../server/resourcemanager/ClientRMService.java |  73 
 .../server/resourcemanager/RMAuditLogger.java   |   2 +
 .../resourcemanager/recovery/RMStateStore.java  |  12 +-
 .../recovery/RMStateUpdateAppEvent.java |  13 ++
 .../scheduler/capacity/CapacityScheduler.java   |  16 +-
 .../resourcemanager/TestClientRMService.java|  63 +++
 22 files changed, 713 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57c7ae1a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 90f6876..91c3086 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
@@ -466,4 +467,10 @@ public class ResourceMgrDelegate extends YarnClient {
   throws YarnException, IOException {
 return client.getClusterNodeLabels();
   }
+
+  @Override
+  public void updateApplicationPriority(ApplicationId applicationId,
+  Priority priority) throws YarnException, IOException {
+client.updateApplicationPriority(applicationId, priority);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57c7ae1a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index bb00b19..1bf1408 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
@@ -114,6 +114,8 @@ import 

hadoop git commit: YARN-4014. Support user cli interface in for Application Priority. Contributed by Rohith Sharma K S (cherry picked from commit 57c7ae1affb2e1821fbdc3f47738d7e6fd83c7c1)

2015-08-24 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 137bde075 -> 387076894


YARN-4014. Support user cli interface in for Application Priority. Contributed 
by Rohith Sharma K S
(cherry picked from commit 57c7ae1affb2e1821fbdc3f47738d7e6fd83c7c1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38707689
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38707689
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38707689

Branch: refs/heads/branch-2
Commit: 387076894ff814339015bf55f8da254416f77633
Parents: 137bde0
Author: Jian He jia...@apache.org
Authored: Mon Aug 24 20:36:08 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Mon Aug 24 20:37:18 2015 -0700

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |   7 +
 .../hadoop/mapred/TestClientRedirect.java   |   9 +
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/api/ApplicationClientProtocol.java |  18 ++
 .../UpdateApplicationPriorityRequest.java   |  80 +
 .../UpdateApplicationPriorityResponse.java  |  47 +
 .../main/proto/applicationclient_protocol.proto |   1 +
 .../src/main/proto/yarn_service_protos.proto|   8 +
 .../hadoop/yarn/client/api/YarnClient.java  |  17 ++
 .../yarn/client/api/impl/YarnClientImpl.java|  11 ++
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  29 
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  29 
 .../ApplicationClientProtocolPBClientImpl.java  |  20 +++
 .../ApplicationClientProtocolPBServiceImpl.java |  22 +++
 .../UpdateApplicationPriorityRequestPBImpl.java | 171 +++
 ...UpdateApplicationPriorityResponsePBImpl.java |  69 
 .../server/resourcemanager/ClientRMService.java |  73 
 .../server/resourcemanager/RMAuditLogger.java   |   2 +
 .../resourcemanager/recovery/RMStateStore.java  |  12 +-
 .../recovery/RMStateUpdateAppEvent.java |  13 ++
 .../scheduler/capacity/CapacityScheduler.java   |  16 +-
 .../resourcemanager/TestClientRMService.java|  63 +++
 22 files changed, 713 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38707689/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 90f6876..91c3086 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
@@ -466,4 +467,10 @@ public class ResourceMgrDelegate extends YarnClient {
   throws YarnException, IOException {
 return client.getClusterNodeLabels();
   }
+
+  @Override
+  public void updateApplicationPriority(ApplicationId applicationId,
+  Priority priority) throws YarnException, IOException {
+client.updateApplicationPriority(applicationId, priority);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38707689/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index bb00b19..1bf1408 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
@@ 

hadoop git commit: HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals before deleting. Contributed by Casey Brotherton.

2015-08-24 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 387076894 -> e99349830


HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals 
before deleting. Contributed by Casey Brotherton.

(cherry picked from commit af78767870b8296886c03f8be24cf13a4e2bd4b0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9934983
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9934983
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9934983

Branch: refs/heads/branch-2
Commit: e9934983027bc3cd7cd82e49c436c66f507b4cd2
Parents: 3870768
Author: Harsh J ha...@cloudera.com
Authored: Tue Aug 25 11:21:19 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Aug 25 11:22:08 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../java/org/apache/hadoop/fs/TrashPolicyDefault.java| 11 +--
 2 files changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9934983/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6c27abe..15ec117 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -286,6 +286,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals
+before deleting (Casey Brotherton via harsh)
+
 HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
 is an I/O error during requestShortCircuitShm (cmccabe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9934983/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 88aeab5..1ed8a46 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -161,12 +161,19 @@ public class TrashPolicyDefault extends TrashPolicy {
   @SuppressWarnings("deprecation")
   @Override
   public void createCheckpoint() throws IOException {
+createCheckpoint(new Date());
+  }
+
+  @SuppressWarnings("deprecation")
+  public void createCheckpoint(Date date) throws IOException {
+
 if (!fs.exists(current)) // no trash, no checkpoint
   return;
 
 Path checkpointBase;
 synchronized (CHECKPOINT) {
-  checkpointBase = new Path(trash, CHECKPOINT.format(new Date()));
+  checkpointBase = new Path(trash, CHECKPOINT.format(date));
+
 }
 Path checkpoint = checkpointBase;
 
@@ -287,7 +294,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 TrashPolicyDefault trash = new TrashPolicyDefault(
 fs, home.getPath(), conf);
 trash.deleteCheckpoint();
-trash.createCheckpoint();
+trash.createCheckpoint(new Date(now));
   } catch (IOException e) {
 LOG.warn("Trash caught: "+e+". Skipping "+home.getPath()+".");
   } 



hadoop git commit: HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals before deleting. Contributed by Casey Brotherton.

2015-08-24 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 57c7ae1af -> af7876787


HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals 
before deleting. Contributed by Casey Brotherton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af787678
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af787678
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af787678

Branch: refs/heads/trunk
Commit: af78767870b8296886c03f8be24cf13a4e2bd4b0
Parents: 57c7ae1
Author: Harsh J ha...@cloudera.com
Authored: Tue Aug 25 11:21:19 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Aug 25 11:21:19 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../java/org/apache/hadoop/fs/TrashPolicyDefault.java| 11 +--
 2 files changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af787678/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4250fc3..0ec4ed6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -796,6 +796,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals
+before deleting (Casey Brotherton via harsh)
+
 HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
 is an I/O error during requestShortCircuitShm (cmccabe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af787678/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 88aeab5..1ed8a46 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -161,12 +161,19 @@ public class TrashPolicyDefault extends TrashPolicy {
   @SuppressWarnings("deprecation")
   @Override
   public void createCheckpoint() throws IOException {
+createCheckpoint(new Date());
+  }
+
+  @SuppressWarnings("deprecation")
+  public void createCheckpoint(Date date) throws IOException {
+
 if (!fs.exists(current)) // no trash, no checkpoint
   return;
 
 Path checkpointBase;
 synchronized (CHECKPOINT) {
-  checkpointBase = new Path(trash, CHECKPOINT.format(new Date()));
+  checkpointBase = new Path(trash, CHECKPOINT.format(date));
+
 }
 Path checkpoint = checkpointBase;
 
@@ -287,7 +294,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 TrashPolicyDefault trash = new TrashPolicyDefault(
 fs, home.getPath(), conf);
 trash.deleteCheckpoint();
-trash.createCheckpoint();
+trash.createCheckpoint(new Date(now));
   } catch (IOException e) {
 LOG.warn("Trash caught: "+e+". Skipping "+home.getPath()+".");
   }