hadoop git commit: HDFS-7725. Incorrect 'nodes in service' metrics caused all writes to fail. Contributed by Ming Ma.

2015-04-08 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk a42bb1cd9 -> 6af0d74a7


HDFS-7725. Incorrect 'nodes in service' metrics caused all writes to fail. 
Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6af0d74a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6af0d74a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6af0d74a

Branch: refs/heads/trunk
Commit: 6af0d74a75f0f58d5e92e2e91e87735b9a62bb12
Parents: a42bb1c
Author: Andrew Wang w...@apache.org
Authored: Wed Apr 8 15:52:06 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Wed Apr 8 15:52:06 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../blockmanagement/DecommissionManager.java| 28 +--
 .../blockmanagement/HeartbeatManager.java   | 29 ++--
 .../namenode/TestNamenodeCapacityReport.java|  5 
 4 files changed, 41 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6af0d74a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 852006d..95c6912 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -441,6 +441,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-5215. dfs.datanode.du.reserved is not considered while computing
 available space ( Brahma Reddy Battula via Yongjun Zhang)
 
+HDFS-7725. Incorrect "nodes in service" metrics caused all writes to fail.
+(Ming Ma via wang)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6af0d74a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 9355329..7f3d778 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -197,23 +197,21 @@ public class DecommissionManager {
*/
   @VisibleForTesting
   public void startDecommission(DatanodeDescriptor node) {
-if (!node.isDecommissionInProgress()) {
-  if (!node.isAlive) {
-LOG.info("Dead node {} is decommissioned immediately.", node);
-node.setDecommissioned();
-  } else if (!node.isDecommissioned()) {
+if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
+  // Update DN stats maintained by HeartbeatManager
+  hbManager.startDecommission(node);
+  // hbManager.startDecommission will set dead node to decommissioned.
+  if (node.isDecommissionInProgress()) {
 for (DatanodeStorageInfo storage : node.getStorageInfos()) {
-  LOG.info("Starting decommission of {} {} with {} blocks", 
+  LOG.info("Starting decommission of {} {} with {} blocks",
   node, storage, storage.numBlocks());
 }
-// Update DN stats maintained by HeartbeatManager
-hbManager.startDecommission(node);
 node.decommissioningStatus.setStartTime(monotonicNow());
 pendingNodes.add(node);
   }
 } else {
-  LOG.trace("startDecommission: Node {} is already decommission in "
-  + "progress, nothing to do.", node);
+  LOG.trace("startDecommission: Node {} in {}, nothing to do." +
+  node, node.getAdminState());
 }
   }
 
@@ -221,12 +219,12 @@ public class DecommissionManager {
* Stop decommissioning the specified datanode. 
* @param node
*/
-  void stopDecommission(DatanodeDescriptor node) {
+  @VisibleForTesting
+  public void stopDecommission(DatanodeDescriptor node) {
 if (node.isDecommissionInProgress() || node.isDecommissioned()) {
-  LOG.info("Stopping decommissioning of node {}", node);
   // Update DN stats maintained by HeartbeatManager
   hbManager.stopDecommission(node);
-  // Over-replicated blocks will be detected and processed when 
+  // Over-replicated blocks will be detected and processed when
   // the dead node comes back and send in its full block report.
   if (node.isAlive) {
 blockManager.processOverReplicatedBlocksOnReCommission(node);
@@ -235,8 +233,8 @@ public class DecommissionManager {
   pendingNodes.remove(node);
   

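For readers following the change: the point of the reordering above is that HeartbeatManager.startDecommission now owns both the admin-state transition and the "nodes in service" stat update, including the dead-node case that DecommissionManager previously short-circuited. A minimal stand-alone sketch of that invariant (illustrative names only, not the HDFS code):

class NodeStatsSketch {
  enum AdminState { NORMAL, DECOMMISSION_INPROGRESS, DECOMMISSIONED }

  int nodesInService = 1;          // stat maintained by the heartbeat side
  AdminState state = AdminState.NORMAL;
  boolean alive = true;

  // Mirrors the repaired flow: one guarded transition, one stat update.
  // A dead node goes straight to DECOMMISSIONED, but the counter still
  // changes exactly once, so writes can't be starved by a skewed stat.
  void startDecommission() {
    if (state != AdminState.NORMAL) {
      return;                      // already in progress/done: stats untouched
    }
    nodesInService--;
    state = alive ? AdminState.DECOMMISSION_INPROGRESS
                  : AdminState.DECOMMISSIONED;
  }
}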
hadoop git commit: Revert HDFS-7808.

2015-04-08 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6af0d74a7 -> bd4c99bec


Revert HDFS-7808.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd4c99be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd4c99be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd4c99be

Branch: refs/heads/trunk
Commit: bd4c99bece56d1671c6f89eff8a529f4e7ac2933
Parents: 6af0d74
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 8 15:59:55 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 8 15:59:55 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ---
 .../apache/hadoop/hdfs/tools/DFSHAAdmin.java| 20 
 .../hadoop/hdfs/tools/TestDFSHAAdmin.java   | 20 
 .../hdfs/tools/TestDFSHAAdminMiniCluster.java   |  3 +++
 4 files changed, 43 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd4c99be/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 95c6912..d4a8c0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1235,9 +1235,6 @@ Release 2.7.0 - UNRELEASED
 HDFS-6662. WebHDFS cannot open a file if its path contains "%".
 (Gerson Carlos via wheat9)
 
-HDFS-7808. Remove obsolete -ns options in in DFSHAAdmin.java.
-(Arshad Mohammad via wheat9)
-
 HDFS-7788. Post-2.6 namenode may not start up with an image containing
 inodes created with an old release. (Rushabh Shah via kihwal)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd4c99be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
index 6b6fb30..e9c611d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.io.PrintStream;
+import java.util.Arrays;
 import java.util.Collection;
 
 import org.apache.commons.logging.Log;
@@ -97,6 +98,25 @@ public class DFSHAAdmin extends HAAdmin {
   printUsage(errOut);
   return -1;
 }
+
+int i = 0;
+String cmd = argv[i++];
+
+if ("-ns".equals(cmd)) {
+  if (i == argv.length) {
+errOut.println("Missing nameservice ID");
+printUsage(errOut);
+return -1;
+  }
+  nameserviceId = argv[i++];
+  if (i >= argv.length) {
+errOut.println("Missing command");
+printUsage(errOut);
+return -1;
+  }
+  argv = Arrays.copyOfRange(argv, i, argv.length);
+}
+
 return super.runCmd(argv);
   }
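The restored block implements a simple prefix-stripping pattern: consume the leading -ns <nameserviceId> pair, then hand the remaining argv to the parent HAAdmin runner. A minimal sketch of the same pattern (class name and output are illustrative only, not the HDFS code):

import java.util.Arrays;

public class NameserviceArgsSketch {
  public static void main(String[] args) {
    String[] argv = {"-ns", "ns1", "-failover", "nn1", "nn2"};
    int i = 0;
    if ("-ns".equals(argv[i])) {
      i++;                                  // consume the flag
      String nameserviceId = argv[i++];     // consume the ID ("ns1")
      argv = Arrays.copyOfRange(argv, i, argv.length);
      System.out.println("ns=" + nameserviceId
          + ", cmd=" + Arrays.toString(argv)); // ns=ns1, cmd=[-failover, nn1, nn2]
    }
  }
}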
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd4c99be/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
index 8ecc71a..33da4d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
@@ -147,6 +147,17 @@ public class TestDFSHAAdmin {
   }
   
   @Test
+  public void testNameserviceOption() throws Exception {
+assertEquals(-1, runTool("-ns"));
+assertOutputContains("Missing nameservice ID");
+assertEquals(-1, runTool("-ns", "ns1"));
+assertOutputContains("Missing command");
+// ns1 isn't defined but we check this lazily and help doesn't use the ns
+assertEquals(0, runTool("-ns", "ns1", "-help", "transitionToActive"));
+assertOutputContains("Transitions the service into Active");
+  }
+
+  @Test
   public void testNamenodeResolution() throws Exception {
 
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
 assertEquals(0, runTool("-getServiceState", "nn1"));
@@ -268,6 +279,15 @@ public class TestDFSHAAdmin {
   }
 
   @Test
+  public void testFailoverWithFencerAndNameservice() throws Exception {
+
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
+HdfsConfiguration conf = getHAConf();
+

hadoop git commit: Revert HDFS-7808.

2015-04-08 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 12739b541 -> eb53d6b96


Revert HDFS-7808.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb53d6b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb53d6b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb53d6b9

Branch: refs/heads/branch-2.7
Commit: eb53d6b9679f1ce483201b85c48728f5e2ffee01
Parents: 12739b5
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 8 15:59:55 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 8 16:00:26 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ---
 .../apache/hadoop/hdfs/tools/DFSHAAdmin.java| 20 
 .../hadoop/hdfs/tools/TestDFSHAAdmin.java   | 20 
 .../hdfs/tools/TestDFSHAAdminMiniCluster.java   |  3 +++
 4 files changed, 43 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb53d6b9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3fe17cd..8f0d80d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -793,9 +793,6 @@ Release 2.7.0 - UNRELEASED
 HDFS-6662. WebHDFS cannot open a file if its path contains "%".
 (Gerson Carlos via wheat9)
 
-HDFS-7808. Remove obsolete -ns options in in DFSHAAdmin.java.
-(Arshad Mohammad via wheat9)
-
 HDFS-7788. Post-2.6 namenode may not start up with an image containing
 inodes created with an old release. (Rushabh Shah via kihwal)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb53d6b9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
index 6b6fb30..e9c611d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.io.PrintStream;
+import java.util.Arrays;
 import java.util.Collection;
 
 import org.apache.commons.logging.Log;
@@ -97,6 +98,25 @@ public class DFSHAAdmin extends HAAdmin {
   printUsage(errOut);
   return -1;
 }
+
+int i = 0;
+String cmd = argv[i++];
+
+if ("-ns".equals(cmd)) {
+  if (i == argv.length) {
+errOut.println("Missing nameservice ID");
+printUsage(errOut);
+return -1;
+  }
+  nameserviceId = argv[i++];
+  if (i >= argv.length) {
+errOut.println("Missing command");
+printUsage(errOut);
+return -1;
+  }
+  argv = Arrays.copyOfRange(argv, i, argv.length);
+}
+
 return super.runCmd(argv);
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb53d6b9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
index 8ecc71a..33da4d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
@@ -147,6 +147,17 @@ public class TestDFSHAAdmin {
   }
   
   @Test
+  public void testNameserviceOption() throws Exception {
+assertEquals(-1, runTool("-ns"));
+assertOutputContains("Missing nameservice ID");
+assertEquals(-1, runTool("-ns", "ns1"));
+assertOutputContains("Missing command");
+// ns1 isn't defined but we check this lazily and help doesn't use the ns
+assertEquals(0, runTool("-ns", "ns1", "-help", "transitionToActive"));
+assertOutputContains("Transitions the service into Active");
+  }
+
+  @Test
   public void testNamenodeResolution() throws Exception {
 
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
 assertEquals(0, runTool("-getServiceState", "nn1"));
@@ -268,6 +279,15 @@ public class TestDFSHAAdmin {
   }
 
   @Test
+  public void testFailoverWithFencerAndNameservice() throws Exception {
+
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
+HdfsConfiguration conf = getHAConf();
+

hadoop git commit: HDFS-8076. Code cleanup for DFSInputStream: use offset instead of LocatedBlock when possible. Contributed by Zhe Zhang.

2015-04-08 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 265ed1fe8 -> a42bb1cd9


HDFS-8076. Code cleanup for DFSInputStream: use offset instead of LocatedBlock 
when possible. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a42bb1cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a42bb1cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a42bb1cd

Branch: refs/heads/trunk
Commit: a42bb1cd915abe5dc33eda3c01e8c74c64f35748
Parents: 265ed1f
Author: Andrew Wang w...@apache.org
Authored: Wed Apr 8 15:41:48 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Wed Apr 8 15:41:48 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 40 ++--
 2 files changed, 24 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a42bb1cd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 35e9d54..852006d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -396,6 +396,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8046. Allow better control of getContentSummary (kihwal)
 
+HDFS-8076. Code cleanup for DFSInputStream: use offset instead of
+LocatedBlock when possible. (Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a42bb1cd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index cf8015f..a9f2746 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1045,16 +1045,16 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 return errMsgr.toString();
   }
 
-  private void fetchBlockByteRange(LocatedBlock block, long start, long end,
+  private void fetchBlockByteRange(long blockStartOffset, long start, long end,
   byte[] buf, int offset,
  Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
   throws IOException {
-block = getBlockAt(block.getStartOffset());
+LocatedBlock block = getBlockAt(blockStartOffset);
 while (true) {
   DNAddrPair addressPair = chooseDataNode(block, null);
   try {
-actualGetFromOneDataNode(addressPair, block, start, end, buf, offset,
-corruptedBlockMap);
+actualGetFromOneDataNode(addressPair, blockStartOffset, start, end,
+buf, offset, corruptedBlockMap);
 return;
   } catch (IOException e) {
 // Ignore. Already processed inside the function.
@@ -1064,7 +1064,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   }
 
  private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode,
-  final LocatedBlock block, final long start, final long end,
+  final long blockStartOffset, final long start, final long end,
   final ByteBuffer bb,
  final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
   final int hedgedReadId) {
@@ -1077,8 +1077,8 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 TraceScope scope =
 Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
 try {
-  actualGetFromOneDataNode(datanode, block, start, end, buf, offset,
-  corruptedBlockMap);
+  actualGetFromOneDataNode(datanode, blockStartOffset, start, end, buf,
+  offset, corruptedBlockMap);
   return bb;
 } finally {
   scope.close();
@@ -1088,7 +1088,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   }
 
   private void actualGetFromOneDataNode(final DNAddrPair datanode,
-  LocatedBlock block, final long start, final long end, byte[] buf,
+  long blockStartOffset, final long start, final long end, byte[] buf,
  int offset, Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
   throws IOException {
 DFSClientFaultInjector.get().startFetchFromDatanode();
@@ -1101,7 +1101,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   // start of the loop.
   CachingStrategy curCachingStrategy;
   boolean allowShortCircuitLocalReads;
-   
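The pattern behind this cleanup: instead of threading a mutable LocatedBlock through the retry and hedged-read paths, the methods now take the block's immutable start offset and re-resolve the block (getBlockAt) where it is used, so a retry never acts on a stale reference. A minimal stand-alone sketch of the idea, with a Map standing in for the client's cached block list (illustrative names, not the HDFS code):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class BlockMapSketch {
  // Stand-in for the cached list of located blocks, keyed by start offset.
  private final Map<Long, String> locatedBlocks = new ConcurrentHashMap<>();

  String fetchByOffset(long blockStartOffset) {
    // Resolve at use time: if the block list was refreshed between
    // retries, this lookup observes the updated entry.
    String block = locatedBlocks.get(blockStartOffset);
    if (block == null) {
      throw new IllegalStateException("no block at " + blockStartOffset);
    }
    return block;
  }
}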

hadoop git commit: HDFS-8076. Code cleanup for DFSInputStream: use offset instead of LocatedBlock when possible. Contributed by Zhe Zhang.

2015-04-08 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 55b794e7f -> 9792500c5


HDFS-8076. Code cleanup for DFSInputStream: use offset instead of LocatedBlock 
when possible. Contributed by Zhe Zhang.

(cherry picked from commit a42bb1cd915abe5dc33eda3c01e8c74c64f35748)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9792500c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9792500c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9792500c

Branch: refs/heads/branch-2
Commit: 9792500c54c453cd12027f07ffa109fe201b27a3
Parents: 55b794e
Author: Andrew Wang w...@apache.org
Authored: Wed Apr 8 15:41:48 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Wed Apr 8 15:41:54 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 40 ++--
 2 files changed, 24 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9792500c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e7af8dc..4ae4881 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -81,6 +81,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8046. Allow better control of getContentSummary (kihwal)
 
+HDFS-8076. Code cleanup for DFSInputStream: use offset instead of
+LocatedBlock when possible. (Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9792500c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index cf8015f..a9f2746 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1045,16 +1045,16 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 return errMsgr.toString();
   }
 
-  private void fetchBlockByteRange(LocatedBlock block, long start, long end,
+  private void fetchBlockByteRange(long blockStartOffset, long start, long end,
   byte[] buf, int offset,
  Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
   throws IOException {
-block = getBlockAt(block.getStartOffset());
+LocatedBlock block = getBlockAt(blockStartOffset);
 while (true) {
   DNAddrPair addressPair = chooseDataNode(block, null);
   try {
-actualGetFromOneDataNode(addressPair, block, start, end, buf, offset,
-corruptedBlockMap);
+actualGetFromOneDataNode(addressPair, blockStartOffset, start, end,
+buf, offset, corruptedBlockMap);
 return;
   } catch (IOException e) {
 // Ignore. Already processed inside the function.
@@ -1064,7 +1064,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   }
 
  private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode,
-  final LocatedBlock block, final long start, final long end,
+  final long blockStartOffset, final long start, final long end,
   final ByteBuffer bb,
  final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
   final int hedgedReadId) {
@@ -1077,8 +1077,8 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 TraceScope scope =
 Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
 try {
-  actualGetFromOneDataNode(datanode, block, start, end, buf, offset,
-  corruptedBlockMap);
+  actualGetFromOneDataNode(datanode, blockStartOffset, start, end, buf,
+  offset, corruptedBlockMap);
   return bb;
 } finally {
   scope.close();
@@ -1088,7 +1088,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   }
 
   private void actualGetFromOneDataNode(final DNAddrPair datanode,
-  LocatedBlock block, final long start, final long end, byte[] buf,
+  long blockStartOffset, final long start, final long end, byte[] buf,
  int offset, Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
   throws IOException {
 DFSClientFaultInjector.get().startFetchFromDatanode();
@@ -1101,7 +1101,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   // start of the loop.
   

hadoop git commit: Revert HDFS-7813.

2015-04-08 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 35816c438 -> 6d2eca081


Revert HDFS-7813.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d2eca08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d2eca08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d2eca08

Branch: refs/heads/branch-2
Commit: 6d2eca081d21767313d529707654182482a0ec07
Parents: 35816c4
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 8 16:02:45 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 8 16:03:20 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 ---
 .../org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java   | 1 -
 2 files changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d2eca08/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4612a1d..26117e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -934,9 +934,6 @@ Release 2.7.0 - UNRELEASED
 HDFS-7814. Fix usage string of storageType parameter for
 dfsadmin -setSpaceQuota/clrSpaceQuota. (Xiaoyu Yao via cnauroth)
 
-HDFS-7813. TestDFSHAAdminMiniCluster#testFencer testcase is failing
-frequently. (Rakesh R via cnauroth)
-
 HDFS-7009. Active NN and standby NN have different live nodes.
 (Ming Ma via cnauroth)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d2eca08/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
index 2910004..ee1c184 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
@@ -155,7 +155,6 @@ public class TestDFSHAAdminMiniCluster {
 tool.setConf(conf);
 assertEquals(0, runTool("-transitionToActive", "nn1"));
 assertEquals(0, runTool("-failover", "nn1", "nn2"));
-assertEquals(0, runTool("-failover", "nn2", "nn1"));
 
 // Test failover with fencer and nameservice
 assertEquals(0, runTool("-ns", "minidfs-ns", "-failover", "nn2", "nn1"));
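For reference, the last assertion above models a command-line invocation along the lines of: hdfs haadmin -ns minidfs-ns -failover nn2 nn1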



hadoop git commit: HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can complete without blocks being replicated. Contributed by Ming Ma.

2015-04-08 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 82d56b337 -> 5a540c3d3


HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can complete 
without blocks being replicated. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a540c3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a540c3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a540c3d

Branch: refs/heads/trunk
Commit: 5a540c3d3107199f4632e2ad7ee8ff913b107a04
Parents: 82d56b3
Author: Andrew Wang w...@apache.org
Authored: Wed Apr 8 16:09:17 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Wed Apr 8 16:09:17 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/BlockManager.java|  5 +++
 .../apache/hadoop/hdfs/TestDecommission.java| 32 
 3 files changed, 20 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a540c3d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b203770..1fdf6aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -399,6 +399,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8076. Code cleanup for DFSInputStream: use offset instead of
 LocatedBlock when possible. (Zhe Zhang via wang)
 
+HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can
+complete without blocks being replicated. (Ming Ma via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a540c3d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 524afa0..9a6535e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3305,6 +3305,11 @@ public class BlockManager {
* liveness. Dead nodes cannot always be safely decommissioned.
*/
   boolean isNodeHealthyForDecommission(DatanodeDescriptor node) {
+if (!node.checkBlockReportReceived()) {
+  LOG.info("Node {} hasn't sent its first block report.", node);
+  return false;
+}
+
 if (node.isAlive) {
   return true;
 }
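The guard added above makes the health check conservative: until a registered node has sent its first block report, the NameNode's view of that node's blocks is empty, so a decommission started in that window could "complete" without anything being replicated. A tiny sketch of the predicate (illustrative names only; the real method applies further checks for dead nodes):

class DecommissionHealthSketch {
  boolean blockReportReceived;
  boolean alive;

  boolean isHealthyForDecommission() {
    if (!blockReportReceived) {
      return false;   // unknown block set: never safe to finish decommission
    }
    return alive;     // the real method has extra logic for dead nodes
  }
}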

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a540c3d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 081e40f..1ab7427 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -882,9 +882,12 @@ public class TestDecommission {
 int numNamenodes = 1;
 int numDatanodes = 1;
 int replicas = 1;
-
+conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
+DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
+conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 5);
+
 startCluster(numNamenodes, numDatanodes, conf);
-Path file1 = new Path("testDecommission.dat");
+Path file1 = new Path("testDecommissionWithNamenodeRestart.dat");
 FileSystem fileSys = cluster.getFileSystem();
 writeFile(fileSys, file1, replicas);
 
@@ -894,37 +897,26 @@ public class TestDecommission {
 String excludedDatanodeName = info[0].getXferAddr();
 
 writeConfigFile(excludeFile, new 
ArrayList<String>(Arrays.asList(excludedDatanodeName)));
-
+
 //Add a new datanode to cluster
 cluster.startDataNodes(conf, 1, true, null, null, null, null);
 numDatanodes+=1;
-
+
 assertEquals("Number of datanodes should be 2 ", 2, 
cluster.getDataNodes().size());
 //Restart the namenode
 cluster.restartNameNode();
 DatanodeInfo datanodeInfo = NameNodeAdapter.getDatanode(
 cluster.getNamesystem(), excludedDatanodeID);
 waitNodeState(datanodeInfo, 

hadoop git commit: HADOOP-11814. Reformat hadoop-annotations, o.a.h.classification.tools. Contributed by Li Lu.

2015-04-08 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 88ef75f16 -> eafee9a04


HADOOP-11814. Reformat hadoop-annotations, o.a.h.classification.tools. 
Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eafee9a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eafee9a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eafee9a0

Branch: refs/heads/branch-2
Commit: eafee9a04e87ab3d543b931051eb174c065a011d
Parents: 88ef75f
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 8 17:56:23 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 8 17:56:30 2015 -0700

--
 .../ExcludePrivateAnnotationsJDiffDoclet.java   |   2 +-
 ...ExcludePrivateAnnotationsStandardDoclet.java |   2 +-
 .../classification/tools/RootDocProcessor.java  | 250 +--
 .../classification/tools/StabilityOptions.java  |  12 +-
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 5 files changed, 136 insertions(+), 133 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eafee9a0/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
--
diff --git 
a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
 
b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
index 66913ff..5cc422f 100644
--- 
a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
+++ 
b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
@@ -38,7 +38,7 @@ public class ExcludePrivateAnnotationsJDiffDoclet {
   
   public static boolean start(RootDoc root) {
 System.out.println(
-   ExcludePrivateAnnotationsJDiffDoclet.class.getSimpleName());
+ExcludePrivateAnnotationsJDiffDoclet.class.getSimpleName());
 return JDiff.start(RootDocProcessor.process(root));
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eafee9a0/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
--
diff --git 
a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
 
b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
index 62c44ea..2176ea5 100644
--- 
a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
+++ 
b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
@@ -37,7 +37,7 @@ public class ExcludePrivateAnnotationsStandardDoclet {
   
   public static boolean start(RootDoc root) {
 System.out.println(
-   ExcludePrivateAnnotationsStandardDoclet.class.getSimpleName());
+ExcludePrivateAnnotationsStandardDoclet.class.getSimpleName());
 return Standard.start(RootDocProcessor.process(root));
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eafee9a0/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
--
diff --git 
a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
 
b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
index a6ce035..8042f17 100644
--- 
a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
+++ 
b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
@@ -48,47 +48,47 @@ import org.apache.hadoop.classification.InterfaceStability;
  * Based on code from 
http://www.sixlegs.com/blog/java/exclude-javadoc-tag.html.
  */
 class RootDocProcessor {
-  
+
   static String stability = StabilityOptions.UNSTABLE_OPTION;
   static boolean treatUnannotatedClassesAsPrivate = false;
-  
+
   public static RootDoc process(RootDoc root) {
 return (RootDoc) process(root, RootDoc.class);
   }
-  
-  private static Object process(Object obj, Class<?> type) { 
-if (obj == null) { 
-  return null; 
-   

hadoop git commit: HADOOP-11814. Reformat hadoop-annotations, o.a.h.classification.tools. Contributed by Li Lu.

2015-04-08 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk cc2582354 -> dc0282d64


HADOOP-11814. Reformat hadoop-annotations, o.a.h.classification.tools. 
Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc0282d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc0282d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc0282d6

Branch: refs/heads/trunk
Commit: dc0282d64c6528b02aa9f2df49be01223f087081
Parents: cc25823
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 8 17:56:23 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 8 17:56:23 2015 -0700

--
 .../ExcludePrivateAnnotationsJDiffDoclet.java   |   2 +-
 ...ExcludePrivateAnnotationsStandardDoclet.java |   2 +-
 .../classification/tools/RootDocProcessor.java  | 250 +--
 .../classification/tools/StabilityOptions.java  |  12 +-
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 5 files changed, 136 insertions(+), 133 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc0282d6/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
--
diff --git 
a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
 
b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
index 66913ff..5cc422f 100644
--- 
a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
+++ 
b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
@@ -38,7 +38,7 @@ public class ExcludePrivateAnnotationsJDiffDoclet {
   
   public static boolean start(RootDoc root) {
 System.out.println(
-   ExcludePrivateAnnotationsJDiffDoclet.class.getSimpleName());
+ExcludePrivateAnnotationsJDiffDoclet.class.getSimpleName());
 return JDiff.start(RootDocProcessor.process(root));
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc0282d6/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
--
diff --git 
a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
 
b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
index 62c44ea..2176ea5 100644
--- 
a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
+++ 
b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
@@ -37,7 +37,7 @@ public class ExcludePrivateAnnotationsStandardDoclet {
   
   public static boolean start(RootDoc root) {
 System.out.println(
-   ExcludePrivateAnnotationsStandardDoclet.class.getSimpleName());
+ExcludePrivateAnnotationsStandardDoclet.class.getSimpleName());
 return Standard.start(RootDocProcessor.process(root));
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc0282d6/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
--
diff --git 
a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
 
b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
index a6ce035..8042f17 100644
--- 
a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
+++ 
b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
@@ -48,47 +48,47 @@ import org.apache.hadoop.classification.InterfaceStability;
  * Based on code from 
http://www.sixlegs.com/blog/java/exclude-javadoc-tag.html.
  */
 class RootDocProcessor {
-  
+
   static String stability = StabilityOptions.UNSTABLE_OPTION;
   static boolean treatUnannotatedClassesAsPrivate = false;
-  
+
   public static RootDoc process(RootDoc root) {
 return (RootDoc) process(root, RootDoc.class);
   }
-  
-  private static Object process(Object obj, Class<?> type) { 
-if (obj == null) { 
-  return null; 
-} 
- 

hadoop git commit: HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can complete without blocks being replicated. Contributed by Ming Ma.

2015-04-08 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6d2eca081 -> 069366e1b


HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can complete 
without blocks being replicated. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/069366e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/069366e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/069366e1

Branch: refs/heads/branch-2
Commit: 069366e1beaf57c26e19eb63eb5bc08e8d24562f
Parents: 6d2eca0
Author: Andrew Wang w...@apache.org
Authored: Wed Apr 8 16:09:17 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Wed Apr 8 16:09:28 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/BlockManager.java|  5 +++
 .../apache/hadoop/hdfs/TestDecommission.java| 32 
 3 files changed, 20 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/069366e1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 26117e9..d10123f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -84,6 +84,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8076. Code cleanup for DFSInputStream: use offset instead of
 LocatedBlock when possible. (Zhe Zhang via wang)
 
+HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can
+complete without blocks being replicated. (Ming Ma via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/069366e1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index b2babf9..fd0db8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3308,6 +3308,11 @@ public class BlockManager {
* liveness. Dead nodes cannot always be safely decommissioned.
*/
   boolean isNodeHealthyForDecommission(DatanodeDescriptor node) {
+if (!node.checkBlockReportReceived()) {
+  LOG.info("Node {} hasn't sent its first block report.", node);
+  return false;
+}
+
 if (node.isAlive) {
   return true;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/069366e1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 081e40f..1ab7427 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -882,9 +882,12 @@ public class TestDecommission {
 int numNamenodes = 1;
 int numDatanodes = 1;
 int replicas = 1;
-
+conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
+DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
+conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 5);
+
 startCluster(numNamenodes, numDatanodes, conf);
-Path file1 = new Path("testDecommission.dat");
+Path file1 = new Path("testDecommissionWithNamenodeRestart.dat");
 FileSystem fileSys = cluster.getFileSystem();
 writeFile(fileSys, file1, replicas);
 
@@ -894,37 +897,26 @@ public class TestDecommission {
 String excludedDatanodeName = info[0].getXferAddr();
 
 writeConfigFile(excludeFile, new 
ArrayList<String>(Arrays.asList(excludedDatanodeName)));
-
+
 //Add a new datanode to cluster
 cluster.startDataNodes(conf, 1, true, null, null, null, null);
 numDatanodes+=1;
-
+
 assertEquals("Number of datanodes should be 2 ", 2, 
cluster.getDataNodes().size());
 //Restart the namenode
 cluster.restartNameNode();
 DatanodeInfo datanodeInfo = NameNodeAdapter.getDatanode(
 cluster.getNamesystem(), excludedDatanodeID);
 waitNodeState(datanodeInfo, 

hadoop git commit: HDFS-7725. Incorrect 'nodes in service' metrics caused all writes to fail. Contributed by Ming Ma.

2015-04-08 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9792500c5 -> 8104d5226


HDFS-7725. Incorrect 'nodes in service' metrics caused all writes to fail. 
Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8104d522
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8104d522
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8104d522

Branch: refs/heads/branch-2
Commit: 8104d522690fe9556177893770a388291cea0749
Parents: 9792500
Author: Andrew Wang w...@apache.org
Authored: Wed Apr 8 15:52:06 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Wed Apr 8 15:52:15 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../blockmanagement/DecommissionManager.java| 28 +--
 .../blockmanagement/HeartbeatManager.java   | 29 ++--
 .../namenode/TestNamenodeCapacityReport.java|  5 
 4 files changed, 41 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8104d522/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4ae4881..32957a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -126,6 +126,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-5215. dfs.datanode.du.reserved is not considered while computing
 available space ( Brahma Reddy Battula via Yongjun Zhang)
 
+HDFS-7725. Incorrect "nodes in service" metrics caused all writes to fail.
+(Ming Ma via wang)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8104d522/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 9355329..7f3d778 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -197,23 +197,21 @@ public class DecommissionManager {
*/
   @VisibleForTesting
   public void startDecommission(DatanodeDescriptor node) {
-if (!node.isDecommissionInProgress()) {
-  if (!node.isAlive) {
-LOG.info("Dead node {} is decommissioned immediately.", node);
-node.setDecommissioned();
-  } else if (!node.isDecommissioned()) {
+if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
+  // Update DN stats maintained by HeartbeatManager
+  hbManager.startDecommission(node);
+  // hbManager.startDecommission will set dead node to decommissioned.
+  if (node.isDecommissionInProgress()) {
 for (DatanodeStorageInfo storage : node.getStorageInfos()) {
-  LOG.info("Starting decommission of {} {} with {} blocks", 
+  LOG.info("Starting decommission of {} {} with {} blocks",
   node, storage, storage.numBlocks());
 }
-// Update DN stats maintained by HeartbeatManager
-hbManager.startDecommission(node);
 node.decommissioningStatus.setStartTime(monotonicNow());
 pendingNodes.add(node);
   }
 } else {
-  LOG.trace("startDecommission: Node {} is already decommission in "
-  + "progress, nothing to do.", node);
+  LOG.trace("startDecommission: Node {} in {}, nothing to do." +
+  node, node.getAdminState());
 }
   }
 
@@ -221,12 +219,12 @@ public class DecommissionManager {
* Stop decommissioning the specified datanode. 
* @param node
*/
-  void stopDecommission(DatanodeDescriptor node) {
+  @VisibleForTesting
+  public void stopDecommission(DatanodeDescriptor node) {
 if (node.isDecommissionInProgress() || node.isDecommissioned()) {
-  LOG.info("Stopping decommissioning of node {}", node);
   // Update DN stats maintained by HeartbeatManager
   hbManager.stopDecommission(node);
-  // Over-replicated blocks will be detected and processed when 
+  // Over-replicated blocks will be detected and processed when
   // the dead node comes back and send in its full block report.
   if (node.isAlive) {
 blockManager.processOverReplicatedBlocksOnReCommission(node);
@@ -235,8 +233,8 @@ public class DecommissionManager {
   pendingNodes.remove(node);
   

[1/3] hadoop git commit: HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. Contributed by Haohui Mai.

2015-04-08 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5a540c3d3 -> cc2582354


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc258235/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
deleted file mode 100644
index 94a7f8e..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Integer parameter. */
-abstract class IntegerParam extends Param<Integer, IntegerParam.Domain> {
-  IntegerParam(final Domain domain, final Integer value,
-  final Integer min, final Integer max) {
-super(domain, value);
-checkRange(min, max);
-  }
-
-  private void checkRange(final Integer min, final Integer max) {
-if (value == null) {
-  return;
-}
-if (min != null && value < min) {
-  throw new IllegalArgumentException("Invalid parameter range: " + getName()
-  + " = " + domain.toString(value) + " < " + domain.toString(min));
-}
-if (max != null && value > max) {
-  throw new IllegalArgumentException("Invalid parameter range: " + getName()
-  + " = " + domain.toString(value) + " > " + domain.toString(max));
-}
-  }
-  
-  @Override
-  public String toString() {
-return getName() + "=" + domain.toString(getValue());
-  }
-
-  /** @return the parameter value as a string */
-  @Override
-  public String getValueString() {
-return domain.toString(getValue());
-  }
-
-  /** The domain of the parameter. */
-  static final class Domain extends Param.Domain<Integer> {
-/** The radix of the number. */
-final int radix;
-
-Domain(final String paramName) {
-  this(paramName, 10);
-}
-
-Domain(final String paramName, final int radix) {
-  super(paramName);
-  this.radix = radix;
-}
-
-@Override
-public String getDomain() {
-  return "<" + NULL + " | int in radix " + radix + ">";
-}
-
-@Override
-Integer parse(final String str) {
-  try{
-return NULL.equals(str) || str == null ? null : Integer.parseInt(str,
-  radix);
-  } catch(NumberFormatException e) {
-throw new IllegalArgumentException("Failed to parse \"" + str
-+ "\" as a radix-" + radix + " integer.", e);
-  }
-}
-
-/** Convert an Integer to a String. */ 
-String toString(final Integer n) {
-  return n == null? NULL: Integer.toString(n, radix);
-}
-  }
-}
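The deleted file (it moves to the hdfs-client jar in this change) is a radix-aware integer parameter: Domain.parse accepts an integer in the configured radix with NULL as the unset sentinel, and toString formats it back in the same radix. A tiny illustration of that behavior using plain JDK calls (values are examples only):

public class RadixParamSketch {
  public static void main(String[] args) {
    // Mirrors Domain.parse / Domain.toString for a radix-16 parameter.
    String raw = "1ff";
    Integer value = "NULL".equals(raw) ? null : Integer.parseInt(raw, 16);
    System.out.println(value);                        // 511
    System.out.println(Integer.toString(value, 16));  // 1ff
  }
}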

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc258235/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
deleted file mode 100644
index 5a609ee..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing 

[3/3] hadoop git commit: HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. Contributed by Haohui Mai.

2015-04-08 Thread wheat9
HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. Contributed by 
Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc258235
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc258235
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc258235

Branch: refs/heads/trunk
Commit: cc25823546643caf22bab63ec85fe0c8939593d8
Parents: 5a540c3
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 8 16:30:08 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 8 16:30:08 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |   9 ++
 .../hdfs/client/HdfsClientConfigKeys.java   |   9 +-
 .../hdfs/web/resources/AccessTimeParam.java |  49 +++
 .../hdfs/web/resources/AclPermissionParam.java  |  69 ++
 .../hdfs/web/resources/BlockSizeParam.java  |  60 +
 .../hadoop/hdfs/web/resources/BooleanParam.java |  57 
 .../hdfs/web/resources/ConcatSourcesParam.java  |  65 +
 .../hdfs/web/resources/CreateParentParam.java   |  49 +++
 .../hdfs/web/resources/DelegationParam.java |  44 ++
 .../hdfs/web/resources/DeleteOpParam.java   |  82 
 .../hdfs/web/resources/DestinationParam.java|  54 
 .../hadoop/hdfs/web/resources/DoAsParam.java|  41 ++
 .../hadoop/hdfs/web/resources/EnumParam.java|  47 +++
 .../hadoop/hdfs/web/resources/EnumSetParam.java |  92 +
 .../web/resources/ExcludeDatanodesParam.java|  42 ++
 .../hdfs/web/resources/FsActionParam.java   |  58 
 .../hadoop/hdfs/web/resources/GetOpParam.java   | 106 +++
 .../hadoop/hdfs/web/resources/GroupParam.java   |  41 ++
 .../hadoop/hdfs/web/resources/HttpOpParam.java  | 134 +++
 .../hadoop/hdfs/web/resources/IntegerParam.java |  88 
 .../hadoop/hdfs/web/resources/LengthParam.java  |  54 
 .../hadoop/hdfs/web/resources/LongParam.java|  87 
 .../web/resources/ModificationTimeParam.java|  49 +++
 .../hdfs/web/resources/NewLengthParam.java  |  49 +++
 .../hadoop/hdfs/web/resources/OffsetParam.java  |  54 
 .../web/resources/OldSnapshotNameParam.java |  40 ++
 .../hdfs/web/resources/OverwriteParam.java  |  49 +++
 .../hadoop/hdfs/web/resources/OwnerParam.java   |  41 ++
 .../apache/hadoop/hdfs/web/resources/Param.java | 122 +
 .../hdfs/web/resources/PermissionParam.java |  64 +
 .../hadoop/hdfs/web/resources/PostOpParam.java  |  88 
 .../hadoop/hdfs/web/resources/PutOpParam.java   | 114 
 .../hdfs/web/resources/RecursiveParam.java  |  49 +++
 .../web/resources/RenameOptionSetParam.java |  52 +++
 .../hadoop/hdfs/web/resources/RenewerParam.java |  41 ++
 .../hdfs/web/resources/ReplicationParam.java|  60 +
 .../hadoop/hdfs/web/resources/ShortParam.java   |  88 
 .../hdfs/web/resources/SnapshotNameParam.java   |  41 ++
 .../hadoop/hdfs/web/resources/StringParam.java  |  60 +
 .../hdfs/web/resources/TokenArgumentParam.java  |  44 ++
 .../hadoop/hdfs/web/resources/UserParam.java|  82 
 .../hdfs/web/resources/XAttrEncodingParam.java  |  56 
 .../hdfs/web/resources/XAttrNameParam.java  |  44 ++
 .../hdfs/web/resources/XAttrSetFlagParam.java   |  53 
 .../hdfs/web/resources/XAttrValueParam.java |  45 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  18 ++-
 .../hdfs/web/resources/AccessTimeParam.java |  49 ---
 .../hdfs/web/resources/AclPermissionParam.java  |  68 --
 .../hdfs/web/resources/BlockSizeParam.java  |  60 -
 .../hadoop/hdfs/web/resources/BooleanParam.java |  57 
 .../hdfs/web/resources/ConcatSourcesParam.java  |  65 -
 .../hdfs/web/resources/CreateParentParam.java   |  49 ---
 .../hdfs/web/resources/DelegationParam.java |  44 --
 .../hdfs/web/resources/DeleteOpParam.java   |  82 
 .../hdfs/web/resources/DestinationParam.java|  54 
 .../hadoop/hdfs/web/resources/DoAsParam.java|  41 --
 .../hadoop/hdfs/web/resources/EnumParam.java|  47 ---
 .../hadoop/hdfs/web/resources/EnumSetParam.java |  92 -
 .../web/resources/ExcludeDatanodesParam.java|  42 --
 .../hdfs/web/resources/FsActionParam.java   |  58 
 .../hadoop/hdfs/web/resources/GetOpParam.java   | 106 ---
 .../hadoop/hdfs/web/resources/GroupParam.java   |  41 --
 .../hadoop/hdfs/web/resources/HttpOpParam.java  | 134 ---
 .../hadoop/hdfs/web/resources/IntegerParam.java |  88 
 .../hadoop/hdfs/web/resources/LengthParam.java  |  54 
 .../hadoop/hdfs/web/resources/LongParam.java|  87 

[3/3] hadoop git commit: HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. Contributed by Haohui Mai.

2015-04-08 Thread wheat9
HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. Contributed by 
Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88ef75f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88ef75f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88ef75f1

Branch: refs/heads/branch-2
Commit: 88ef75f1679bb0ee3733968f46a791caf850f7c6
Parents: 069366e
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 8 16:30:08 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 8 16:30:57 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |   9 ++
 .../hdfs/client/HdfsClientConfigKeys.java   |   9 +-
 .../hdfs/web/resources/AccessTimeParam.java |  49 +++
 .../hdfs/web/resources/AclPermissionParam.java  |  69 ++
 .../hdfs/web/resources/BlockSizeParam.java  |  60 +
 .../hadoop/hdfs/web/resources/BooleanParam.java |  57 
 .../hdfs/web/resources/ConcatSourcesParam.java  |  65 +
 .../hdfs/web/resources/CreateParentParam.java   |  49 +++
 .../hdfs/web/resources/DelegationParam.java |  44 ++
 .../hdfs/web/resources/DeleteOpParam.java   |  82 
 .../hdfs/web/resources/DestinationParam.java|  54 
 .../hadoop/hdfs/web/resources/DoAsParam.java|  41 ++
 .../hadoop/hdfs/web/resources/EnumParam.java|  47 +++
 .../hadoop/hdfs/web/resources/EnumSetParam.java |  92 +
 .../web/resources/ExcludeDatanodesParam.java|  42 ++
 .../hdfs/web/resources/FsActionParam.java   |  58 
 .../hadoop/hdfs/web/resources/GetOpParam.java   | 106 +++
 .../hadoop/hdfs/web/resources/GroupParam.java   |  41 ++
 .../hadoop/hdfs/web/resources/HttpOpParam.java  | 134 +++
 .../hadoop/hdfs/web/resources/IntegerParam.java |  88 
 .../hadoop/hdfs/web/resources/LengthParam.java  |  54 
 .../hadoop/hdfs/web/resources/LongParam.java|  87 
 .../web/resources/ModificationTimeParam.java|  49 +++
 .../hdfs/web/resources/NewLengthParam.java  |  49 +++
 .../hadoop/hdfs/web/resources/OffsetParam.java  |  54 
 .../web/resources/OldSnapshotNameParam.java |  40 ++
 .../hdfs/web/resources/OverwriteParam.java  |  49 +++
 .../hadoop/hdfs/web/resources/OwnerParam.java   |  41 ++
 .../apache/hadoop/hdfs/web/resources/Param.java | 122 +
 .../hdfs/web/resources/PermissionParam.java |  64 +
 .../hadoop/hdfs/web/resources/PostOpParam.java  |  88 
 .../hadoop/hdfs/web/resources/PutOpParam.java   | 114 
 .../hdfs/web/resources/RecursiveParam.java  |  49 +++
 .../web/resources/RenameOptionSetParam.java |  52 +++
 .../hadoop/hdfs/web/resources/RenewerParam.java |  41 ++
 .../hdfs/web/resources/ReplicationParam.java|  60 +
 .../hadoop/hdfs/web/resources/ShortParam.java   |  88 
 .../hdfs/web/resources/SnapshotNameParam.java   |  41 ++
 .../hadoop/hdfs/web/resources/StringParam.java  |  60 +
 .../hdfs/web/resources/TokenArgumentParam.java  |  44 ++
 .../hadoop/hdfs/web/resources/UserParam.java|  82 
 .../hdfs/web/resources/XAttrEncodingParam.java  |  56 
 .../hdfs/web/resources/XAttrNameParam.java  |  44 ++
 .../hdfs/web/resources/XAttrSetFlagParam.java   |  53 
 .../hdfs/web/resources/XAttrValueParam.java |  45 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  18 ++-
 .../hdfs/web/resources/AccessTimeParam.java |  49 ---
 .../hdfs/web/resources/AclPermissionParam.java  |  68 --
 .../hdfs/web/resources/BlockSizeParam.java  |  60 -
 .../hadoop/hdfs/web/resources/BooleanParam.java |  57 
 .../hdfs/web/resources/ConcatSourcesParam.java  |  65 -
 .../hdfs/web/resources/CreateParentParam.java   |  49 ---
 .../hdfs/web/resources/DelegationParam.java |  44 --
 .../hdfs/web/resources/DeleteOpParam.java   |  82 
 .../hdfs/web/resources/DestinationParam.java|  54 
 .../hadoop/hdfs/web/resources/DoAsParam.java|  41 --
 .../hadoop/hdfs/web/resources/EnumParam.java|  47 ---
 .../hadoop/hdfs/web/resources/EnumSetParam.java |  92 -
 .../web/resources/ExcludeDatanodesParam.java|  42 --
 .../hdfs/web/resources/FsActionParam.java   |  58 
 .../hadoop/hdfs/web/resources/GetOpParam.java   | 106 ---
 .../hadoop/hdfs/web/resources/GroupParam.java   |  41 --
 .../hadoop/hdfs/web/resources/HttpOpParam.java  | 134 ---
 .../hadoop/hdfs/web/resources/IntegerParam.java |  88 
 .../hadoop/hdfs/web/resources/LengthParam.java  |  54 
 .../hadoop/hdfs/web/resources/LongParam.java|  

hadoop git commit: YARN-2890. MiniYarnCluster should turn on timeline service if configured to do so. Contributed by Mit Desai.

2015-04-08 Thread hitesh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7e622076d -> 55b794e7f


YARN-2890. MiniYarnCluster should turn on timeline service if configured to do 
so. Contributed by Mit Desai.

(cherry picked from commit 265ed1fe804743601a8b62cabc1e4dc2ec8e502f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55b794e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55b794e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55b794e7

Branch: refs/heads/branch-2
Commit: 55b794e7fa205df655c19bbfe1de99091fa9dc64
Parents: 7e62207
Author: Hitesh Shah hit...@apache.org
Authored: Wed Apr 8 14:13:10 2015 -0700
Committer: Hitesh Shah hit...@apache.org
Committed: Wed Apr 8 14:14:06 2015 -0700

--
 .../jobhistory/TestJobHistoryEventHandler.java  |   2 +-
 .../mapred/TestMRTimelineEventHandling.java |  52 -
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../distributedshell/TestDistributedShell.java  |   2 +-
 .../hadoop/yarn/server/MiniYARNCluster.java |   6 +-
 .../hadoop/yarn/server/TestMiniYarnCluster.java | 115 +++
 6 files changed, 172 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b794e7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index 43e3dbe..de260c9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -453,7 +453,7 @@ public class TestJobHistoryEventHandler {
 long currentTime = System.currentTimeMillis();
 try {
   yarnCluster = new MiniYARNCluster(
-        TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1, true);
+        TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1);
   yarnCluster.init(conf);
   yarnCluster.start();
   jheh.start();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b794e7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
index c2ef128..eab9026 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
@@ -35,6 +35,52 @@ import org.junit.Test;
 public class TestMRTimelineEventHandling {
 
   @Test
+  public void testTimelineServiceStartInMiniCluster() throws Exception {
+    Configuration conf = new YarnConfiguration();
+
+    /*
+     * Timeline service should not start if the config is set to false,
+     * regardless of the value of MAPREDUCE_JOB_EMIT_TIMELINE_DATA.
+     */
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
+    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
+    MiniMRYarnCluster cluster = null;
+    try {
+      cluster = new MiniMRYarnCluster(
+          TestJobHistoryEventHandler.class.getSimpleName(), 1);
+      cluster.init(conf);
+      cluster.start();
+
+      // verify that the timeline service is not started.
+      Assert.assertNull("Timeline Service should not have been started",
+          cluster.getApplicationHistoryServer());
+    }
+    finally {
+      if (cluster != null) {
+        cluster.stop();
+      }
+    }
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
+    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, false);
+    cluster = null;
+    try {
+      cluster = new MiniMRYarnCluster(
+

hadoop git commit: YARN-2890. MiniYarnCluster should turn on timeline service if configured to do so. Contributed by Mit Desai.

2015-04-08 Thread hitesh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 285b31e75 -> 265ed1fe8


YARN-2890. MiniYarnCluster should turn on timeline service if configured to do 
so. Contributed by Mit Desai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/265ed1fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/265ed1fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/265ed1fe

Branch: refs/heads/trunk
Commit: 265ed1fe804743601a8b62cabc1e4dc2ec8e502f
Parents: 285b31e
Author: Hitesh Shah hit...@apache.org
Authored: Wed Apr 8 14:13:10 2015 -0700
Committer: Hitesh Shah hit...@apache.org
Committed: Wed Apr 8 14:13:10 2015 -0700

--
 .../jobhistory/TestJobHistoryEventHandler.java  |   2 +-
 .../mapred/TestMRTimelineEventHandling.java |  52 -
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../distributedshell/TestDistributedShell.java  |   2 +-
 .../hadoop/yarn/server/MiniYARNCluster.java |   6 +-
 .../hadoop/yarn/server/TestMiniYarnCluster.java | 115 +++
 6 files changed, 172 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/265ed1fe/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index 43e3dbe..de260c9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -453,7 +453,7 @@ public class TestJobHistoryEventHandler {
 long currentTime = System.currentTimeMillis();
 try {
   yarnCluster = new MiniYARNCluster(
-        TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1, true);
+        TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1);
   yarnCluster.init(conf);
   yarnCluster.start();
   jheh.start();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/265ed1fe/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
index c2ef128..eab9026 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
@@ -35,6 +35,52 @@ import org.junit.Test;
 public class TestMRTimelineEventHandling {
 
   @Test
+  public void testTimelineServiceStartInMiniCluster() throws Exception {
+    Configuration conf = new YarnConfiguration();
+
+    /*
+     * Timeline service should not start if the config is set to false,
+     * regardless of the value of MAPREDUCE_JOB_EMIT_TIMELINE_DATA.
+     */
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
+    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
+    MiniMRYarnCluster cluster = null;
+    try {
+      cluster = new MiniMRYarnCluster(
+          TestJobHistoryEventHandler.class.getSimpleName(), 1);
+      cluster.init(conf);
+      cluster.start();
+
+      // verify that the timeline service is not started.
+      Assert.assertNull("Timeline Service should not have been started",
+          cluster.getApplicationHistoryServer());
+    }
+    finally {
+      if (cluster != null) {
+        cluster.stop();
+      }
+    }
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
+    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, false);
+    cluster = null;
+    try {
+      cluster = new MiniMRYarnCluster(
+          TestJobHistoryEventHandler.class.getSimpleName(), 1);
+      cluster.init(conf);
+      cluster.start();
+

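The substantive change behind these two YARN-2890 commits is that MiniYARNCluster now decides whether to bring up the timeline service from YarnConfiguration.TIMELINE_SERVICE_ENABLED rather than from the old enableAHS constructor flag, which is why the test above drops the trailing boolean argument. A minimal sketch of that decision, using only the config constants visible in the diffs (the helper class and method names here are illustrative, not MiniYARNCluster source):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class TimelineToggleSketch {
      // Post-YARN-2890 behavior: consult the config key, not a constructor arg.
      static boolean shouldStartTimelineService(Configuration conf) {
        return conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,
            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED);
      }

      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
        System.out.println(shouldStartTimelineService(conf)); // prints false
      }
    }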
[2/3] hadoop git commit: HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. Contributed by Haohui Mai.

2015-04-08 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/88ef75f1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
new file mode 100644
index 000..dede6a5
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.net.HttpURLConnection;
+
+/** Http PUT operation parameter. */
+public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
+  /** Put operations. */
+  public static enum Op implements HttpOpParam.Op {
+    CREATE(true, HttpURLConnection.HTTP_CREATED),
+
+    MKDIRS(false, HttpURLConnection.HTTP_OK),
+    CREATESYMLINK(false, HttpURLConnection.HTTP_OK),
+    RENAME(false, HttpURLConnection.HTTP_OK),
+    SETREPLICATION(false, HttpURLConnection.HTTP_OK),
+
+    SETOWNER(false, HttpURLConnection.HTTP_OK),
+    SETPERMISSION(false, HttpURLConnection.HTTP_OK),
+    SETTIMES(false, HttpURLConnection.HTTP_OK),
+
+    RENEWDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
+    CANCELDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
+
+    MODIFYACLENTRIES(false, HttpURLConnection.HTTP_OK),
+    REMOVEACLENTRIES(false, HttpURLConnection.HTTP_OK),
+    REMOVEDEFAULTACL(false, HttpURLConnection.HTTP_OK),
+    REMOVEACL(false, HttpURLConnection.HTTP_OK),
+    SETACL(false, HttpURLConnection.HTTP_OK),
+
+    SETXATTR(false, HttpURLConnection.HTTP_OK),
+    REMOVEXATTR(false, HttpURLConnection.HTTP_OK),
+
+    CREATESNAPSHOT(false, HttpURLConnection.HTTP_OK),
+    RENAMESNAPSHOT(false, HttpURLConnection.HTTP_OK),
+
+    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
+
+    final boolean doOutputAndRedirect;
+    final int expectedHttpResponseCode;
+    final boolean requireAuth;
+
+    Op(final boolean doOutputAndRedirect, final int expectedHttpResponseCode) {
+      this(doOutputAndRedirect, expectedHttpResponseCode, false);
+    }
+
+    Op(final boolean doOutputAndRedirect, final int expectedHttpResponseCode,
+       final boolean requireAuth) {
+      this.doOutputAndRedirect = doOutputAndRedirect;
+      this.expectedHttpResponseCode = expectedHttpResponseCode;
+      this.requireAuth = requireAuth;
+    }
+
+    @Override
+    public HttpOpParam.Type getType() {
+      return HttpOpParam.Type.PUT;
+    }
+
+    @Override
+    public boolean getRequireAuth() {
+      return requireAuth;
+    }
+
+    @Override
+    public boolean getDoOutput() {
+      return doOutputAndRedirect;
+    }
+
+    @Override
+    public boolean getRedirect() {
+      return doOutputAndRedirect;
+    }
+
+    @Override
+    public int getExpectedHttpResponseCode() {
+      return expectedHttpResponseCode;
+    }
+
+    @Override
+    public String toQueryString() {
+      return NAME + "=" + this;
+    }
+  }
+
+  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class);
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public PutOpParam(final String str) {
+    super(DOMAIN, DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file

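For readers following the move, each Op value above bundles its HTTP semantics (output/redirect behavior, expected status code, auth requirement). A hypothetical driver, assuming only hadoop-hdfs-client on the classpath, shows what the class encodes; it is not part of the commit:

    import org.apache.hadoop.hdfs.web.resources.PutOpParam;

    public class PutOpParamDemo {
      public static void main(String[] args) {
        PutOpParam op = new PutOpParam("MKDIRS");
        // Each operation carries its HTTP contract with it:
        System.out.println(op.getValue().getDoOutput());                 // false
        System.out.println(op.getValue().getExpectedHttpResponseCode()); // 200
        System.out.println(op.getValue().toQueryString());               // op=MKDIRS
      }
    }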
http://git-wip-us.apache.org/repos/asf/hadoop/blob/88ef75f1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RecursiveParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RecursiveParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RecursiveParam.java
new file mode 100644
index 000..4890a61
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RecursiveParam.java
@@ -0,0 

[1/3] hadoop git commit: HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. Contributed by Haohui Mai.

2015-04-08 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 069366e1b -> 88ef75f16


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88ef75f1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
deleted file mode 100644
index 94a7f8e..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Integer parameter. */
-abstract class IntegerParam extends Param<Integer, IntegerParam.Domain> {
-  IntegerParam(final Domain domain, final Integer value,
-      final Integer min, final Integer max) {
-    super(domain, value);
-    checkRange(min, max);
-  }
-
-  private void checkRange(final Integer min, final Integer max) {
-    if (value == null) {
-      return;
-    }
-    if (min != null && value < min) {
-      throw new IllegalArgumentException("Invalid parameter range: "
-          + getName() + " = " + domain.toString(value)
-          + " < " + domain.toString(min));
-    }
-    if (max != null && value > max) {
-      throw new IllegalArgumentException("Invalid parameter range: "
-          + getName() + " = " + domain.toString(value)
-          + " > " + domain.toString(max));
-    }
-  }
-
-  @Override
-  public String toString() {
-    return getName() + "=" + domain.toString(getValue());
-  }
-
-  /** @return the parameter value as a string */
-  @Override
-  public String getValueString() {
-    return domain.toString(getValue());
-  }
-
-  /** The domain of the parameter. */
-  static final class Domain extends Param.Domain<Integer> {
-    /** The radix of the number. */
-    final int radix;
-
-    Domain(final String paramName) {
-      this(paramName, 10);
-    }
-
-    Domain(final String paramName, final int radix) {
-      super(paramName);
-      this.radix = radix;
-    }
-
-    @Override
-    public String getDomain() {
-      return "<" + NULL + " | int in radix " + radix + ">";
-    }
-
-    @Override
-    Integer parse(final String str) {
-      try {
-        return NULL.equals(str) || str == null ? null
-            : Integer.parseInt(str, radix);
-      } catch (NumberFormatException e) {
-        throw new IllegalArgumentException("Failed to parse \"" + str
-            + "\" as a radix-" + radix + " integer.", e);
-      }
-    }
-
-    /** Convert an Integer to a String. */
-    String toString(final Integer n) {
-      return n == null ? NULL : Integer.toString(n, radix);
-    }
-  }
-}

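The deleted class (recreated verbatim under hadoop-hdfs-client) is a compact template for nullable, bounded, radix-aware query parameters. A self-contained sketch of the same idea, with hypothetical names and no Hadoop dependency:

    // Sketch of the IntegerParam pattern: null means "parameter not set",
    // bounds are optional, and the radix is configurable.
    public class RangedIntDemo {
      static Integer parseRanged(String str, int radix, Integer min, Integer max) {
        Integer value = (str == null || "null".equals(str))
            ? null : Integer.parseInt(str, radix);
        if (value != null) {
          if (min != null && value < min) {
            throw new IllegalArgumentException("value " + value + " < min " + min);
          }
          if (max != null && value > max) {
            throw new IllegalArgumentException("value " + value + " > max " + max);
          }
        }
        return value;
      }

      public static void main(String[] args) {
        System.out.println(parseRanged("ff", 16, 0, 255)); // 255
        System.out.println(parseRanged(null, 10, 0, 100)); // null
      }
    }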
http://git-wip-us.apache.org/repos/asf/hadoop/blob/88ef75f1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
deleted file mode 100644
index 5a609ee..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing 

hadoop git commit: Revert HDFS-7813.

2015-04-08 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk bd4c99bec -> 82d56b337


Revert HDFS-7813.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82d56b33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82d56b33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82d56b33

Branch: refs/heads/trunk
Commit: 82d56b337d468f4065df5005f9f67487ac97d2d7
Parents: bd4c99b
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 8 16:02:45 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 8 16:02:45 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 ---
 .../org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java   | 1 -
 2 files changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82d56b33/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d4a8c0b..b203770 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1241,9 +1241,6 @@ Release 2.7.0 - UNRELEASED
 HDFS-7814. Fix usage string of storageType parameter for
 dfsadmin -setSpaceQuota/clrSpaceQuota. (Xiaoyu Yao via cnauroth)
 
-HDFS-7813. TestDFSHAAdminMiniCluster#testFencer testcase is failing
-frequently. (Rakesh R via cnauroth)
-
 HDFS-7009. Active NN and standby NN have different live nodes.
 (Ming Ma via cnauroth)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82d56b33/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
index 2910004..ee1c184 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
@@ -155,7 +155,6 @@ public class TestDFSHAAdminMiniCluster {
     tool.setConf(conf);
     assertEquals(0, runTool("-transitionToActive", "nn1"));
     assertEquals(0, runTool("-failover", "nn1", "nn2"));
-    assertEquals(0, runTool("-failover", "nn2", "nn1"));
 
     // Test failover with fencer and nameservice
     assertEquals(0, runTool("-ns", "minidfs-ns", "-failover", "nn2", "nn1"));



hadoop git commit: Revert HDFS-7808.

2015-04-08 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8104d5226 -> 35816c438


Revert HDFS-7808.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35816c43
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35816c43
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35816c43

Branch: refs/heads/branch-2
Commit: 35816c438da2685b2298cd73b26b716d185bcbe4
Parents: 8104d52
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 8 15:59:55 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 8 16:00:10 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ---
 .../apache/hadoop/hdfs/tools/DFSHAAdmin.java| 20 
 .../hadoop/hdfs/tools/TestDFSHAAdmin.java   | 20 
 .../hdfs/tools/TestDFSHAAdminMiniCluster.java   |  3 +++
 4 files changed, 43 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35816c43/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 32957a5..4612a1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -928,9 +928,6 @@ Release 2.7.0 - UNRELEASED
     HDFS-6662. WebHDFS cannot open a file if its path contains "%".
 (Gerson Carlos via wheat9)
 
-    HDFS-7808. Remove obsolete -ns options in DFSHAAdmin.java.
-    (Arshad Mohammad via wheat9)
-
 HDFS-7788. Post-2.6 namenode may not start up with an image containing
 inodes created with an old release. (Rushabh Shah via kihwal)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35816c43/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
index 6b6fb30..e9c611d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.io.PrintStream;
+import java.util.Arrays;
 import java.util.Collection;
 
 import org.apache.commons.logging.Log;
@@ -97,6 +98,25 @@ public class DFSHAAdmin extends HAAdmin {
   printUsage(errOut);
   return -1;
 }
+
+    int i = 0;
+    String cmd = argv[i++];
+
+    if ("-ns".equals(cmd)) {
+      if (i == argv.length) {
+        errOut.println("Missing nameservice ID");
+        printUsage(errOut);
+        return -1;
+      }
+      nameserviceId = argv[i++];
+      if (i >= argv.length) {
+        errOut.println("Missing command");
+        printUsage(errOut);
+        return -1;
+      }
+      argv = Arrays.copyOfRange(argv, i, argv.length);
+    }
+
 return super.runCmd(argv);
   }
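The restored prologue is plain argv surgery: peel off "-ns <id>", remember the nameservice, then hand the remaining arguments to HAAdmin. A runnable sketch of just that slicing (class and variable names are illustrative, not DFSHAAdmin source):

    import java.util.Arrays;

    public class NsOptionDemo {
      public static void main(String[] args) {
        String[] argv = {"-ns", "ns1", "-failover", "nn2", "nn1"};
        String nameserviceId = null;
        if (argv.length > 0 && "-ns".equals(argv[0])) {
          if (argv.length < 3) {
            throw new IllegalArgumentException("Missing nameservice ID or command");
          }
          nameserviceId = argv[1];
          argv = Arrays.copyOfRange(argv, 2, argv.length);
        }
        System.out.println(nameserviceId + " " + Arrays.toString(argv));
        // prints: ns1 [-failover, nn2, nn1]
      }
    }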
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35816c43/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
index 8ecc71a..33da4d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
@@ -147,6 +147,17 @@ public class TestDFSHAAdmin {
   }
   
   @Test
+  public void testNameserviceOption() throws Exception {
+    assertEquals(-1, runTool("-ns"));
+    assertOutputContains("Missing nameservice ID");
+    assertEquals(-1, runTool("-ns", "ns1"));
+    assertOutputContains("Missing command");
+    // "ns1" isn't defined but we check this lazily and help doesn't use the ns
+    assertEquals(0, runTool("-ns", "ns1", "-help", "transitionToActive"));
+    assertOutputContains("Transitions the service into Active");
+  }
+
+  @Test
   public void testNamenodeResolution() throws Exception {
 
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     assertEquals(0, runTool("-getServiceState", "nn1"));
@@ -268,6 +279,15 @@ public class TestDFSHAAdmin {
   }
 
   @Test
+  public void testFailoverWithFencerAndNameservice() throws Exception {
+    Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
+    HdfsConfiguration conf = getHAConf();
+

hadoop git commit: HDFS-7979. Initialize block report IDs with a random number.

2015-04-08 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 eafee9a04 -> 351fac25a


HDFS-7979. Initialize block report IDs with a random number.

(cherry picked from commit b1e059089d6a5b2b7006d7d384c6df81ed268bd9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/351fac25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/351fac25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/351fac25

Branch: refs/heads/branch-2
Commit: 351fac25a994de42ec1b1218513326884a1dde43
Parents: eafee9a
Author: Andrew Wang andrew.w...@cloudera.com
Authored: Wed Apr 8 21:43:42 2015 -0700
Committer: Andrew Wang andrew.w...@cloudera.com
Committed: Wed Apr 8 21:43:57 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../hadoop/hdfs/server/datanode/BPServiceActor.java | 16 +---
 .../hdfs/server/protocol/BlockReportContext.java|  3 +++
 3 files changed, 14 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/351fac25/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ccec529..5a75f383 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -89,6 +89,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. (wheat9)
 
+HDFS-7979. Initialize block report IDs with a random number. (wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/351fac25/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index dd6f9ac..ba5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -118,6 +118,7 @@ class BPServiceActor implements Runnable {
   private volatile boolean shouldServiceRun = true;
   private final DataNode dn;
   private final DNConf dnConf;
+  private long prevBlockReportId;
 
   private DatanodeRegistration bpRegistration;
   final LinkedList<BPServiceActorAction> bpThreadQueue
@@ -128,6 +129,7 @@ class BPServiceActor implements Runnable {
 this.dn = bpos.getDataNode();
 this.nnAddr = nnAddr;
 this.dnConf = dn.getDnConf();
+prevBlockReportId = DFSUtil.getRandom().nextLong();
   }
 
   boolean isAlive() {
@@ -434,15 +436,15 @@ class BPServiceActor implements Runnable {
 return sendImmediateIBR;
   }
 
-  private long prevBlockReportId = 0;
-
   private long generateUniqueBlockReportId() {
-    long id = System.nanoTime();
-    if (id <= prevBlockReportId) {
-      id = prevBlockReportId + 1;
+    // Initialize the block report ID the first time through.
+    // Note that 0 is used on the NN to indicate uninitialized, so we should
+    // not send a 0 value ourselves.
+    prevBlockReportId++;
+    while (prevBlockReportId == 0) {
+      prevBlockReportId = DFSUtil.getRandom().nextLong();
     }
-    prevBlockReportId = id;
-    return id;
+    return prevBlockReportId;
   }
 
   /**

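The point of HDFS-7979: IDs seeded from System.nanoTime() restart near the same values after a DataNode reboot, so a random starting point plus increment is used instead, and the loop re-draws whenever the counter would hit 0, which the NameNode reserves for "uninitialized". A standalone sketch of the scheme, with java.util.Random standing in for DFSUtil.getRandom() and the class itself illustrative:

    import java.util.Random;

    public class BlockReportIdDemo {
      private static final Random RANDOM = new Random();
      private long prevBlockReportId = RANDOM.nextLong(); // random initial seed

      long next() {
        prevBlockReportId++;
        while (prevBlockReportId == 0) { // 0 is reserved by the NN
          prevBlockReportId = RANDOM.nextLong();
        }
        return prevBlockReportId;
      }

      public static void main(String[] args) {
        BlockReportIdDemo ids = new BlockReportIdDemo();
        long a = ids.next();
        long b = ids.next();
        System.out.println(a + " then " + b + " (consecutive, never 0)");
      }
    }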
http://git-wip-us.apache.org/repos/asf/hadoop/blob/351fac25/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java
index a084a81..d0b0282 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
 /**
  * The context of the block report.
  *
@@ -27,6 +29,7 @@ package org.apache.hadoop.hdfs.server.protocol;
  * of RPCs which this block report is split into, and the index into that
  * total for the current RPC.
  */
+@InterfaceAudience.Private
 public class BlockReportContext {
   private final int 

hadoop git commit: YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore called. Contributed by Bibin A Chundatt.

2015-04-08 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f9fbde307 -> ac32fa187


YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore called. 
Contributed by Bibin A Chundatt.

(cherry picked from commit dd852f5b8c8fe9e52d15987605f36b5b60f02701)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac32fa18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac32fa18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac32fa18

Branch: refs/heads/branch-2
Commit: ac32fa187cf37e5a51fd579e052105662ab3c411
Parents: f9fbde3
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Apr 8 15:56:18 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed Apr 8 15:56:33 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../yarn/server/nodemanager/NodeManager.java| 22 +++-
 2 files changed, 15 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac32fa18/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a4673bd..d9e1754 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -105,6 +105,9 @@ Release 2.8.0 - UNRELEASED
 
     YARN-3110. Few issues in ApplicationHistory web ui. (Naganarasimha G R
     via xgong)
 
+    YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore
+    called. (Bibin A Chundatt via ozawa)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac32fa18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 5727f10..d54180a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -176,16 +176,18 @@ public class NodeManager extends CompositeService
 
   private void stopRecoveryStore() throws IOException {
     nmStore.stop();
-    if (context.getDecommissioned() && nmStore.canRecover()) {
-      LOG.info("Removing state store due to decommission");
-      Configuration conf = getConfig();
-      Path recoveryRoot = new Path(
-          conf.get(YarnConfiguration.NM_RECOVERY_DIR));
-      LOG.info("Removing state store at " + recoveryRoot
-          + " due to decommission");
-      FileSystem recoveryFs = FileSystem.getLocal(conf);
-      if (!recoveryFs.delete(recoveryRoot, true)) {
-        LOG.warn("Unable to delete " + recoveryRoot);
+    if (null != context) {
+      if (context.getDecommissioned() && nmStore.canRecover()) {
+        LOG.info("Removing state store due to decommission");
+        Configuration conf = getConfig();
+        Path recoveryRoot =
+            new Path(conf.get(YarnConfiguration.NM_RECOVERY_DIR));
+        LOG.info("Removing state store at " + recoveryRoot
+            + " due to decommission");
+        FileSystem recoveryFs = FileSystem.getLocal(conf);
+        if (!recoveryFs.delete(recoveryRoot, true)) {
+          LOG.warn("Unable to delete " + recoveryRoot);
+        }
       }
     }
   }



hadoop git commit: YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore called. Contributed by Bibin A Chundatt.

2015-04-08 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk ab04ff9ef -> dd852f5b8


YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore called. 
Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd852f5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd852f5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd852f5b

Branch: refs/heads/trunk
Commit: dd852f5b8c8fe9e52d15987605f36b5b60f02701
Parents: ab04ff9
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Apr 8 15:56:18 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed Apr 8 15:56:18 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../yarn/server/nodemanager/NodeManager.java| 22 +++-
 2 files changed, 15 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd852f5b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 01d3429..d5f6ce0 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -153,6 +153,9 @@ Release 2.8.0 - UNRELEASED
 
     YARN-3110. Few issues in ApplicationHistory web ui. (Naganarasimha G R
     via xgong)
 
+    YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore
+    called. (Bibin A Chundatt via ozawa)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd852f5b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index f95a7ad..9831fc4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -177,16 +177,18 @@ public class NodeManager extends CompositeService
 
   private void stopRecoveryStore() throws IOException {
     nmStore.stop();
-    if (context.getDecommissioned() && nmStore.canRecover()) {
-      LOG.info("Removing state store due to decommission");
-      Configuration conf = getConfig();
-      Path recoveryRoot = new Path(
-          conf.get(YarnConfiguration.NM_RECOVERY_DIR));
-      LOG.info("Removing state store at " + recoveryRoot
-          + " due to decommission");
-      FileSystem recoveryFs = FileSystem.getLocal(conf);
-      if (!recoveryFs.delete(recoveryRoot, true)) {
-        LOG.warn("Unable to delete " + recoveryRoot);
+    if (null != context) {
+      if (context.getDecommissioned() && nmStore.canRecover()) {
+        LOG.info("Removing state store due to decommission");
+        Configuration conf = getConfig();
+        Path recoveryRoot =
+            new Path(conf.get(YarnConfiguration.NM_RECOVERY_DIR));
+        LOG.info("Removing state store at " + recoveryRoot
+            + " due to decommission");
+        FileSystem recoveryFs = FileSystem.getLocal(conf);
+        if (!recoveryFs.delete(recoveryRoot, true)) {
+          LOG.warn("Unable to delete " + recoveryRoot);
+        }
       }
     }
   }

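The null check matters because the Hadoop service lifecycle still invokes serviceStop after serviceInit fails partway, so fields assigned late in serviceInit, such as the NM context, can legitimately be null during cleanup. A toy service (not NodeManager) demonstrating that contract, assuming hadoop-common on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.service.AbstractService;

    public class LifecycleDemo extends AbstractService {
      private Object resource; // stays null if serviceInit throws early

      public LifecycleDemo() { super("LifecycleDemo"); }

      @Override
      protected void serviceInit(Configuration conf) throws Exception {
        if (conf.get("demo.required.key") == null) {
          throw new IllegalStateException("init failed before resource creation");
        }
        resource = new Object();
      }

      @Override
      protected void serviceStop() throws Exception {
        if (resource != null) { // same null-guard pattern as the NodeManager fix
          System.out.println("cleaning up resource");
        }
      }

      public static void main(String[] args) {
        LifecycleDemo s = new LifecycleDemo();
        try {
          s.init(new Configuration()); // throws before resource is created
        } catch (Exception expected) {
          // swallowed for the demo
        } finally {
          s.stop(); // must tolerate the partially-initialized state
        }
      }
    }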


hadoop git commit: HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from NameNode (Contributed by Vinayakumar B) Added missing file

2015-04-08 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 d63800e84 -> ead48867a


HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from
NameNode (Contributed by Vinayakumar B)
Added missing file


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ead48867
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ead48867
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ead48867

Branch: refs/heads/HDFS-7285
Commit: ead48867a5aa9ad608c93fa45e2c51022f62ced5
Parents: d63800e
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 8 14:23:03 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Wed Apr 8 14:23:03 2015 +0530

--
 .../org/apache/hadoop/hdfs/protocol/ECInfo.java | 41 
 1 file changed, 41 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ead48867/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
new file mode 100644
index 000..ca642c2
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.io.erasurecode.ECSchema;
+
+/**
+ * Class to provide information, such as ECSchema, for a file/block.
+ */
+public class ECInfo {
+  private final String src;
+  private final ECSchema schema;
+
+  public ECInfo(String src, ECSchema schema) {
+    this.src = src;
+    this.schema = schema;
+  }
+
+  public String getSrc() {
+    return src;
+  }
+
+  public ECSchema getSchema() {
+    return schema;
+  }
+}



[1/2] hadoop git commit: HDFS-8074 Define a system-wide default EC schema. Contributed by Kai Zheng

2015-04-08 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 ead48867a -> d022be287


HDFS-8074 Define a system-wide default EC schema. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ca56197
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ca56197
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ca56197

Branch: refs/heads/HDFS-7285
Commit: 7ca56197d6b7f3074800c7b701f81b70b442a223
Parents: ead4886
Author: Kai Zheng kai.zh...@intel.com
Authored: Thu Apr 9 01:30:02 2015 +0800
Committer: Kai Zheng kai.zh...@intel.com
Committed: Thu Apr 9 01:30:02 2015 +0800

--
 .../src/main/conf/ecschema-def.xml  |  5 --
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 57 +-
 .../hdfs/server/namenode/ECSchemaManager.java   | 62 
 3 files changed, 117 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca56197/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml 
b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
index e619485..e36d386 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
+++ b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
@@ -27,11 +27,6 @@ You can modify and remove those not used yet, or add new ones.
 -->
 
 <schemas>
-  <schema name="RS-6-3">
-    <k>6</k>
-    <m>3</m>
-    <codec>RS</codec>
-  </schema>
   <schema name="RS-10-4">
     <k>10</k>
     <m>4</m>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca56197/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 27be00e..8c3310e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -23,12 +23,12 @@ import java.util.Map;
 /**
  * Erasure coding schema to housekeeper relevant information.
  */
-public class ECSchema {
+public final class ECSchema {
   public static final String NUM_DATA_UNITS_KEY = "k";
   public static final String NUM_PARITY_UNITS_KEY = "m";
   public static final String CODEC_NAME_KEY = "codec";
   public static final String CHUNK_SIZE_KEY = "chunkSize";
-  public static final int DEFAULT_CHUNK_SIZE = 64 * 1024; // 64K
+  public static final int DEFAULT_CHUNK_SIZE = 256 * 1024; // 256K
 
   private String schemaName;
   private String codecName;
@@ -82,6 +82,18 @@ public class ECSchema {
   }
 
   /**
+   * Constructor with key parameters provided.
+   * @param schemaName
+   * @param codecName
+   * @param numDataUnits
+   * @param numParityUnits
+   */
+  public ECSchema(String schemaName, String codecName,
+      int numDataUnits, int numParityUnits) {
+    this(schemaName, codecName, numDataUnits, numParityUnits, null);
+  }
+
+  /**
* Constructor with key parameters provided. Note the options may contain
* additional information for the erasure codec to interpret further.
* @param schemaName
@@ -200,4 +212,45 @@ public class ECSchema {
 
 return sb.toString();
   }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    ECSchema ecSchema = (ECSchema) o;
+
+    if (numDataUnits != ecSchema.numDataUnits) {
+      return false;
+    }
+    if (numParityUnits != ecSchema.numParityUnits) {
+      return false;
+    }
+    if (chunkSize != ecSchema.chunkSize) {
+      return false;
+    }
+    if (!schemaName.equals(ecSchema.schemaName)) {
+      return false;
+    }
+    if (!codecName.equals(ecSchema.codecName)) {
+      return false;
+    }
+    return options.equals(ecSchema.options);
+  }
+
+  @Override
+  public int hashCode() {
+    int result = schemaName.hashCode();
+    result = 31 * result + codecName.hashCode();
+    result = 31 * result + options.hashCode();
+    result = 31 * result + numDataUnits;
+    result = 31 * result + numParityUnits;
+    result = 31 * result + chunkSize;
+
+    return result;
+  }
 }
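With equals and hashCode covering all five fields, two independently constructed schemas compare as values, which is what a system-wide default-schema lookup needs. A hypothetical check; it assumes the new 4-arg constructor (options omitted) leaves two identically built schemas equal, a detail the diff does not show:

    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class ECSchemaEqualityDemo {
      public static void main(String[] args) {
        ECSchema a = new ECSchema("RS-10-4", "RS", 10, 4);
        ECSchema b = new ECSchema("RS-10-4", "RS", 10, 4);
        System.out.println(a.equals(b));                  // expected: true
        System.out.println(a.hashCode() == b.hashCode()); // expected: true
      }
    }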

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca56197/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ECSchemaManager.java
--
diff 

[2/2] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt

2015-04-08 Thread drankye
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d022be28
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d022be28
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d022be28

Branch: refs/heads/HDFS-7285
Commit: d022be28718e7c2f3fe76bdb76b7e15c8d9eff0a
Parents: 7ca5619
Author: Kai Zheng kai.zh...@intel.com
Authored: Thu Apr 9 01:31:52 2015 +0800
Committer: Kai Zheng kai.zh...@intel.com
Committed: Thu Apr 9 01:31:52 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d022be28/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 7423033..5078a15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -52,4 +52,6 @@
 manage EC zones (Zhe Zhang)
 
     HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from
-    NameNode (vinayakumarb)
\ No newline at end of file
+    NameNode (vinayakumarb)
+
+    HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
\ No newline at end of file



hadoop git commit: HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from NameNode (Contributed by Vinayakumar B)

2015-04-08 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 196774350 -> d63800e84


HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from
NameNode (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d63800e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d63800e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d63800e8

Branch: refs/heads/HDFS-7285
Commit: d63800e84e64d4ce91953efe5604c3129d9f47c0
Parents: 1967743
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 8 12:48:59 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Wed Apr 8 12:48:59 2015 +0530

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 ++-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 14 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java| 10 +
 ...tNamenodeProtocolServerSideTranslatorPB.java | 19 
 .../ClientNamenodeProtocolTranslatorPB.java | 18 
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 47 +++-
 .../hdfs/server/namenode/FSNamesystem.java  | 31 +
 .../hdfs/server/namenode/NameNodeRpcServer.java |  7 +++
 .../src/main/proto/ClientNamenodeProtocol.proto | 10 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   | 28 
 .../hadoop/hdfs/TestErasureCodingZones.java | 38 +++-
 11 files changed, 223 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d63800e8/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 9927ccf..7423033 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -49,4 +49,7 @@
 (Hui Zheng via Zhe Zhang)
 
     HDFS-7839. Erasure coding: implement facilities in NameNode to create and
-    manage EC zones (Zhe Zhang)
\ No newline at end of file
+    manage EC zones (Zhe Zhang)
+
+    HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from
+    NameNode (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d63800e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index bfb1022..a4cfc55 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -157,6 +157,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ECInfo;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -3460,6 +3461,19 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  public ECInfo getErasureCodingInfo(String src) throws IOException {
+    checkOpen();
+    TraceScope scope = getPathTraceScope("getErasureCodingInfo", src);
+    try {
+      return namenode.getErasureCodingInfo(src);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+          FileNotFoundException.class, UnresolvedPathException.class);
+    } finally {
+      scope.close();
+    }
+  }
+
   public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
 return new DFSInotifyEventInputStream(traceSampler, namenode);
   }
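Client-side, the new RPC surfaces as a single call. A hypothetical use; only getErasureCodingInfo and the ECInfo getters come from this commit, and the setup around them is assumed:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.protocol.ECInfo;

    public class ECInfoDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DFSClient client = new DFSClient(URI.create("hdfs://localhost:8020"), conf);
        try {
          ECInfo info = client.getErasureCodingInfo("/eczone/file");
          if (info != null) { // guard: assuming a nullable result for non-EC files
            System.out.println(info.getSrc() + " uses schema " + info.getSchema());
          }
        } finally {
          client.close();
        }
      }
    }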

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d63800e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 8efe344..45d92f3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1464,4 +1464,14 @@ public interface 

hadoop git commit: HADOOP-11781. fix race conditions and add URL support to smart-apply-patch.sh (Raymie Stata via aw)

2015-04-08 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk dd852f5b8 -> f4b3fc562


HADOOP-11781. fix race conditions and add URL support to smart-apply-patch.sh 
(Raymie Stata via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4b3fc56
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4b3fc56
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4b3fc56

Branch: refs/heads/trunk
Commit: f4b3fc56210824037344d403f1ad0f033961a2db
Parents: dd852f5
Author: Allen Wittenauer a...@apache.org
Authored: Wed Apr 8 10:05:25 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Wed Apr 8 10:05:25 2015 -0700

--
 dev-support/smart-apply-patch.sh| 45 
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 2 files changed, 40 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4b3fc56/dev-support/smart-apply-patch.sh
--
diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh
index 03bc4f8..449fc22 100755
--- a/dev-support/smart-apply-patch.sh
+++ b/dev-support/smart-apply-patch.sh
@@ -11,8 +11,6 @@
 #   See the License for the specific language governing permissions and
 #   limitations under the License.
 
-set -e
-
 #
 # Determine if the patch file is a git diff file with prefixes.
 # These files are generated via git diff *without* the --no-prefix option.
@@ -54,6 +52,7 @@ if [ -z "$PATCH_FILE" ]; then
   exit 1
 fi
 
+TMPDIR=${TMPDIR:-/tmp}
 PATCH=${PATCH:-patch} # allow overriding patch binary
 
 # Cleanup handler for temporary files
@@ -66,11 +65,41 @@ trap 'cleanup 1' HUP INT QUIT TERM
 
 # Allow passing - for stdin patches
 if [ "$PATCH_FILE" == "-" ]; then
-  PATCH_FILE=/tmp/tmp.in.$$
+  PATCH_FILE=$TMPDIR/smart-apply.in.$RANDOM
   cat /dev/fd/0 > $PATCH_FILE
   TOCLEAN="$TOCLEAN $PATCH_FILE"
 fi
 
+ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$'
+if [[ ${PATCH_FILE} =~ ^http || ${PATCH_FILE} =~ ${ISSUE_RE} ]]; then
+  # Allow downloading of patches
+  PFILE=$TMPDIR/smart-apply.in.$RANDOM
+  TOCLEAN="$TOCLEAN $PFILE"
+  if [[ ${PATCH_FILE} =~ ^http ]]; then
+    patchURL="${PATCH_FILE}"
+  else # Get URL of patch from JIRA
+    wget -q -O "${PFILE}" "http://issues.apache.org/jira/browse/${PATCH_FILE}"
+    if [[ $? != 0 ]]; then
+      echo "Unable to determine what ${PATCH_FILE} may reference." 1>&2
+      cleanup 1
+    elif [[ $(grep -c 'Patch Available' "${PFILE}") == 0 ]]; then
+      echo "${PATCH_FILE} is not \"Patch Available\".  Exiting." 1>&2
+      cleanup 1
+    fi
+    relativePatchURL=$(grep -o '/jira/secure/attachment/[0-9]*/[^"]*' "${PFILE}" | grep -v -e 'htm[l]*$' | sort | tail -1 | grep -o '/jira/secure/attachment/[0-9]*/[^"]*')
+    patchURL="http://issues.apache.org${relativePatchURL}"
+  fi
+  if [[ -n $DRY_RUN ]]; then
+    echo "Downloading ${patchURL}"
+  fi
+  wget -q -O "${PFILE}" "${patchURL}"
+  if [[ $? != 0 ]]; then
+    echo "${PATCH_FILE} could not be downloaded." 1>&2
+    cleanup 1
+  fi
+  PATCH_FILE="${PFILE}"
+fi
+
 # Special case for git-diff patches without --no-prefix
 if is_git_diff_with_prefix $PATCH_FILE; then
   GIT_FLAGS="--binary -p1 -v"
@@ -85,7 +114,7 @@ if is_git_diff_with_prefix $PATCH_FILE; then
 fi
 
 # Come up with a list of changed files into $TMP
-TMP=/tmp/tmp.paths.$$
+TMP=$TMPDIR/smart-apply.paths.$RANDOM
 TOCLEAN="$TOCLEAN $TMP"
 
 if $PATCH -p0 -E --dry-run < $PATCH_FILE 2>&1 > $TMP; then
@@ -94,10 +123,10 @@ if $PATCH -p0 -E --dry-run < $PATCH_FILE 2>&1 > $TMP; then
   # is adding new files and they would apply anywhere. So try to guess the
   # correct place to put those files.
 
-  TMP2=/tmp/tmp.paths.2.$$
+  TMP2=$TMPDIR/smart-apply.paths.2.$RANDOM
   TOCLEAN="$TOCLEAN $TMP2"
 
-  egrep '^patching file |^checking file ' $TMP | awk '{print $3}' | grep -v /dev/null | sort | uniq > $TMP2
+  egrep '^patching file |^checking file ' $TMP | awk '{print $3}' | grep -v /dev/null | sort -u > $TMP2
 
   if [ ! -s $TMP2 ]; then
     echo "Error: Patch dryrun couldn't detect changes the patch would make. Exiting."
@@ -125,8 +154,8 @@ if $PATCH -p0 -E --dry-run  $PATCH_FILE 21  $TMP; then
   sed -i -e 's,^[ab]/,,' $TMP2
 fi
 
-PREFIX_DIRS_AND_FILES=$(cut -d '/' -f 1 | sort | uniq)
-
+PREFIX_DIRS_AND_FILES=$(cut -d '/' -f 1 $TMP2 | sort -u)
+ 
 # if we are at the project root then nothing more to do
 if [[ -d hadoop-common-project ]]; then
   echo "Looks like this is being run at project root"
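
With URL support in place, a quick usage sketch (paths relative to a source
checkout; the attachment id below is a placeholder): running
dev-support/smart-apply-patch.sh HADOOP-11781 downloads the latest non-HTML
attachment of a JIRA issue that is in Patch Available state, while
dev-support/smart-apply-patch.sh http://issues.apache.org/jira/secure/attachment/NNNNN/x.patch
applies a patch fetched directly from the given URL. Anything matching
^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$ is treated as an issue id, and plain
file paths behave as before.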

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4b3fc56/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 

[3/3] hadoop git commit: HDFS-8072. Reserved RBW space is not released if client terminates while writing block. (Arpit Agarwal)

2015-04-08 Thread arp
HDFS-8072. Reserved RBW space is not released if client terminates while 
writing block. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12739b54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12739b54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12739b54

Branch: refs/heads/branch-2.7
Commit: 12739b541bf9cbe39b352c7651eef91557209b4e
Parents: ffa3f3a
Author: Arpit Agarwal a...@apache.org
Authored: Wed Apr 8 11:38:21 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Wed Apr 8 11:38:42 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/datanode/BlockReceiver.java |  1 +
 .../hdfs/server/datanode/ReplicaInPipeline.java |  6 ++
 .../datanode/ReplicaInPipelineInterface.java|  5 ++
 .../server/datanode/SimulatedFSDataset.java |  4 ++
 .../extdataset/ExternalReplicaInPipeline.java   |  4 ++
 .../fsdataset/impl/TestRbwSpaceReservation.java | 67 +---
 7 files changed, 81 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12739b54/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c36ea82..3fe17cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -974,6 +974,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in
 platform-specific format. (Xiaoyu Yao via cnauroth)
 
+HDFS-8072. Reserved RBW space is not released if client terminates while
+writing block. (Arpit Agarwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12739b54/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 58cb8b1..c0be956 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -817,6 +817,7 @@ class BlockReceiver implements Closeable {
   }
 
 } catch (IOException ioe) {
+  replicaInfo.releaseAllBytesReserved();
   if (datanode.isRestarting()) {
 // Do not throw if shutting down for restart. Otherwise, it will cause
 // premature termination of responder.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12739b54/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index 6a26640..cc55f85 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -148,6 +148,12 @@ public class ReplicaInPipeline extends ReplicaInfo
 return bytesReserved;
   }
   
+  @Override
+  public void releaseAllBytesReserved() {  // ReplicaInPipelineInterface
+    getVolume().releaseReservedSpace(bytesReserved);
+    bytesReserved = 0;
+  }
+
   @Override // ReplicaInPipelineInterface
   public synchronized void setLastChecksumAndDataLen(long dataLength, byte[] 
lastChecksum) {
 this.bytesOnDisk = dataLength;
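
The shape of the fix, as a self-contained hedged sketch (Volume and Replica
below are illustrative stand-ins, not Hadoop classes; only
releaseAllBytesReserved mirrors the patch): space reserved for an in-flight
replica must be handed back to the volume when the write fails, otherwise the
reservation leaks and the DataNode eventually reports no room for new blocks.

  public class RbwReservationSketch {
    static class Volume {
      private long reserved;
      synchronized void reserve(long bytes) { reserved += bytes; }
      synchronized void release(long bytes) { reserved -= bytes; }
      synchronized long reserved() { return reserved; }
    }

    static class Replica {
      private final Volume volume;
      private long bytesReserved;
      Replica(Volume volume, long bytes) {
        this.volume = volume;
        this.bytesReserved = bytes;
        volume.reserve(bytes);
      }
      // Mirrors ReplicaInPipeline#releaseAllBytesReserved in the patch.
      void releaseAllBytesReserved() {
        volume.release(bytesReserved);
        bytesReserved = 0;
      }
    }

    public static void main(String[] args) {
      Volume v = new Volume();
      Replica r = new Replica(v, 128L * 1024 * 1024);
      try {
        throw new java.io.IOException("client terminated mid-write");
      } catch (java.io.IOException ioe) {
        // The one-line fix: release before handling the error, as
        // BlockReceiver now does in its IOException path.
        r.releaseAllBytesReserved();
      }
      System.out.println("still reserved: " + v.reserved()); // prints 0
    }
  }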

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12739b54/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
index 7f08b81..0263d0f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
+++ 

[2/3] hadoop git commit: HDFS-8072. Reserved RBW space is not released if client terminates while writing block. (Arpit Agarwal)

2015-04-08 Thread arp
HDFS-8072. Reserved RBW space is not released if client terminates while 
writing block. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0324738
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0324738
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0324738

Branch: refs/heads/branch-2
Commit: f0324738c9db4f45d2b1ec5cfb46c5f2b7669571
Parents: 5f59e62
Author: Arpit Agarwal a...@apache.org
Authored: Wed Apr 8 11:38:21 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Wed Apr 8 11:38:30 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/datanode/BlockReceiver.java |  1 +
 .../hdfs/server/datanode/ReplicaInPipeline.java |  6 ++
 .../datanode/ReplicaInPipelineInterface.java|  5 ++
 .../server/datanode/SimulatedFSDataset.java |  4 ++
 .../extdataset/ExternalReplicaInPipeline.java   |  4 ++
 .../fsdataset/impl/TestRbwSpaceReservation.java | 67 +---
 7 files changed, 81 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0324738/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8290bcc..e767f45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1102,6 +1102,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in
 platform-specific format. (Xiaoyu Yao via cnauroth)
 
+HDFS-8072. Reserved RBW space is not released if client terminates while
+writing block. (Arpit Agarwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0324738/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 58cb8b1..c0be956 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -817,6 +817,7 @@ class BlockReceiver implements Closeable {
   }
 
 } catch (IOException ioe) {
+  replicaInfo.releaseAllBytesReserved();
   if (datanode.isRestarting()) {
 // Do not throw if shutting down for restart. Otherwise, it will cause
 // premature termination of responder.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0324738/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index 6a26640..cc55f85 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -148,6 +148,12 @@ public class ReplicaInPipeline extends ReplicaInfo
 return bytesReserved;
   }
   
+  @Override
+  public void releaseAllBytesReserved() {  // ReplicaInPipelineInterface
+    getVolume().releaseReservedSpace(bytesReserved);
+    bytesReserved = 0;
+  }
+
   @Override // ReplicaInPipelineInterface
   public synchronized void setLastChecksumAndDataLen(long dataLength, byte[] 
lastChecksum) {
 this.bytesOnDisk = dataLength;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0324738/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
index 7f08b81..0263d0f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
+++ 

[1/3] hadoop git commit: HDFS-8072. Reserved RBW space is not released if client terminates while writing block. (Arpit Agarwal)

2015-04-08 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5f59e621b -> f0324738c
  refs/heads/branch-2.7 ffa3f3a10 -> 12739b541
  refs/heads/trunk ba9ee22ca -> 608c49984


HDFS-8072. Reserved RBW space is not released if client terminates while 
writing block. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/608c4998
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/608c4998
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/608c4998

Branch: refs/heads/trunk
Commit: 608c4998419c18fd95019b28cc56b5bd5aa4cc01
Parents: ba9ee22
Author: Arpit Agarwal a...@apache.org
Authored: Wed Apr 8 11:38:21 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Wed Apr 8 11:38:21 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/datanode/BlockReceiver.java |  1 +
 .../hdfs/server/datanode/ReplicaInPipeline.java |  6 ++
 .../datanode/ReplicaInPipelineInterface.java|  5 ++
 .../server/datanode/SimulatedFSDataset.java |  4 ++
 .../extdataset/ExternalReplicaInPipeline.java   |  4 ++
 .../fsdataset/impl/TestRbwSpaceReservation.java | 67 +---
 7 files changed, 81 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/608c4998/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 84e382a..91a16bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1403,6 +1403,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in
 platform-specific format. (Xiaoyu Yao via cnauroth)
 
+HDFS-8072. Reserved RBW space is not released if client terminates while
+writing block. (Arpit Agarwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608c4998/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 58cb8b1..c0be956 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -817,6 +817,7 @@ class BlockReceiver implements Closeable {
   }
 
 } catch (IOException ioe) {
+  replicaInfo.releaseAllBytesReserved();
   if (datanode.isRestarting()) {
 // Do not throw if shutting down for restart. Otherwise, it will cause
 // premature termination of responder.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608c4998/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index 6a26640..cc55f85 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -148,6 +148,12 @@ public class ReplicaInPipeline extends ReplicaInfo
 return bytesReserved;
   }
   
+  @Override
+  public void releaseAllBytesReserved() {  // ReplicaInPipelineInterface
+    getVolume().releaseReservedSpace(bytesReserved);
+    bytesReserved = 0;
+  }
+
   @Override // ReplicaInPipelineInterface
   public synchronized void setLastChecksumAndDataLen(long dataLength, byte[] 
lastChecksum) {
 this.bytesOnDisk = dataLength;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608c4998/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
index 7f08b81..0263d0f 100644
--- 

hadoop git commit: HDFS-8079. Move CorruptFileBlockIterator to a new hdfs.client.impl package.

2015-04-08 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/trunk 608c49984 -> c931a3c77


HDFS-8079. Move CorruptFileBlockIterator to a new hdfs.client.impl package.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c931a3c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c931a3c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c931a3c7

Branch: refs/heads/trunk
Commit: c931a3c7760e417f593f5e73f4cf55f6fe1defc5
Parents: 608c499
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Wed Apr 8 11:50:52 2015 -0700
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Wed Apr 8 11:50:52 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../main/java/org/apache/hadoop/fs/Hdfs.java|   2 +-
 .../hadoop/hdfs/CorruptFileBlockIterator.java   | 104 --
 .../hadoop/hdfs/DistributedFileSystem.java  |   1 +
 .../client/impl/CorruptFileBlockIterator.java   | 105 +++
 .../namenode/TestListCorruptFileBlocks.java |   2 +-
 6 files changed, 111 insertions(+), 106 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c931a3c7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 91a16bc..c983849 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -391,6 +391,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8080. Separate JSON related routines used by WebHdfsFileSystem to a
 package local class. (wheat9)
 
+HDFS-8085. Move CorruptFileBlockIterator to a new hdfs.client.impl package.
+(szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c931a3c7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
index 8c09193..aaaff25 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -35,13 +35,13 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
-import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
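
Although the iterator class moves into an implementation package, callers keep
going through the stable public API. A hedged sketch (the root path and
default config are assumptions, not part of the patch):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.RemoteIterator;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  public class ListCorruptSketch {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(new Configuration());
      if (fs instanceof DistributedFileSystem) {
        // Backed by CorruptFileBlockIterator, now in hdfs.client.impl.
        RemoteIterator<Path> it =
            ((DistributedFileSystem) fs).listCorruptFileBlocks(new Path("/"));
        while (it.hasNext()) {
          System.out.println("file with corrupt blocks: " + it.next());
        }
      }
    }
  }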

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c931a3c7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
deleted file mode 100644
index 1597b87..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs;
-
-import java.io.IOException;
-import 

hadoop git commit: HDFS-8079. Move CorruptFileBlockIterator to a new hdfs.client.impl package.

2015-04-08 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f0324738c -> de7f9a8bc


HDFS-8079. Move CorruptFileBlockIterator to a new hdfs.client.impl package.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de7f9a8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de7f9a8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de7f9a8b

Branch: refs/heads/branch-2
Commit: de7f9a8bcc2f5c68caed97c052c1b9ed8a1473e0
Parents: f032473
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Wed Apr 8 11:50:52 2015 -0700
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Wed Apr 8 11:51:43 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../main/java/org/apache/hadoop/fs/Hdfs.java|   2 +-
 .../hadoop/hdfs/CorruptFileBlockIterator.java   | 104 --
 .../hadoop/hdfs/DistributedFileSystem.java  |   1 +
 .../client/impl/CorruptFileBlockIterator.java   | 105 +++
 .../namenode/TestListCorruptFileBlocks.java |   2 +-
 6 files changed, 111 insertions(+), 106 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de7f9a8b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e767f45..7736f62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -76,6 +76,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8080. Separate JSON related routines used by WebHdfsFileSystem to a
 package local class. (wheat9)
 
+HDFS-8085. Move CorruptFileBlockIterator to a new hdfs.client.impl package.
+(szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de7f9a8b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
index 8c09193..aaaff25 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -35,13 +35,13 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
-import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de7f9a8b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
deleted file mode 100644
index 1597b87..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs;
-
-import java.io.IOException;
-import 

hadoop git commit: YARN-3459. Fix failure of TestLog4jWarningErrorMetricsAppender. (Varun Vasudev via wangda)

2015-04-08 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ac32fa187 -> 97244b143


YARN-3459. Fix failure of TestLog4jWarningErrorMetricsAppender. (Varun Vasudev 
via wangda)

(cherry picked from commit 7af086a515d573dc90ea4deec7f4e3f23622e0e8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97244b14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97244b14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97244b14

Branch: refs/heads/branch-2
Commit: 97244b143ca72cb9dcd86b428f9408928985b545
Parents: ac32fa1
Author: Wangda Tan wan...@apache.org
Authored: Wed Apr 8 10:57:48 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed Apr 8 10:58:38 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97244b14/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d9e1754..7304071 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -108,6 +108,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore 
called.
 (Bibin A Chundatt via ozawa)
 
+YARN-3459. Fix failure of TestLog4jWarningErrorMetricsAppender.
+(Varun Vasudev via wangda)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97244b14/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
index 61d4c4c..e788e80 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
@@ -84,7 +84,7 @@ public class TestLog4jWarningErrorMetricsAppender {
 Assert.assertEquals(1, appender.getErrorCounts(cutoff).get(0).longValue());
 Assert.assertEquals(1, appender.getErrorMessagesAndCounts(cutoff).get(0)
   .size());
-Thread.sleep(2000);
+Thread.sleep(3000);
 Assert.assertEquals(1, appender.getErrorCounts(cutoff).size());
 Assert.assertEquals(0, appender.getErrorCounts(cutoff).get(0).longValue());
 Assert.assertEquals(0, appender.getErrorMessagesAndCounts(cutoff).get(0)



hadoop git commit: YARN-2901 addendum: Fixed findbugs warning caused by previous patch

2015-04-08 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 97244b143 -> 5f59e621b


YARN-2901 addendum: Fixed findbugs warning caused by previous patch

(cherry picked from commit ba9ee22ca4ed2c5ff447b66b2e2dfe25f6880fe0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f59e621
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f59e621
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f59e621

Branch: refs/heads/branch-2
Commit: 5f59e621ba8851be981d2533935c2d56c9a6757b
Parents: 97244b1
Author: Wangda Tan wan...@apache.org
Authored: Wed Apr 8 11:02:06 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed Apr 8 11:02:54 2015 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml | 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f59e621/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 943ecb0..375d19c 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -461,5 +461,14 @@
     <Method name="recoverContainersOnNode" />
     <Bug pattern="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE" />
   </Match>
-
+
+  <!-- Following fields are used in ErrorsAndWarningsBlock, which is not a part of analysis of findbugs -->
+  <Match>
+    <Class name="org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender$Element" />
+    <Or>
+      <Field name="count" />
+      <Field name="timestampSeconds" />
+    </Or>
+    <Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD" />
+  </Match>
 </FindBugsFilter>



hadoop git commit: YARN-3459. Fix failure of TestLog4jWarningErrorMetricsAppender. (Varun Vasudev via wangda)

2015-04-08 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk f4b3fc562 -> 7af086a51


YARN-3459. Fix failure of TestLog4jWarningErrorMetricsAppender. (Varun Vasudev 
via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7af086a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7af086a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7af086a5

Branch: refs/heads/trunk
Commit: 7af086a515d573dc90ea4deec7f4e3f23622e0e8
Parents: f4b3fc5
Author: Wangda Tan wan...@apache.org
Authored: Wed Apr 8 10:57:48 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed Apr 8 10:57:48 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7af086a5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d5f6ce0..7f0ddd8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -156,6 +156,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore 
called.
 (Bibin A Chundatt via ozawa)
 
+YARN-3459. Fix failure of TestLog4jWarningErrorMetricsAppender.
+(Varun Vasudev via wangda)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7af086a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
index 61d4c4c..e788e80 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
@@ -84,7 +84,7 @@ public class TestLog4jWarningErrorMetricsAppender {
 Assert.assertEquals(1, appender.getErrorCounts(cutoff).get(0).longValue());
 Assert.assertEquals(1, appender.getErrorMessagesAndCounts(cutoff).get(0)
   .size());
-Thread.sleep(2000);
+Thread.sleep(3000);
 Assert.assertEquals(1, appender.getErrorCounts(cutoff).size());
 Assert.assertEquals(0, appender.getErrorCounts(cutoff).get(0).longValue());
 Assert.assertEquals(0, appender.getErrorMessagesAndCounts(cutoff).get(0)



hadoop git commit: YARN-2901 addendum: Fixed findbugs warning caused by previous patch

2015-04-08 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7af086a51 -> ba9ee22ca


YARN-2901 addendum: Fixed findbugs warning caused by previous patch


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba9ee22c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba9ee22c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba9ee22c

Branch: refs/heads/trunk
Commit: ba9ee22ca4ed2c5ff447b66b2e2dfe25f6880fe0
Parents: 7af086a
Author: Wangda Tan wan...@apache.org
Authored: Wed Apr 8 11:02:06 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed Apr 8 11:02:06 2015 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml | 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba9ee22c/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 943ecb0..375d19c 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -461,5 +461,14 @@
     <Method name="recoverContainersOnNode" />
     <Bug pattern="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE" />
   </Match>
-
+
+  <!-- Following fields are used in ErrorsAndWarningsBlock, which is not a part of analysis of findbugs -->
+  <Match>
+    <Class name="org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender$Element" />
+    <Or>
+      <Field name="count" />
+      <Field name="timestampSeconds" />
+    </Or>
+    <Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD" />
+  </Match>
 </FindBugsFilter>



hadoop git commit: HDFS-8046. Allow better control of getContentSummary. Contributed by Kihwal Lee. (cherry picked from commit 285b31e75e51ec8e3a796c2cb0208739368ca9b8)

2015-04-08 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 de7f9a8bc -> 7e622076d


HDFS-8046. Allow better control of getContentSummary. Contributed by Kihwal Lee.
(cherry picked from commit 285b31e75e51ec8e3a796c2cb0208739368ca9b8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e622076
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e622076
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e622076

Branch: refs/heads/branch-2
Commit: 7e622076d41a85fc9a8600fb270564a085f5cd83
Parents: de7f9a8
Author: Kihwal Lee kih...@apache.org
Authored: Wed Apr 8 15:39:25 2015 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Wed Apr 8 15:39:25 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  2 ++
 .../main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 +++-
 .../server/namenode/ContentSummaryComputationContext.java | 10 +++---
 .../hdfs/server/namenode/FSDirStatAndListingOp.java   |  2 +-
 .../apache/hadoop/hdfs/server/namenode/FSDirectory.java   |  8 
 5 files changed, 21 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e622076/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7736f62..e7af8dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -79,6 +79,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8085. Move CorruptFileBlockIterator to a new hdfs.client.impl package.
 (szetszwo)
 
+HDFS-8046. Allow better control of getContentSummary (kihwal)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e622076/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 50e4b33..a8dfb02 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -200,7 +200,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int     DFS_LIST_LIMIT_DEFAULT = 1000;
   public static final String  DFS_CONTENT_SUMMARY_LIMIT_KEY = "dfs.content-summary.limit";
-  public static final int     DFS_CONTENT_SUMMARY_LIMIT_DEFAULT = 0;
+  public static final int     DFS_CONTENT_SUMMARY_LIMIT_DEFAULT = 5000;
+  public static final String  DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_KEY = "dfs.content-summary.sleep-microsec";
+  public static final long    DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_DEFAULT = 500;
   public static final String  DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY = "dfs.datanode.failed.volumes.tolerated";
   public static final int     DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT = 0;
   public static final String  DFS_DATANODE_SYNCONCLOSE_KEY = "dfs.datanode.synconclose";

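A hedged sketch of the throttling scheme these keys configure (the loop, lock,
and item handling below are illustrative; only the constants match the new
defaults): after each batch of dfs.content-summary.limit entries counted under
the lock, the computation releases the lock and sleeps for
dfs.content-summary.sleep-microsec microseconds so other operations can run.

  import java.util.concurrent.TimeUnit;
  import java.util.concurrent.locks.ReentrantLock;

  public class ContentSummaryThrottleSketch {
    public static void main(String[] args) throws InterruptedException {
      final long limitPerRun = 5000;   // dfs.content-summary.limit default
      final long sleepMicros = 500;    // dfs.content-summary.sleep-microsec default
      final ReentrantLock lock = new ReentrantLock();
      long processed = 0;
      lock.lock();
      try {
        for (int i = 0; i < 20000; i++) {
          // ... add one directory entry to the running summary ...
          if (limitPerRun > 0 && ++processed % limitPerRun == 0) {
            lock.unlock();  // yield: let queued operations take the lock
            TimeUnit.MICROSECONDS.sleep(sleepMicros);
            lock.lock();
          }
        }
      } finally {
        lock.unlock();
      }
    }
  }
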
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e622076/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 31f34b9..5739835 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -32,6 +32,8 @@ public class ContentSummaryComputationContext {
   private long nextCountLimit = 0;
   private long limitPerRun = 0;
   private long yieldCount = 0;
+  private long sleepMilliSec = 0;
+  private int sleepNanoSec = 0;
 
   /**
* Constructor
@@ -43,17 +45,19 @@ public class ContentSummaryComputationContext {
*no limit (i.e. no yielding)
*/
   public ContentSummaryComputationContext(FSDirectory dir,
-  FSNamesystem fsn, long limitPerRun) {
+  FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
 this.dir = dir;
 this.fsn = fsn;
 this.limitPerRun = limitPerRun;