svn commit: r1713764 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/

2015-11-10 Thread yliu
Author: yliu
Date: Wed Nov 11 05:23:37 2015
New Revision: 1713764

URL: http://svn.apache.org/viewvc?rev=1713764&view=rev
Log:
add yliu to PMC list

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
hadoop/common/site/main/publish/bylaws.pdf
hadoop/common/site/main/publish/index.pdf
hadoop/common/site/main/publish/issue_tracking.pdf
hadoop/common/site/main/publish/linkmap.pdf
hadoop/common/site/main/publish/mailing_lists.pdf
hadoop/common/site/main/publish/privacy_policy.pdf
hadoop/common/site/main/publish/releases.pdf
hadoop/common/site/main/publish/version_control.pdf
hadoop/common/site/main/publish/who.html
hadoop/common/site/main/publish/who.pdf

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1713764&r1=1713763&r2=1713764&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml (original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Wed Nov 11 05:23:37 2015
@@ -488,6 +488,14 @@
 
+    <tr>
+      <td>yliu</td>
+      <td><a href="http://people.apache.org/~yliu">Yi Liu</a></td>
+      <td>Intel</td>
+      <td></td>
+      <td>+8</td>
+    </tr>
+
     <tr>
       <td>zjshen</td>
       <td><a href="http://people.apache.org/~zjshen">Zhijie Shen</a></td>
       <td>Hortonworks</td>

Modified: hadoop/common/site/main/publish/bylaws.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/bylaws.pdf?rev=1713764&r1=1713763&r2=1713764&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/index.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/index.pdf?rev=1713764&r1=1713763&r2=1713764&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/issue_tracking.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/issue_tracking.pdf?rev=1713764&r1=1713763&r2=1713764&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/linkmap.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/linkmap.pdf?rev=1713764&r1=1713763&r2=1713764&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/mailing_lists.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/mailing_lists.pdf?rev=1713764&r1=1713763&r2=1713764&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/privacy_policy.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/privacy_policy.pdf?rev=1713764&r1=1713763&r2=1713764&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/releases.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/releases.pdf?rev=1713764&r1=1713763&r2=1713764&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/version_control.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/version_control.pdf?rev=1713764&r1=1713763&r2=1713764&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/who.html
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/who.html?rev=1713764&r1=1713763&r2=1713764&view=diff
==
--- hadoop/common/site/main/publish/who.html (original)
+++ hadoop/common/site/main/publish/who.html Wed Nov 11 05:23:37 2015
@@ -903,6 +903,17 @@ document.write("Last Published: " + docu
 
+<tr>
+
+<td>yliu</td>
+<td><a href="http://people.apache.org/~yliu">Yi Liu</a></td>
+<td>Intel</td>
+<td></td>
+<td>+8</td>
+
+</tr>
+
+
 <tr>
 <td>zjshen</td>
 <td><a href="http://people.apache.org/~zjshen">Zhijie Shen</a></td>
 <td>Hortonworks</td>
@@ -927,7 +938,7 @@ document.write("Last Published: " + docu
 
 
 
-
+
 Emeritus Hadoop PMC Members
 
 
@@ -942,7 +953,7 @@ document.write("Last Published: " + docu
 
 

-
+
 Hadoop Committers
 
 Hadoop's active committers include:
@@ -2042,7 +2053,7 @@ document.write("Last Published: " + docu
 
 

-
+
 Emeritus Hadoop Committers
 
 Hadoop committers who are no longer active include:

Modified: hadoop/common/site/main/publish/who.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/m

hadoop git commit: HDFS-9275. Wait previous ErasureCodingWork to finish before schedule another one. (Walter Su via yliu)

2015-11-02 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 763240948 -> 5ba2b98d0


HDFS-9275. Wait previous ErasureCodingWork to finish before schedule another 
one. (Walter Su via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ba2b98d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ba2b98d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ba2b98d

Branch: refs/heads/trunk
Commit: 5ba2b98d0fe29603e136fc43a14f853e820cf7e2
Parents: 7632409
Author: yliu <y...@apache.org>
Authored: Tue Nov 3 09:14:32 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Tue Nov 3 09:14:32 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../server/blockmanagement/BlockManager.java|   5 +
 .../apache/hadoop/hdfs/StripedFileTestUtil.java |   8 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   2 +
 .../TestReadStripedFileWithMissingBlocks.java   |   6 +-
 .../hadoop/hdfs/TestRecoverStripedFile.java | 143 ++-
 .../hdfs/TestSafeModeWithStripedFile.java   |   5 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java   |   8 +-
 .../hdfs/TestWriteStripedFileWithFailure.java   |   6 +-
 .../TestBlockTokenWithDFSStriped.java   |   4 +-
 .../namenode/TestRecoverStripedBlocks.java  |  70 +
 11 files changed, 145 insertions(+), 115 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ba2b98d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c13a725..3c60549 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -849,6 +849,9 @@ Trunk (Unreleased)
   HDFS-8438. Erasure Coding: Allow concat striped files if they have the 
same
   ErasureCodingPolicy. (Walter Su via jing9)
 
+  HDFS-9275. Wait previous ErasureCodingWork to finish before schedule
+  another one. (Walter Su via yliu)
+
 Release 2.8.0 - UNRELEASED
 
   NEW FEATURES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ba2b98d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 897df1e..dbe0726 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1586,6 +1586,10 @@ public class BlockManager implements BlockStatsMXBean {
 }
 
 if (block.isStriped()) {
+      if (pendingNum > 0) {
+        // Wait the previous recovery to finish.
+        return null;
+      }
   short[] indices = new short[liveBlockIndices.size()];
   for (int i = 0 ; i < liveBlockIndices.size(); i++) {
 indices[i] = liveBlockIndices.get(i);
@@ -1641,6 +1645,7 @@ public class BlockManager implements BlockStatsMXBean {
 if (block.isStriped()) {
   assert rw instanceof ErasureCodingWork;
   assert rw.getTargets().length > 0;
+  assert pendingNum == 0: "Should wait the previous recovery to finish";
   String src = getBlockCollection(block).getName();
   ErasureCodingPolicy ecPolicy = null;
   try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ba2b98d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index cc6e7d3..9942a2d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -61,10 +61,10 @@ public class StripedFileTestUtil {
   public static final int BLOCK_STRIPED_CELL_SIZE = 64 * 1024;
   public static final int BLOCK_STRIPE_SIZE = BLOCK_STRIPED_CELL_SIZE * 
NUM_DATA_BLOCKS;
 
-  static final int stripesPerBlock = 4;
-  static final int blockSize = BLOCK_STRIPED_CELL_SIZE * stripesPerBlock;
-  static final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2;
-  static final int BLOCK_GROUP_SIZE = blockSize * NUM_DATA_BLOCKS;
+  publi

[2/2] hadoop git commit: Revert "HDFS-4937. ReplicationMonitor can infinite-loop in BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee."

2015-10-31 Thread yliu
Revert "HDFS-4937. ReplicationMonitor can infinite-loop in 
BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee."

This reverts commit 43539b5ff4ac0874a8a454dc93a2a782b0e0ea8f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fd64167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fd64167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fd64167

Branch: refs/heads/trunk
Commit: 7fd6416759cbb202ed21b47d28c1587e04a5cdc6
Parents: 2ea4413
Author: yliu <y...@apache.org>
Authored: Sat Oct 31 16:20:48 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Sat Oct 31 16:20:48 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ---
 .../blockmanagement/BlockPlacementPolicyDefault.java| 12 
 2 files changed, 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd64167/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 211e7fc..30cdfee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2201,9 +2201,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
-HDFS-4937. ReplicationMonitor can infinite-loop in
-BlockPlacementPolicyDefault#chooseRandom() (kihwal)
-
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd64167/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index f610574..d9b8d60 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -659,7 +659,6 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
 scope, excludedNodes);
-int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
   builder = debugLoggingBuilder.get();
@@ -709,17 +708,6 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 // If no candidate storage was found on this DN then set badTarget.
 badTarget = (storage == null);
   }
-  // Refresh the node count. If the live node count became smaller,
-  // but it is not reflected in this loop, it may loop forever in case
-  // the replicas/rack cannot be satisfied.
-  if (--refreshCounter == 0) {
-refreshCounter = clusterMap.countNumOfAvailableNodes(scope,
-excludedNodes);
-// It has already gone through enough number of nodes.
-if (refreshCounter <= excludedNodes.size()) {
-  break;
-}
-  }
 }
   
 if (numOfReplicas>0) {
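
For context, the safeguard these reverts remove, as a self-contained sketch
(the simulation of a shrinking cluster is made up; only the counter logic
mirrors the deleted lines): chooseRandom() periodically re-counted the
available nodes so it could break out instead of looping forever.

import java.util.HashSet;
import java.util.Set;

class RefreshGuardSketch {
  // Simulates a chooseRandom() loop where every pick fails, gets excluded,
  // and the cluster occasionally loses a live node.
  static int picksBeforeGivingUp(int initialNodes) {
    Set<Integer> excluded = new HashSet<>();
    int live = initialNodes;
    int refreshCounter = initialNodes;
    int picks = 0;
    while (true) {
      picks++;
      excluded.add(picks);          // this node turned out to be unusable
      if (picks % 3 == 0) live--;   // the cluster shrinks under us
      if (--refreshCounter == 0) {
        refreshCounter = live;      // the re-count from the removed code
        if (refreshCounter <= excluded.size()) {
          break;                    // visited enough nodes; stop spinning
        }
      }
    }
    return picks;
  }

  public static void main(String[] args) {
    System.out.println(picksBeforeGivingUp(10)); // terminates, no infinite loop
  }
}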



hadoop git commit: Revert "HDFS-4937. ReplicationMonitor can infinite-loop in BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee."

2015-10-31 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 c8767fb68 -> e1798e8f2


Revert "HDFS-4937. ReplicationMonitor can infinite-loop in 
BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee."

This reverts commit c250b21c23945ce2c580186c224cc65ab2b501fc.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1798e8f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1798e8f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1798e8f

Branch: refs/heads/branch-2.7
Commit: e1798e8f2148d27fbabc45b56fa03f690f8613d2
Parents: c8767fb
Author: yliu <y...@apache.org>
Authored: Sat Oct 31 16:25:53 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Sat Oct 31 16:25:53 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ---
 .../blockmanagement/BlockPlacementPolicyDefault.java| 12 
 2 files changed, 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1798e8f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7ad45c4..8cf1bd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -89,9 +89,6 @@ Release 2.7.2 - UNRELEASED
 HDFS-9317. Document fsck -blockId and -storagepolicy options in branch-2.7.
 (aajisaka)
 
-HDFS-4937. ReplicationMonitor can infinite-loop in
-BlockPlacementPolicyDefault#chooseRandom() (kihwal)
-
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1798e8f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 086abca..97ea782 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -622,7 +622,6 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
   
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
 scope, excludedNodes);
-int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
   builder = debugLoggingBuilder.get();
@@ -676,17 +675,6 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 // If no candidate storage was found on this DN then set badTarget.
 badTarget = (i == storages.length);
   }
-  // Refresh the node count. If the live node count became smaller,
-  // but it is not reflected in this loop, it may loop forever in case
-  // the replicas/rack cannot be satisfied.
-  if (--refreshCounter == 0) {
-refreshCounter = clusterMap.countNumOfAvailableNodes(scope,
-excludedNodes);
-// It has already gone through enough number of nodes.
-if (refreshCounter <= excludedNodes.size()) {
-  break;
-}
-  }
 }
   
 if (numOfReplicas>0) {



[1/2] hadoop git commit: Revert "fix CHANGES.txt"

2015-10-31 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b2d6ea5e1 -> caa5da06f


Revert "fix CHANGES.txt"

This reverts commit 4c33f883c468ad17528041f3ffb00f5603fccc8f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/511e724f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/511e724f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/511e724f

Branch: refs/heads/branch-2
Commit: 511e724fff3f9382a782062ef954705c48a7
Parents: b2d6ea5
Author: yliu <y...@apache.org>
Authored: Sat Oct 31 16:23:16 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Sat Oct 31 16:23:16 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/511e724f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 53375b9..6c735bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1364,6 +1364,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in
+BlockPlacementPolicyDefault#chooseRandom() (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -1436,9 +1439,6 @@ Release 2.7.2 - UNRELEASED
 HDFS-9317. Document fsck -blockId and -storagepolicy options in branch-2.7.
 (aajisaka)
 
-HDFS-4937. ReplicationMonitor can infinite-loop in
-BlockPlacementPolicyDefault#chooseRandom() (kihwal)
-
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



[1/2] hadoop git commit: Revert "fix CHANGES.txt"

2015-10-31 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk a4a6b5b4b -> 7fd641675


Revert "fix CHANGES.txt"

This reverts commit 3c0204a5866520e74917b26b6ac2061650a5bb6d.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ea4413b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ea4413b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ea4413b

Branch: refs/heads/trunk
Commit: 2ea4413b15f82a032d6dbd2532861d82a299461a
Parents: a4a6b5b
Author: yliu <y...@apache.org>
Authored: Sat Oct 31 16:20:37 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Sat Oct 31 16:20:37 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ea4413b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5a61eed..211e7fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2201,6 +2201,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in
+BlockPlacementPolicyDefault#chooseRandom() (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -2273,9 +2276,6 @@ Release 2.7.2 - UNRELEASED
 HDFS-9317. Document fsck -blockId and -storagepolicy options in branch-2.7.
 (aajisaka)
 
-HDFS-4937. ReplicationMonitor can infinite-loop in
-BlockPlacementPolicyDefault#chooseRandom() (kihwal)
-
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



[2/2] hadoop git commit: Revert "HDFS-4937. ReplicationMonitor can infinite-loop in BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee."

2015-10-31 Thread yliu
Revert "HDFS-4937. ReplicationMonitor can infinite-loop in 
BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee."

This reverts commit 0c7fd26302109273a07b6087a9b92c9dff080816.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/caa5da06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/caa5da06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/caa5da06

Branch: refs/heads/branch-2
Commit: caa5da06f2c1bf4450d0760287312dc284dc74f9
Parents: 511e724
Author: yliu <y...@apache.org>
Authored: Sat Oct 31 16:23:26 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Sat Oct 31 16:23:26 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ---
 .../blockmanagement/BlockPlacementPolicyDefault.java| 12 
 2 files changed, 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/caa5da06/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6c735bc..40ef124 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1364,9 +1364,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
-HDFS-4937. ReplicationMonitor can infinite-loop in
-BlockPlacementPolicyDefault#chooseRandom() (kihwal)
-
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/caa5da06/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index f610574..d9b8d60 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -659,7 +659,6 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
 scope, excludedNodes);
-int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
   builder = debugLoggingBuilder.get();
@@ -709,17 +708,6 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 // If no candidate storage was found on this DN then set badTarget.
 badTarget = (storage == null);
   }
-  // Refresh the node count. If the live node count became smaller,
-  // but it is not reflected in this loop, it may loop forever in case
-  // the replicas/rack cannot be satisfied.
-  if (--refreshCounter == 0) {
-refreshCounter = clusterMap.countNumOfAvailableNodes(scope,
-excludedNodes);
-// It has already gone through enough number of nodes.
-if (refreshCounter <= excludedNodes.size()) {
-  break;
-}
-  }
 }
   
 if (numOfReplicas>0) {



hadoop git commit: HDFS-9302. WebHDFS throws NullPointerException if newLength is not provided. (Jagadesh Kiran N via yliu)

2015-10-28 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 97913f430 -> 6ff6663f6


HDFS-9302. WebHDFS throws NullPointerException if newLength is not provided. 
(Jagadesh Kiran N via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ff6663f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ff6663f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ff6663f

Branch: refs/heads/trunk
Commit: 6ff6663f64476eab5612ae9eb409104f44c6e6c7
Parents: 97913f4
Author: yliu <y...@apache.org>
Authored: Wed Oct 28 15:54:04 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Wed Oct 28 15:54:04 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../server/namenode/web/resources/NamenodeWebHdfsMethods.java| 4 
 2 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ff6663f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fc41df4..7f3052f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2170,6 +2170,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9231. fsck doesn't list correct file path when Bad Replicas/Blocks
 are in a snapshot. (Xiao Chen via Yongjun Zhang)
 
+HDFS-9302. WebHDFS throws NullPointerException if newLength is not
+provided. (Jagadesh Kiran N via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ff6663f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index b03e074..04e89a7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -707,6 +707,10 @@ public class NamenodeWebHdfsMethods {
 }
 case TRUNCATE:
 {
+      if (newLength.getValue() == null) {
+        throw new IllegalArgumentException(
+            "newLength parameter is Missing");
+      }
   // We treat each rest request as a separate client.
   final boolean b = np.truncate(fullpath, newLength.getValue(), 
   "DFSClient_" + DFSUtil.getSecureRandom().nextLong());



hadoop git commit: HDFS-9302. WebHDFS throws NullPointerException if newLength is not provided. (Jagadesh Kiran N via yliu)

2015-10-28 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6fed564d2 -> 7b90f7463


HDFS-9302. WebHDFS throws NullPointerException if newLength is not provided. 
(Jagadesh Kiran N via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b90f746
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b90f746
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b90f746

Branch: refs/heads/branch-2
Commit: 7b90f74636d15c865501bd1556de21191cd7d64f
Parents: 6fed564
Author: yliu <y...@apache.org>
Authored: Wed Oct 28 15:50:32 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Wed Oct 28 15:50:32 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../server/namenode/web/resources/NamenodeWebHdfsMethods.java| 4 
 2 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b90f746/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 957f9ba..337d75b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1338,6 +1338,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9231. fsck doesn't list correct file path when Bad Replicas/Blocks
 are in a snapshot. (Xiao Chen via Yongjun Zhang)
 
+HDFS-9302. WebHDFS throws NullPointerException if newLength is not
+provided. (Jagadesh Kiran N via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b90f746/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index b03e074..04e89a7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -707,6 +707,10 @@ public class NamenodeWebHdfsMethods {
 }
 case TRUNCATE:
 {
+      if (newLength.getValue() == null) {
+        throw new IllegalArgumentException(
+            "newLength parameter is Missing");
+      }
   // We treat each rest request as a separate client.
   final boolean b = np.truncate(fullpath, newLength.getValue(), 
   "DFSClient_" + DFSUtil.getSecureRandom().nextLong());



hadoop git commit: HADOOP-12040. Adjust inputs order for the decode API in raw erasure coder. (Kai Zheng via yliu)

2015-10-28 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6ff6663f6 -> c201cf951


HADOOP-12040. Adjust inputs order for the decode API in raw erasure coder. (Kai 
Zheng via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c201cf95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c201cf95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c201cf95

Branch: refs/heads/trunk
Commit: c201cf951d5adefefe7c68e882a0c07962248577
Parents: 6ff6663
Author: yliu <y...@apache.org>
Authored: Wed Oct 28 16:18:23 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Wed Oct 28 16:18:23 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../coder/AbstractErasureDecoder.java   | 23 +++
 .../rawcoder/AbstractRawErasureCoder.java   |  6 ++
 .../io/erasurecode/rawcoder/RSRawDecoder.java   | 69 +++-
 .../erasurecode/rawcoder/RawErasureDecoder.java |  8 +--
 .../hadoop/io/erasurecode/TestCoderBase.java| 44 +++--
 .../erasurecode/coder/TestErasureCoderBase.java |  8 +--
 .../erasurecode/coder/TestRSErasureCoder.java   |  7 +-
 .../hadoop/hdfs/DFSStripedInputStream.java  | 41 +---
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 37 ++-
 .../erasurecode/ErasureCodingWorker.java| 11 +---
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 13 ++--
 12 files changed, 154 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c201cf95/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 25a3a60..4b9a707 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -922,6 +922,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11685. StorageException complaining " no lease ID" during HBase
 distributed log splitting (Duo Xu via cnauroth)
 
+HADOOP-12040. Adjust inputs order for the decode API in raw erasure coder.
+(Kai Zheng via yliu)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c201cf95/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
index 6fdae93..abada3d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
@@ -59,13 +59,14 @@ public abstract class AbstractErasureDecoder extends 
AbstractErasureCoder {
* @return
*/
   protected ECBlock[] getInputBlocks(ECBlockGroup blockGroup) {
-ECBlock[] inputBlocks = new ECBlock[getNumParityUnits()
-+ getNumDataUnits()];
+ECBlock[] inputBlocks = new ECBlock[getNumDataUnits() +
+getNumParityUnits()];
 
-System.arraycopy(blockGroup.getParityBlocks(), 0, inputBlocks, 0,
-getNumParityUnits());
 System.arraycopy(blockGroup.getDataBlocks(), 0, inputBlocks,
-getNumParityUnits(), getNumDataUnits());
+0, getNumDataUnits());
+
+System.arraycopy(blockGroup.getParityBlocks(), 0, inputBlocks,
+getNumDataUnits(), getNumParityUnits());
 
 return inputBlocks;
   }
@@ -80,18 +81,18 @@ public abstract class AbstractErasureDecoder extends 
AbstractErasureCoder {
 
 int idx = 0;
 
-for (int i = 0; i < getNumParityUnits(); i++) {
-  if (blockGroup.getParityBlocks()[i].isErased()) {
-outputBlocks[idx++] = blockGroup.getParityBlocks()[i];
-  }
-}
-
 for (int i = 0; i < getNumDataUnits(); i++) {
   if (blockGroup.getDataBlocks()[i].isErased()) {
 outputBlocks[idx++] = blockGroup.getDataBlocks()[i];
   }
 }
 
+for (int i = 0; i < getNumParityUnits(); i++) {
+  if (blockGroup.getParityBlocks()[i].isErased()) {
+outputBlocks[idx++] = blockGroup.getParityBlocks()[i];
+  }
+}
+
 return outputBlocks;
   }
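
The new calling convention in a compact sketch: inputs handed to the raw
decoder are now assembled data units first, then parity units (previously
parity came first), with null entries for missing units. Only the ordering
below mirrors the patch; the decoder itself is elided.

import java.nio.ByteBuffer;

class DecodeOrderSketch {
  static ByteBuffer[] assembleInputs(ByteBuffer[] data, ByteBuffer[] parity) {
    ByteBuffer[] inputs = new ByteBuffer[data.length + parity.length];
    // HADOOP-12040 order: data units occupy [0, numDataUnits) ...
    System.arraycopy(data, 0, inputs, 0, data.length);
    // ... and parity units follow them.
    System.arraycopy(parity, 0, inputs, data.length, parity.length);
    return inputs;
  }
}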
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c201cf95/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java

hadoop git commit: HDFS-7087. Ability to list /.reserved. (Xiao Chen via wang)

2015-10-21 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2d577115a -> 7579ec5bf


HDFS-7087. Ability to list /.reserved. (Xiao Chen via wang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7579ec5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7579ec5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7579ec5b

Branch: refs/heads/branch-2
Commit: 7579ec5bf5689b217211681972091d1d53becc70
Parents: 2d57711
Author: yliu <y...@apache.org>
Authored: Thu Oct 22 10:42:18 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Thu Oct 22 10:42:18 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hdfs/server/namenode/FSDirAttrOp.java   |   7 +
 .../hdfs/server/namenode/FSDirDeleteOp.java |   5 +
 .../hdfs/server/namenode/FSDirRenameOp.java |   6 +
 .../server/namenode/FSDirStatAndListingOp.java  |  16 ++
 .../hdfs/server/namenode/FSDirSymlinkOp.java|   3 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |   7 +
 .../hdfs/server/namenode/FSDirectory.java   |  63 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   1 +
 .../org/apache/hadoop/fs/TestGlobPaths.java |   4 -
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 194 +++
 .../hadoop/hdfs/TestReservedRawPaths.java   |  13 +-
 12 files changed, 307 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7579ec5b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 39298a1..394fb4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -719,6 +719,8 @@ Release 2.8.0 - UNRELEASED
 TestBlockManager.testBlocksAreNotUnderreplicatedInSingleRack.
 (Masatake Iwasaki via wang)
 
+HDFS-7087. Ability to list /.reserved. (Xiao Chen via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7579ec5b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 0a138f5..7f41078 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -50,6 +51,9 @@ public class FSDirAttrOp {
   FSDirectory fsd, final String srcArg, FsPermission permission)
   throws IOException {
 String src = srcArg;
+    if (FSDirectory.isExactReservedName(src)) {
+      throw new InvalidPathException(src);
+    }
 FSPermissionChecker pc = fsd.getPermissionChecker();
 byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
 INodesInPath iip;
@@ -69,6 +73,9 @@ public class FSDirAttrOp {
   static HdfsFileStatus setOwner(
   FSDirectory fsd, String src, String username, String group)
   throws IOException {
+    if (FSDirectory.isExactReservedName(src)) {
+      throw new InvalidPathException(src);
+    }
 FSPermissionChecker pc = fsd.getPermissionChecker();
 byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
 INodesInPath iip;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7579ec5b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 51d643a..006fbc2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.name

hadoop git commit: HDFS-9208. Disabling atime may fail clients like distCp. (Kihwal Lee via yliu)

2015-10-19 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 acc0e718d -> 03bb40612


HDFS-9208. Disabling atime may fail clients like distCp. (Kihwal Lee via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03bb4061
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03bb4061
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03bb4061

Branch: refs/heads/branch-2
Commit: 03bb40612e56b3061760095289368172795fdc08
Parents: acc0e71
Author: yliu <y...@apache.org>
Authored: Tue Oct 20 11:31:14 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Tue Oct 20 11:31:14 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/namenode/FSDirAttrOp.java   |  8 -
 .../org/apache/hadoop/hdfs/TestSetTimes.java| 31 
 3 files changed, 34 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03bb4061/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dbd333a..a867506 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1254,6 +1254,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9237. NPE at TestDataNodeVolumeFailureToleration#tearDown.
 (Brahma Reddy Battula via ozawa)
 
+HDFS-9208. Disabling atime may fail clients like distCp. (Kihwal Lee via
+yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03bb4061/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index df0bc20..0a138f5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -42,7 +42,6 @@ import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 
@@ -97,13 +96,6 @@ public class FSDirAttrOp {
   static HdfsFileStatus setTimes(
   FSDirectory fsd, String src, long mtime, long atime)
   throws IOException {
-if (!fsd.isAccessTimeSupported() && atime != -1) {
-  throw new IOException(
-  "Access time for hdfs is not configured. " +
-  " Please set " + DFS_NAMENODE_ACCESSTIME_PRECISION_KEY
-  + " configuration parameter.");
-}
-
 FSPermissionChecker pc = fsd.getPermissionChecker();
 byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03bb4061/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
index 4e6091b..a90d139 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
@@ -40,6 +40,7 @@ import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.MockitoUtil;
 import org.apache.hadoop.util.Time;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -309,6 +310,36 @@ public class TestSetTimes {
 }
   }
 
+  /**
+   * Test whether atime can be set explicitly even when the atime support is
+   * disabled.
+   */
+  @Test
+  public void testAtimeUpdate() throws Exception {
+Configuration conf = new HdfsConfiguration();
+conf.setInt(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
+MiniDFSCluster cluster = null;
+FileSystem fs = null;
+
+try {
+  cluster = new MiniDFSCluster.Builder(conf)
+  .numDataNodes(0)
+  .build();
+  fs = cluster.getFileSystem();
+
+ 

hadoop git commit: HDFS-9208. Disabling atime may fail clients like distCp. (Kihwal Lee via yliu)

2015-10-19 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7e2837f83 -> 9cb5d3535


HDFS-9208. Disabling atime may fail clients like distCp. (Kihwal Lee via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9cb5d353
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9cb5d353
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9cb5d353

Branch: refs/heads/trunk
Commit: 9cb5d353533e829e624b33fb26dd37c177e1e046
Parents: 7e2837f
Author: yliu <y...@apache.org>
Authored: Tue Oct 20 11:33:04 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Tue Oct 20 11:33:04 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/namenode/FSDirAttrOp.java   |  8 -
 .../org/apache/hadoop/hdfs/TestSetTimes.java| 31 
 3 files changed, 34 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cb5d353/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 465b77c..c1e42ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2081,6 +2081,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9237. NPE at TestDataNodeVolumeFailureToleration#tearDown.
 (Brahma Reddy Battula via ozawa)
 
+HDFS-9208. Disabling atime may fail clients like distCp. (Kihwal Lee via
+yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cb5d353/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 46e172d..9099970 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -42,7 +42,6 @@ import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 
@@ -97,13 +96,6 @@ public class FSDirAttrOp {
   static HdfsFileStatus setTimes(
   FSDirectory fsd, String src, long mtime, long atime)
   throws IOException {
-if (!fsd.isAccessTimeSupported() && atime != -1) {
-  throw new IOException(
-  "Access time for hdfs is not configured. " +
-  " Please set " + DFS_NAMENODE_ACCESSTIME_PRECISION_KEY
-  + " configuration parameter.");
-}
-
 FSPermissionChecker pc = fsd.getPermissionChecker();
 byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cb5d353/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
index 4e6091b..a90d139 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
@@ -40,6 +40,7 @@ import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.MockitoUtil;
 import org.apache.hadoop.util.Time;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -309,6 +310,36 @@ public class TestSetTimes {
 }
   }
 
+  /**
+   * Test whether atime can be set explicitly even when the atime support is
+   * disabled.
+   */
+  @Test
+  public void testAtimeUpdate() throws Exception {
+Configuration conf = new HdfsConfiguration();
+conf.setInt(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
+MiniDFSCluster cluster = null;
+FileSystem fs = null;
+
+try {
+  cluster = new MiniDFSCluster.Builder(conf)
+  .numDataNodes(0)
+  .build();
+  fs = cluster.getFileSystem();
+
+ 
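
The test body is cut off in this digest; a minimal sketch of the behavior it
verifies (file name and timestamps are illustrative): with the access-time
precision set to 0, an explicit setTimes call now succeeds instead of
throwing the removed IOException.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

class AtimeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      Path p = new Path("/testdir");
      fs.mkdirs(p);
      fs.setTimes(p, 1000000L, 2000000L); // threw before HDFS-9208
    } finally {
      cluster.shutdown();
    }
  }
}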

hadoop git commit: HADOOP-12483. Maintain wrapped SASL ordering for postponed IPC responses. (Daryn Sharp via yliu)

2015-10-18 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f7d746a81 -> 2c0dbf728


HADOOP-12483. Maintain wrapped SASL ordering for postponed IPC responses. 
(Daryn Sharp via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c0dbf72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c0dbf72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c0dbf72

Branch: refs/heads/branch-2
Commit: 2c0dbf728a9ade29eb5c86e798bd6d75de956e5a
Parents: f7d746a
Author: yliu <y...@apache.org>
Authored: Mon Oct 19 09:52:39 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Mon Oct 19 09:52:39 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../main/java/org/apache/hadoop/ipc/Server.java |  64 ++-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java | 114 ++-
 3 files changed, 150 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c0dbf72/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 877513a..a19c4b8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -658,6 +658,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-10941. Proxy user verification NPEs if remote host is unresolvable.
 (Benoy Antony via stevel).
 
+HADOOP-12483. Maintain wrapped SASL ordering for postponed IPC responses.
+(Daryn Sharp via yliu)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c0dbf72/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 3b986c5..152de2b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -581,6 +581,11 @@ public abstract class Server {
 private final byte[] clientId;
 private final TraceScope traceScope; // the HTrace scope on the server side
 
+private Call(Call call) {
+  this(call.callId, call.retryCount, call.rpcRequest, call.connection,
+  call.rpcKind, call.clientId, call.traceScope);
+}
+
 public Call(int id, int retryCount, Writable param, 
 Connection connection) {
   this(id, retryCount, param, connection, RPC.RpcKind.RPC_BUILTIN,
@@ -611,12 +616,6 @@ public abstract class Server {
   + retryCount;
 }
 
-public void setResponse(Throwable t) throws IOException {
-  setupResponse(new ByteArrayOutputStream(), this,
-  RpcStatusProto.FATAL, RpcErrorCodeProto.ERROR_RPC_SERVER,
-  null, t.getClass().getName(), StringUtils.stringifyException(t));
-}
-
 public void setResponse(ByteBuffer response) {
   this.rpcResponse = response;
 }
@@ -641,14 +640,23 @@ public abstract class Server {
   int count = responseWaitCount.decrementAndGet();
   assert count >= 0 : "response has already been sent";
   if (count == 0) {
-if (rpcResponse == null) {
-  // needed by postponed operations to indicate an exception has
-  // occurred.  it's too late to re-encode the response so just
-  // drop the connection.
-  connection.close();
-} else {
-  connection.sendResponse(this);
-}
+connection.sendResponse(this);
+  }
+}
+
+@InterfaceStability.Unstable
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+public void abortResponse(Throwable t) throws IOException {
+  // don't send response if the call was already sent or aborted.
+  if (responseWaitCount.getAndSet(-1) > 0) {
+// clone the call to prevent a race with the other thread stomping
+// on the response while being sent.  the original call is
+// effectively discarded since the wait count won't hit zero
+Call call = new Call(this);
+setupResponse(new ByteArrayOutputStream(), call,
+RpcStatusProto.FATAL, RpcErrorCodeProto.ERROR_RPC_SERVER,
+null, t.getClass().getName(), StringUtils.stringifyException(t));
+call.sendResponse();
   }
 }
 
@@ -1153,6 +1161,13 @@ public abstract class Server {
 //
 void doRespond(Call call) throws IOException {
   synchronized (call.connecti

hadoop git commit: HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling values() since it creates a temporary array. (Staffan Friberg via yliu)

2015-10-12 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk db9304788 -> 0ff121610


HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling 
values() since it creates a temporary array. (Staffan Friberg via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ff12161
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ff12161
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ff12161

Branch: refs/heads/trunk
Commit: 0ff1216100d16cfa862854a89cd1be8969b0bd7e
Parents: db93047
Author: yliu <y...@apache.org>
Authored: Mon Oct 12 14:28:15 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Mon Oct 12 14:28:15 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../apache/hadoop/hdfs/server/common/HdfsServerConstants.java  | 6 --
 2 files changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ff12161/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 541dc99..4171433 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1509,6 +1509,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9110. Use Files.walkFileTree in NNUpgradeUtil#doPreUpgrade for
 better efficiency. (Charlie Helin via wang)
 
+HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
+values() since it creates a temporary array. (Staffan Friberg via yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ff12161/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 6208a7d..13c9137 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -299,6 +299,8 @@ public interface HdfsServerConstants {
 /** Temporary replica: created for replication and relocation only. */
 TEMPORARY(4);
 
+    private static final ReplicaState[] cachedValues = ReplicaState.values();
+
 private final int value;
 
 ReplicaState(int v) {
@@ -310,12 +312,12 @@ public interface HdfsServerConstants {
 }
 
 public static ReplicaState getState(int v) {
-  return ReplicaState.values()[v];
+  return cachedValues[v];
 }
 
 /** Read from in */
 public static ReplicaState read(DataInput in) throws IOException {
-  return values()[in.readByte()];
+  return cachedValues[in.readByte()];
 }
 
 /** Write to out */
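
The same micro-optimization in a self-contained form: Enum.values() clones
its backing array on every call, so hot paths that decode a byte into an
enum constant cache the array once. Names below mirror ReplicaState, but the
class is only a sketch.

enum ReplicaStateSketch {
  FINALIZED, RBW, RWR, RUR, TEMPORARY;

  private static final ReplicaStateSketch[] CACHED = values(); // one clone, ever

  static ReplicaStateSketch fromOrdinal(int v) {
    return CACHED[v]; // avoids the per-call array copy of values()[v]
  }
}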



hadoop git commit: HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in BlockManager#excessReplicateMap. (yliu)

2015-10-12 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0ff121610 -> 73b86a504


HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in 
BlockManager#excessReplicateMap. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73b86a50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73b86a50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73b86a50

Branch: refs/heads/trunk
Commit: 73b86a5046fe3262dde7b05be46b18575e35fd5f
Parents: 0ff1216
Author: yliu <y...@apache.org>
Authored: Mon Oct 12 14:40:14 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Mon Oct 12 14:40:14 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../server/blockmanagement/BlockManager.java| 23 
 .../hdfs/server/namenode/NamenodeFsck.java  |  4 ++--
 3 files changed, 14 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73b86a50/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4171433..938546c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1512,6 +1512,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
 values() since it creates a temporary array. (Staffan Friberg via yliu)
 
+HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in
+BlockManager#excessReplicateMap. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73b86a50/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 18bfc41..8a64b74 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,7 +89,6 @@ import 
org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
-import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import static 
org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
@@ -219,7 +218,7 @@ public class BlockManager implements BlockStatsMXBean {
* Maps a StorageID to the set of blocks that are "extra" for this
* DataNode. We'll eventually remove these extras.
*/
-  public final Map<String, LightWeightLinkedSet<Block>> excessReplicateMap 
=
+  public final Map<String, LightWeightHashSet<Block>> excessReplicateMap =
 new HashMap<>();
 
   /**
@@ -1421,11 +1420,6 @@ public class BlockManager implements BlockStatsMXBean {
*/
   @VisibleForTesting
  int computeRecoveryWorkForBlocks(List<List<BlockInfo>> blocksToRecover) {
-int requiredReplication, numEffectiveReplicas;
-List<DatanodeDescriptor> containingNodes;
-BlockCollection bc;
-int additionalReplRequired;
-
 int scheduledWork = 0;
 List<BlockRecoveryWork> recovWork = new LinkedList<>();
 
@@ -1786,7 +1780,7 @@ public class BlockManager implements BlockStatsMXBean {
 Collection<DatanodeDescriptor> nodesCorrupt = 
corruptReplicas.getNodes(block);
 for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
   final DatanodeDescriptor node = storage.getDatanodeDescriptor();
-  LightWeightLinkedSet<Block> excessBlocks =
+  LightWeightHashSet<Block> excessBlocks =
 excessReplicateMap.get(node.getDatanodeUuid());
   int countableReplica = storage.getState() == State.NORMAL ? 1 : 0;
   if ((nodesCorrupt != null) && (nodesCorrupt.contains(node)))
@@ -3090,7 +3084,7 @@ public class BlockManager implements BlockStatsMXBean {
 postponeBlock(block);
 return;
   }
-  LightWeightLinkedSet<Block> excessBlocks = excessReplicateMap.get(
+  LightWeightHashSet<Block> excessBlocks = excessReplicateMap.get(
   cur.getDatanodeUuid());
   if (excessBlocks == null || !excessBlocks.contains(block)) {
 if (!cur.isDecommissionInProgress() && !cur.isDecommiss
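
The substance of the change: excessReplicateMap is only ever used for add/remove/contains, never for ordered iteration, so the ordering links that LightWeightLinkedSet maintains are pure per-entry overhead. A hedged sketch of the trade-off using the standard java.util analogues (the LightWeight* classes themselves are HDFS-internal):

import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;

public class SetChoice {
    public static void main(String[] args) {
        // LinkedHashSet threads a doubly-linked list through its entries,
        // two extra references per element, solely to preserve insertion
        // order on iteration.
        Set<String> ordered = new LinkedHashSet<>();
        // HashSet drops those links; when callers only add, remove and
        // test membership, it is the cheaper structure.
        Set<String> unordered = new HashSet<>();
        ordered.add("blk_1001");
        unordered.add("blk_1001");
        System.out.println(ordered.contains("blk_1001"));    // true
        System.out.println(unordered.contains("blk_1001"));  // true
    }
}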

hadoop git commit: HADOOP-10300. Allowed deferred sending of call responses. (Daryn Sharp via yliu)

2015-10-12 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 049c6e8dc -> e617cf6dd


HADOOP-10300. Allowed deferred sending of call responses. (Daryn Sharp via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e617cf6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e617cf6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e617cf6d

Branch: refs/heads/trunk
Commit: e617cf6dd13f2bb5d7cbb15ee2cdb260ecd46cd3
Parents: 049c6e8
Author: yliu <y...@apache.org>
Authored: Mon Oct 12 16:09:14 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Mon Oct 12 16:09:14 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../main/java/org/apache/hadoop/ipc/Server.java |  76 ++---
 .../hadoop/ipc/TestIPCServerResponder.java  | 161 +--
 3 files changed, 212 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e617cf6d/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9d954d0..9ab19f9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -621,6 +621,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-12360. Create StatsD metrics2 sink. (Dave Marion via stevel)
 
+HADOOP-10300. Allowed deferred sending of call responses. (Daryn Sharp via
+yliu)
+
   IMPROVEMENTS
 
 HADOOP-12458. Retries is typoed to spell Retires in parts of

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e617cf6d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index b8026c6..1f35795 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -579,6 +579,7 @@ public abstract class Server {
 private long timestamp;   // time received when response is 
null
   // time served when response is not 
null
 private ByteBuffer rpcResponse;   // the response for this call
+private AtomicInteger responseWaitCount = new AtomicInteger(1);
 private final RPC.RpcKind rpcKind;
 private final byte[] clientId;
 private final TraceScope traceScope; // the HTrace scope on the server side
@@ -613,10 +614,47 @@ public abstract class Server {
   + retryCount;
 }
 
+public void setResponse(Throwable t) throws IOException {
+  setupResponse(new ByteArrayOutputStream(), this,
+  RpcStatusProto.FATAL, RpcErrorCodeProto.ERROR_RPC_SERVER,
+  null, t.getClass().getName(), StringUtils.stringifyException(t));
+}
+
 public void setResponse(ByteBuffer response) {
   this.rpcResponse = response;
 }
 
+/**
+ * Allow an IPC response to be postponed instead of sent immediately
+ * after the handler returns from the proxy method.  The intended use
+ * case is freeing up the handler thread when the response is known,
+ * but an expensive pre-condition must be satisfied before it's sent
+ * to the client.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+public void postponeResponse() {
+  int count = responseWaitCount.incrementAndGet();
+  assert count > 0 : "response has already been sent";
+}
+
+@InterfaceStability.Unstable
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+public void sendResponse() throws IOException {
+  int count = responseWaitCount.decrementAndGet();
+  assert count >= 0 : "response has already been sent";
+  if (count == 0) {
+if (rpcResponse == null) {
+  // needed by postponed operations to indicate an exception has
+  // occurred.  it's too late to re-encode the response so just
+  // drop the connection.
+  connection.close();
+} else {
+  connection.sendResponse(this);
+}
+  }
+}
+
 // For Schedulable
 @Override
 public UserGroupInformation getUserGroupInformation() {
@@ -1227,10 +1265,6 @@ public abstract class Server {
 RpcConstants.INVALID_RETRY_COUNT, null, this);
 private ByteArrayOutputStream authFailedResponse = new 
ByteArrayOutputStream();
 
-private final Call saslCall = new Call(AuthPr
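
The counting scheme above is the heart of the patch: every Call starts with a wait count of one (the send the RPC layer itself performs after the handler returns), postponeResponse() registers one more pending sender, and whichever sendResponse() drives the count to zero actually transmits. A self-contained model of that logic, assuming nothing beyond the JDK (the class below is illustrative, not the HDFS Server.Call):

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

class DeferredCall {
    // Starts at 1: the RPC layer's own send after the handler returns.
    private final AtomicInteger responseWaitCount = new AtomicInteger(1);

    void postponeResponse() {
        responseWaitCount.incrementAndGet();   // one more pending sender
    }

    void sendResponse() throws IOException {
        if (responseWaitCount.decrementAndGet() == 0) {
            transmit();                        // only the last caller sends
        }
    }

    private void transmit() {
        System.out.println("response transmitted exactly once");
    }

    public static void main(String[] args) throws IOException {
        DeferredCall call = new DeferredCall();
        call.postponeResponse();  // handler defers: count 1 -> 2
        call.sendResponse();      // framework decrement: 2 -> 1, no send
        call.sendResponse();      // deferred work finished: 1 -> 0, sends
    }
}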

hadoop git commit: HADOOP-10300. Allowed deferred sending of call responses. (Daryn Sharp via yliu)

2015-10-12 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9988b57e7 -> 146f297d7


HADOOP-10300. Allowed deferred sending of call responses. (Daryn Sharp via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/146f297d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/146f297d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/146f297d

Branch: refs/heads/branch-2
Commit: 146f297d7dae1ab3696f1fcf6a90a7c249df68e6
Parents: 9988b57
Author: yliu <y...@apache.org>
Authored: Mon Oct 12 16:05:47 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Mon Oct 12 16:05:47 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../main/java/org/apache/hadoop/ipc/Server.java |  76 ++---
 .../hadoop/ipc/TestIPCServerResponder.java  | 161 +--
 3 files changed, 212 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/146f297d/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1dc42e9..897d096 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -33,6 +33,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-12360. Create StatsD metrics2 sink. (Dave Marion via stevel)
 
+HADOOP-10300. Allowed deferred sending of call responses. (Daryn Sharp via
+yliu)
+
   IMPROVEMENTS
 
 HADOOP-12458. Retries is typoed to spell Retires in parts of

http://git-wip-us.apache.org/repos/asf/hadoop/blob/146f297d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 8b3976c..3b986c5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -576,6 +576,7 @@ public abstract class Server {
 private long timestamp;   // time received when response is 
null
   // time served when response is not 
null
 private ByteBuffer rpcResponse;   // the response for this call
+private AtomicInteger responseWaitCount = new AtomicInteger(1);
 private final RPC.RpcKind rpcKind;
 private final byte[] clientId;
 private final TraceScope traceScope; // the HTrace scope on the server side
@@ -610,10 +611,47 @@ public abstract class Server {
   + retryCount;
 }
 
+public void setResponse(Throwable t) throws IOException {
+  setupResponse(new ByteArrayOutputStream(), this,
+  RpcStatusProto.FATAL, RpcErrorCodeProto.ERROR_RPC_SERVER,
+  null, t.getClass().getName(), StringUtils.stringifyException(t));
+}
+
 public void setResponse(ByteBuffer response) {
   this.rpcResponse = response;
 }
 
+/**
+ * Allow an IPC response to be postponed instead of sent immediately
+ * after the handler returns from the proxy method.  The intended use
+ * case is freeing up the handler thread when the response is known,
+ * but an expensive pre-condition must be satisfied before it's sent
+ * to the client.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+public void postponeResponse() {
+  int count = responseWaitCount.incrementAndGet();
+  assert count > 0 : "response has already been sent";
+}
+
+@InterfaceStability.Unstable
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+public void sendResponse() throws IOException {
+  int count = responseWaitCount.decrementAndGet();
+  assert count >= 0 : "response has already been sent";
+  if (count == 0) {
+if (rpcResponse == null) {
+  // needed by postponed operations to indicate an exception has
+  // occurred.  it's too late to re-encode the response so just
+  // drop the connection.
+  connection.close();
+} else {
+  connection.sendResponse(this);
+}
+  }
+}
+
 // For Schedulable
 @Override
 public UserGroupInformation getUserGroupInformation() {
@@ -1224,10 +1262,6 @@ public abstract class Server {
 RpcConstants.INVALID_RETRY_COUNT, null, this);
 private ByteArrayOutputStream authFailedResponse = new 
ByteArrayOutputStream();
 
-private final Call saslCall = new Call(AuthPr

hadoop git commit: HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling values() since it creates a temporary array. (Staffan Friberg via yliu)

2015-10-12 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 28edc7b12 -> 5e2a44a7f


HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling 
values() since it creates a temporary array. (Staffan Friberg via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e2a44a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e2a44a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e2a44a7

Branch: refs/heads/branch-2
Commit: 5e2a44a7fef75e76d95fe4259c736ec4a7978fce
Parents: 28edc7b
Author: yliu <y...@apache.org>
Authored: Mon Oct 12 14:30:01 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Mon Oct 12 14:30:01 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../apache/hadoop/hdfs/server/common/HdfsServerConstants.java  | 6 --
 2 files changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e2a44a7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 622e8dd..8bf2285 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -682,6 +682,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9110. Use Files.walkFileTree in NNUpgradeUtil#doPreUpgrade for
 better efficiency. (Charlie Helin via wang)
 
+HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
+values() since it creates a temporary array. (Staffan Friberg via yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e2a44a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index ef2027e..e447d3a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -292,6 +292,8 @@ public interface HdfsServerConstants {
 /** Temporary replica: created for replication and relocation only. */
 TEMPORARY(4);
 
+private static final ReplicaState[] cachedValues = ReplicaState.values();
+
 private final int value;
 
 ReplicaState(int v) {
@@ -303,12 +305,12 @@ public interface HdfsServerConstants {
 }
 
 public static ReplicaState getState(int v) {
-  return ReplicaState.values()[v];
+  return cachedValues[v];
 }
 
 /** Read from in */
 public static ReplicaState read(DataInput in) throws IOException {
-  return values()[in.readByte()];
+  return cachedValues[in.readByte()];
 }
 
 /** Write to out */



hadoop git commit: HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in BlockManager#excessReplicateMap. (yliu)

2015-10-12 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5e2a44a7f -> 85b5481d8


HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in 
BlockManager#excessReplicateMap. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85b5481d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85b5481d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85b5481d

Branch: refs/heads/branch-2
Commit: 85b5481d87adedd4df3dee5a690539395460eaaf
Parents: 5e2a44a
Author: yliu <y...@apache.org>
Authored: Mon Oct 12 14:55:58 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Mon Oct 12 14:55:58 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../server/blockmanagement/BlockManager.java| 25 ++--
 .../hdfs/server/namenode/NamenodeFsck.java  |  4 ++--
 3 files changed, 18 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85b5481d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8bf2285..b9e9e7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -685,6 +685,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
 values() since it creates a temporary array. (Staffan Friberg via yliu)
 
+HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in
+BlockManager#excessReplicateMap. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85b5481d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ceca627..dea7c3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -85,7 +85,6 @@ import 
org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
-import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -206,7 +205,7 @@ public class BlockManager implements BlockStatsMXBean {
* Maps a StorageID to the set of blocks that are "extra" for this
* DataNode. We'll eventually remove these extras.
*/
-  public final Map<String, LightWeightLinkedSet<Block>> excessReplicateMap =
+  public final Map<String, LightWeightHashSet<Block>> excessReplicateMap =
 new HashMap<>();
 
   /**
@@ -1634,7 +1633,7 @@ public class BlockManager implements BlockStatsMXBean {
 Collection<DatanodeDescriptor> nodesCorrupt = 
corruptReplicas.getNodes(block);
 for(DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
   final DatanodeDescriptor node = storage.getDatanodeDescriptor();
-  LightWeightLinkedSet<Block> excessBlocks =
+  LightWeightHashSet<Block> excessBlocks =
 excessReplicateMap.get(node.getDatanodeUuid());
   int countableReplica = storage.getState() == State.NORMAL ? 1 : 0; 
   if ((nodesCorrupt != null) && (nodesCorrupt.contains(node)))
@@ -2888,8 +2887,8 @@ public class BlockManager implements BlockStatsMXBean {
 postponeBlock(block);
 return;
   }
-  LightWeightLinkedSet<Block> excessBlocks = excessReplicateMap.get(cur
-  .getDatanodeUuid());
+  LightWeightHashSet<Block> excessBlocks = excessReplicateMap.get(
+  cur.getDatanodeUuid());
   if (excessBlocks == null || !excessBlocks.contains(block)) {
 if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
   // exclude corrupt replicas
@@ -3006,9 +3005,10 @@ public class BlockManager implements BlockStatsMXBean {
 
   private void addToExcessReplicate(DatanodeInfo dn, Block block) {
 assert namesystem.hasWriteLock();
-LightWeightLinkedSet<Block> excessBlocks = 
excessReplicateMap.get(dn.getDatanodeUuid());
+LightWeightHashSet<Block> excessB

hadoop git commit: HDFS-9137. DeadLock between DataNode#refreshVolumes and BPOfferService#registrationSucceeded. (Uma Maheswara Rao G via yliu)

2015-10-07 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2f4bcdef8 -> 22ed1471b


HDFS-9137. DeadLock between DataNode#refreshVolumes and 
BPOfferService#registrationSucceeded. (Uma Maheswara Rao G via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22ed1471
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22ed1471
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22ed1471

Branch: refs/heads/branch-2
Commit: 22ed1471bd5ef5988432fb4ae8034f4f90185405
Parents: 2f4bcde
Author: yliu <y...@apache.org>
Authored: Thu Oct 8 10:26:30 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Thu Oct 8 10:26:30 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../hadoop/hdfs/server/datanode/DataNode.java   | 25 +++-
 2 files changed, 22 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22ed1471/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9250c2b..756052c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1169,6 +1169,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9211. Fix incorrect version in hadoop-hdfs-native-client/pom.xml
 from HDFS-9170 branch-2 backport. (Eric Payne via wang)
 
+HDFS-9137. DeadLock between DataNode#refreshVolumes and
+BPOfferService#registrationSucceeded. (Uma Maheswara Rao G via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22ed1471/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index d1e0160..b9be20e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -502,12 +502,29 @@ public class DataNode extends ReconfigurableBase
   public void reconfigurePropertyImpl(String property, String newVal)
   throws ReconfigurationException {
 if (property.equals(DFS_DATANODE_DATA_DIR_KEY)) {
+  IOException rootException = null;
   try {
 LOG.info("Reconfiguring " + property + " to " + newVal);
 this.refreshVolumes(newVal);
   } catch (IOException e) {
-throw new ReconfigurationException(property, newVal,
-getConf().get(property), e);
+rootException = e;
+  } finally {
+// Send a full block report to let NN acknowledge the volume changes.
+try {
+  triggerBlockReport(
+  new BlockReportOptions.Factory().setIncremental(false).build());
+} catch (IOException e) {
+  LOG.warn("Exception while sending the block report after refreshing"
+  + " volumes " + property + " to " + newVal, e);
+  if (rootException == null) {
+rootException = e;
+  }
+} finally {
+  if (rootException != null) {
+throw new ReconfigurationException(property, newVal,
+getConf().get(property), rootException);
+  }
+}
   }
 } else {
   throw new ReconfigurationException(
@@ -689,10 +706,6 @@ public class DataNode extends ReconfigurableBase
   conf.set(DFS_DATANODE_DATA_DIR_KEY,
   Joiner.on(",").join(effectiveVolumes));
   dataDirs = getStorageLocations(conf);
-
-  // Send a full block report to let NN acknowledge the volume changes.
-  triggerBlockReport(new BlockReportOptions.Factory()
-  .setIncremental(false).build());
 }
   }
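
Two things are going on in this patch. First, the block report moves out of the synchronized refreshVolumes() and into the unsynchronized caller's finally block, which breaks the lock-ordering cycle with BPOfferService#registrationSucceeded. Second, the finally chain is written so the first failure remains the exception that callers see. A minimal sketch of that second pattern, with illustrative names:

import java.io.IOException;

class RefreshSketch {
    void refresh(String newVal) throws IOException {
        IOException rootException = null;
        try {
            doRefresh(newVal);               // primary action
        } catch (IOException e) {
            rootException = e;               // remember; don't rethrow yet
        } finally {
            try {
                notifyPeers();               // follow-up that must always run
            } catch (IOException e) {
                if (rootException == null) { // keep the earlier failure
                    rootException = e;
                }
            } finally {
                if (rootException != null) {
                    throw rootException;     // one throw, original cause wins
                }
            }
        }
    }

    private void doRefresh(String v) throws IOException { }
    private void notifyPeers() throws IOException { }
}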
 



hadoop git commit: HDFS-9137. DeadLock between DataNode#refreshVolumes and BPOfferService#registrationSucceeded. (Uma Maheswara Rao G via yliu)

2015-10-07 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 66e2cfa1a -> 35affec38


HDFS-9137. DeadLock between DataNode#refreshVolumes and 
BPOfferService#registrationSucceeded. (Uma Maheswara Rao G via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35affec3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35affec3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35affec3

Branch: refs/heads/trunk
Commit: 35affec38e17e3f9c21d36be71476072c03f
Parents: 66e2cfa
Author: yliu <y...@apache.org>
Authored: Thu Oct 8 10:27:47 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Thu Oct 8 10:27:47 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../hadoop/hdfs/server/datanode/DataNode.java   | 25 +++-
 2 files changed, 22 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35affec3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ee6d19d..e05a618 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1983,6 +1983,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9176. Fix TestDirectoryScanner#testThrottling often fails.
 (Daniel Templeton via lei)
 
+HDFS-9137. DeadLock between DataNode#refreshVolumes and
+BPOfferService#registrationSucceeded. (Uma Maheswara Rao G via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35affec3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 63b8847..b280067 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -497,12 +497,29 @@ public class DataNode extends ReconfigurableBase
   public void reconfigurePropertyImpl(String property, String newVal)
   throws ReconfigurationException {
 if (property.equals(DFS_DATANODE_DATA_DIR_KEY)) {
+  IOException rootException = null;
   try {
 LOG.info("Reconfiguring " + property + " to " + newVal);
 this.refreshVolumes(newVal);
   } catch (IOException e) {
-throw new ReconfigurationException(property, newVal,
-getConf().get(property), e);
+rootException = e;
+  } finally {
+// Send a full block report to let NN acknowledge the volume changes.
+try {
+  triggerBlockReport(
+  new BlockReportOptions.Factory().setIncremental(false).build());
+} catch (IOException e) {
+  LOG.warn("Exception while sending the block report after refreshing"
+  + " volumes " + property + " to " + newVal, e);
+  if (rootException == null) {
+rootException = e;
+  }
+} finally {
+  if (rootException != null) {
+throw new ReconfigurationException(property, newVal,
+getConf().get(property), rootException);
+  }
+}
   }
 } else {
   throw new ReconfigurationException(
@@ -684,10 +701,6 @@ public class DataNode extends ReconfigurableBase
   conf.set(DFS_DATANODE_DATA_DIR_KEY,
   Joiner.on(",").join(effectiveVolumes));
   dataDirs = getStorageLocations(conf);
-
-  // Send a full block report to let NN acknowledge the volume changes.
-  triggerBlockReport(new BlockReportOptions.Factory()
-  .setIncremental(false).build());
 }
   }
 



hadoop git commit: HDFS-8859. Improve DataNode ReplicaMap memory footprint to save about 45%. (yliu)

2015-09-29 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 151fca503 -> d6fa34e01


HDFS-8859. Improve DataNode ReplicaMap memory footprint to save about 45%. 
(yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6fa34e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6fa34e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6fa34e0

Branch: refs/heads/trunk
Commit: d6fa34e014b0e2a61b24f05dd08ebe12354267fd
Parents: 151fca5
Author: yliu <y...@apache.org>
Authored: Tue Sep 29 16:20:35 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Tue Sep 29 16:20:35 2015 +0800

--
 .../main/java/org/apache/hadoop/util/GSet.java  |  14 ++
 .../org/apache/hadoop/util/GSetByHashMap.java   |   6 +
 .../org/apache/hadoop/util/LightWeightGSet.java |  82 --
 .../hadoop/util/LightWeightResizableGSet.java   | 129 ++
 .../java/org/apache/hadoop/util/TestGSet.java   |  69 -
 .../hadoop/util/TestLightWeightCache.java   |   6 +
 .../util/TestLightWeightResizableGSet.java  | 252 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/datanode/ReplicaInfo.java   |  27 +-
 .../datanode/fsdataset/impl/BlockPoolSlice.java |   7 +-
 .../datanode/fsdataset/impl/ReplicaMap.java |  38 +--
 11 files changed, 569 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6fa34e0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
index 26e73cf..e4a8d0f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.util;
 
+import java.util.Collection;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -86,5 +88,17 @@ public interface GSet<K, E extends K> extends Iterable<E> {
   */
   E remove(K key);
 
+  /**
+   * Clear the set.
+   */
   void clear();
+
+  /**
+   * Returns a {@link Collection} view of the values contained in this set.
+   * The collection is backed by the set, so changes to the set are
+   * reflected in the collection, and vice-versa.
+   *
+   * @return the collection of values.
+   */
+  Collection<E> values();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6fa34e0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSetByHashMap.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSetByHashMap.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSetByHashMap.java
index 87488db..e341c74 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSetByHashMap.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSetByHashMap.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.util;
 
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
 
@@ -70,4 +71,9 @@ public class GSetByHashMap<K, E extends K> implements GSet<K, 
E> {
   public void clear() {
 m.clear();
   }
+
+  @Override
+  public Collection<E> values() {
+return m.values();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6fa34e0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java
index 1767d85..7c7878a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.util;
 
 import java.io.PrintStream;
+import java.util.AbstractCollection;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.ConcurrentModificationException;
 import java.util.Iterator;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.Visi
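
The interesting contract here is that values() must be a live view backed by the set, not a snapshot, so callers can walk the replicas without copying them. A hedged sketch of how such a view is typically built on AbstractCollection (illustrative, not the LightWeightGSet internals):

import java.util.AbstractCollection;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;

class TinySet<E> implements Iterable<E> {
    private final List<E> storage = new ArrayList<>();

    void put(E e) { storage.add(e); }

    @Override
    public Iterator<E> iterator() { return storage.iterator(); }

    // Backed view: reads through to storage instead of copying it.
    Collection<E> values() {
        return new AbstractCollection<E>() {
            @Override public Iterator<E> iterator() { return TinySet.this.iterator(); }
            @Override public int size() { return storage.size(); }
        };
    }

    public static void main(String[] args) {
        TinySet<String> set = new TinySet<>();
        Collection<String> view = set.values();
        set.put("replica-1");               // mutate after creating the view
        System.out.println(view.size());    // 1: the view sees the change
    }
}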

hadoop git commit: HDFS-8859. Improve DataNode ReplicaMap memory footprint to save about 45%. (yliu)

2015-09-29 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1a5f3e93c -> 9c47ab32b


HDFS-8859. Improve DataNode ReplicaMap memory footprint to save about 45%. 
(yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c47ab32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c47ab32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c47ab32

Branch: refs/heads/branch-2
Commit: 9c47ab32b14fa9e589d54ad739b3721bb8d444f9
Parents: 1a5f3e9
Author: yliu <y...@apache.org>
Authored: Tue Sep 29 16:23:50 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Tue Sep 29 16:23:50 2015 +0800

--
 .../main/java/org/apache/hadoop/util/GSet.java  |  14 ++
 .../org/apache/hadoop/util/GSetByHashMap.java   |   6 +
 .../org/apache/hadoop/util/LightWeightGSet.java |  82 --
 .../hadoop/util/LightWeightResizableGSet.java   | 129 ++
 .../java/org/apache/hadoop/util/TestGSet.java   |  69 -
 .../hadoop/util/TestLightWeightCache.java   |   6 +
 .../util/TestLightWeightResizableGSet.java  | 252 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/datanode/ReplicaInfo.java   |  27 +-
 .../datanode/fsdataset/impl/BlockPoolSlice.java |   7 +-
 .../datanode/fsdataset/impl/ReplicaMap.java |  38 +--
 11 files changed, 569 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c47ab32/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
index 26e73cf..e4a8d0f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.util;
 
+import java.util.Collection;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -86,5 +88,17 @@ public interface GSet<K, E extends K> extends Iterable<E> {
   */
   E remove(K key);
 
+  /**
+   * Clear the set.
+   */
   void clear();
+
+  /**
+   * Returns a {@link Collection} view of the values contained in this set.
+   * The collection is backed by the set, so changes to the set are
+   * reflected in the collection, and vice-versa.
+   *
+   * @return the collection of values.
+   */
+  Collection<E> values();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c47ab32/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSetByHashMap.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSetByHashMap.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSetByHashMap.java
index 87488db..e341c74 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSetByHashMap.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSetByHashMap.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.util;
 
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
 
@@ -70,4 +71,9 @@ public class GSetByHashMap<K, E extends K> implements GSet<K, 
E> {
   public void clear() {
 m.clear();
   }
+
+  @Override
+  public Collection<E> values() {
+return m.values();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c47ab32/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java
index 1767d85..7c7878a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.util;
 
 import java.io.PrintStream;
+import java.util.AbstractCollection;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.ConcurrentModificationException;
 import java.util.Iterator;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotatio

hadoop git commit: HDFS-9141. Thread leak in Datanode#refreshVolumes. (Uma Maheswara Rao G via yliu)

2015-09-29 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9c47ab32b -> 28805cb8d


HDFS-9141. Thread leak in Datanode#refreshVolumes. (Uma Maheswara Rao G via 
yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28805cb8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28805cb8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28805cb8

Branch: refs/heads/branch-2
Commit: 28805cb8d7b117892df22792ffa9ad375297b023
Parents: 9c47ab3
Author: yliu <y...@apache.org>
Authored: Tue Sep 29 22:06:20 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Tue Sep 29 22:06:20 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../org/apache/hadoop/hdfs/server/datanode/DataNode.java | 11 ++-
 2 files changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28805cb8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0461766..a4b969a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1106,6 +1106,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9092. Nfs silently drops overlapping write requests and causes data
 copying to fail. (Yongjun Zhang)
 
+HDFS-9141. Thread leak in Datanode#refreshVolumes. (Uma Maheswara Rao G
+via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28805cb8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 022989a..d1e0160 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -25,9 +25,7 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISS
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
@@ -611,7 +609,7 @@ public class DataNode extends ReconfigurableBase
   private synchronized void refreshVolumes(String newVolumes) throws 
IOException {
 Configuration conf = getConf();
 conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
-
+ExecutorService service = null;
 int numOldDataDirs = dataDirs.size();
 ChangedVolumes changedVolumes = parseChangedVolumes(newVolumes);
 StringBuilder errorMessageBuilder = new StringBuilder();
@@ -634,8 +632,8 @@ public class DataNode extends ReconfigurableBase
 for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
   nsInfos.add(bpos.getNamespaceInfo());
 }
-ExecutorService service = Executors.newFixedThreadPool(
-changedVolumes.newLocations.size());
+service = Executors
+.newFixedThreadPool(changedVolumes.newLocations.size());
 List<Future<IOException>> exceptions = Lists.newArrayList();
 for (final StorageLocation location : changedVolumes.newLocations) {
   exceptions.add(service.submit(new Callable<IOException>() {
@@ -685,6 +683,9 @@ public class DataNode extends ReconfigurableBase
 throw new IOException(errorMessageBuilder.toString());
   }
 } finally {
+  if (service != null) {
+service.shutdown();
+  }
   conf.set(DFS_DATANODE_DATA_DIR_KEY,
   Joiner.on(",").join(effectiveVolumes));
   dataDirs = getStorageLocations(conf);
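
The leak fix has a simple shape: the pool is declared before the try so the finally can always reach it, and the shutdown is null-guarded because an early return may mean no pool was ever created. A self-contained sketch under those assumptions (names illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class PoolLifecycle {
    void runTasks(int n) throws Exception {
        ExecutorService service = null;
        try {
            if (n <= 0) {
                return;              // early exit: no pool created, no leak
            }
            service = Executors.newFixedThreadPool(n);
            service.submit(() -> System.out.println("volume task")).get();
        } finally {
            if (service != null) {
                service.shutdown();  // lets idle worker threads terminate
            }
        }
    }

    public static void main(String[] args) throws Exception {
        new PoolLifecycle().runTasks(2);
    }
}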



hadoop git commit: HDFS-9141. Thread leak in Datanode#refreshVolumes. (Uma Maheswara Rao G via yliu)

2015-09-29 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk d6fa34e01 -> 715dbddf7


HDFS-9141. Thread leak in Datanode#refreshVolumes. (Uma Maheswara Rao G via 
yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/715dbddf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/715dbddf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/715dbddf

Branch: refs/heads/trunk
Commit: 715dbddf77866bb47a4b95421091f64a3785444f
Parents: d6fa34e
Author: yliu <y...@apache.org>
Authored: Tue Sep 29 22:05:34 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Tue Sep 29 22:05:34 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../org/apache/hadoop/hdfs/server/datanode/DataNode.java | 11 ++-
 2 files changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/715dbddf/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 182464b..2c90b23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1453,6 +1453,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9092. Nfs silently drops overlapping write requests and causes data
 copying to fail. (Yongjun Zhang)
 
+HDFS-9141. Thread leak in Datanode#refreshVolumes. (Uma Maheswara Rao G
+via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/715dbddf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 2646089..2fe67fd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -25,9 +25,7 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISS
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
@@ -604,7 +602,7 @@ public class DataNode extends ReconfigurableBase
   private synchronized void refreshVolumes(String newVolumes) throws 
IOException {
 Configuration conf = getConf();
 conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
-
+ExecutorService service = null;
 int numOldDataDirs = dataDirs.size();
 ChangedVolumes changedVolumes = parseChangedVolumes(newVolumes);
 StringBuilder errorMessageBuilder = new StringBuilder();
@@ -627,8 +625,8 @@ public class DataNode extends ReconfigurableBase
 for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
   nsInfos.add(bpos.getNamespaceInfo());
 }
-ExecutorService service = Executors.newFixedThreadPool(
-changedVolumes.newLocations.size());
+service = Executors
+.newFixedThreadPool(changedVolumes.newLocations.size());
 List<Future<IOException>> exceptions = Lists.newArrayList();
 for (final StorageLocation location : changedVolumes.newLocations) {
   exceptions.add(service.submit(new Callable<IOException>() {
@@ -678,6 +676,9 @@ public class DataNode extends ReconfigurableBase
 throw new IOException(errorMessageBuilder.toString());
   }
 } finally {
+  if (service != null) {
+service.shutdown();
+  }
   conf.set(DFS_DATANODE_DATA_DIR_KEY,
   Joiner.on(",").join(effectiveVolumes));
   dataDirs = getStorageLocations(conf);



hadoop git commit: HADOOP-12448. TestTextCommand: use mkdirs rather than mkdir to create test directory. (Contributed by Colin Patrick McCabe and Chris Nauroth)

2015-09-29 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cb89ee931 -> 362522263


HADOOP-12448. TestTextCommand: use mkdirs rather than mkdir to create test 
directory. (Contributed by Colin Patrick McCabe and Chris Nauroth)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36252226
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36252226
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36252226

Branch: refs/heads/branch-2
Commit: 3625222635a50bf1e199e62c8f4d79a52165f0ea
Parents: cb89ee9
Author: yliu <y...@apache.org>
Authored: Wed Sep 30 11:04:09 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Wed Sep 30 11:04:09 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../org/apache/hadoop/fs/shell/TestTextCommand.java  | 15 +--
 2 files changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36252226/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0689313..04a172d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -595,6 +595,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-12447. Clean up some htrace integration issues (cmccabe)
 
+HADOOP-12448. TestTextCommand: use mkdirs rather than mkdir to create test
+directory. (Contributed by Colin Patrick McCabe and Chris Nauroth)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36252226/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
index 70a2f03..0e33d6a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
@@ -22,11 +22,13 @@ import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.FileOutputStream;
-import java.io.InputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.StringWriter;
 import java.lang.reflect.Method;
 import java.net.URI;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -38,12 +40,13 @@ import org.junit.Test;
  * by the Text command.
  */
 public class TestTextCommand {
-  private static final String TEST_ROOT_DIR =
-System.getProperty("test.build.data", "build/test/data/") + "/testText";
+  private static final File TEST_ROOT_DIR =
+Paths.get(System.getProperty("test.build.data", "build/test/data"),
+"testText").toFile();
   private static final String AVRO_FILENAME =
-new Path(TEST_ROOT_DIR, "weather.avro").toUri().getPath();
+new File(TEST_ROOT_DIR, "weather.avro").toURI().getPath();
   private static final String TEXT_FILENAME =
-new Path(TEST_ROOT_DIR, "testtextfile.txt").toUri().getPath();
+new File(TEST_ROOT_DIR, "testtextfile.txt").toURI().getPath();
 
   /**
* Tests whether binary Avro data files are displayed correctly.
@@ -126,7 +129,7 @@ public class TestTextCommand {
   }
 
   private void createFile(String fileName, byte[] contents) throws IOException 
{
-(new File(TEST_ROOT_DIR)).mkdir();
+Files.createDirectories(TEST_ROOT_DIR.toPath());
 File file = new File(fileName);
 file.createNewFile();
 FileOutputStream stream = new FileOutputStream(file);
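
The distinction the patch relies on: File.mkdir() creates only the final path component and signals a missing parent by returning false rather than throwing, while Files.createDirectories() creates the whole chain and throws an IOException on genuine failure. A short sketch (the path is illustrative):

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

public class MkdirsSketch {
    public static void main(String[] args) throws IOException {
        File nested = new File("build/test/data/testText");

        // mkdir(): fails quietly (returns false) if build/test/data does
        // not exist yet, which is exactly how the test used to break.
        System.out.println("mkdir(): " + nested.mkdir());

        // createDirectories(): makes every missing parent, throws on
        // real failure, and is a no-op if the directory already exists.
        Files.createDirectories(nested.toPath());
        System.out.println("exists: " + nested.isDirectory());
    }
}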



hadoop git commit: HADOOP-12448. TestTextCommand: use mkdirs rather than mkdir to create test directory. (Contributed by Colin Patrick McCabe and Chris Nauroth)

2015-09-29 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 071733dc6 -> 06abc57a9


HADOOP-12448. TestTextCommand: use mkdirs rather than mkdir to create test 
directory. (Contributed by Colin Patrick McCabe and Chris Nauroth)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06abc57a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06abc57a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06abc57a

Branch: refs/heads/trunk
Commit: 06abc57a90bb2ac86121cc803a60dab82609da88
Parents: 071733d
Author: yliu <y...@apache.org>
Authored: Wed Sep 30 10:56:22 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Wed Sep 30 10:56:22 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../org/apache/hadoop/fs/shell/TestTextCommand.java  | 15 +--
 2 files changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06abc57a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index ec7d1c6..84535d6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1100,6 +1100,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-12447. Clean up some htrace integration issues (cmccabe)
 
+HADOOP-12448. TestTextCommand: use mkdirs rather than mkdir to create test
+directory. (Contributed by Colin Patrick McCabe and Chris Nauroth)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06abc57a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
index 70a2f03..0e33d6a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
@@ -22,11 +22,13 @@ import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.FileOutputStream;
-import java.io.InputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.StringWriter;
 import java.lang.reflect.Method;
 import java.net.URI;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -38,12 +40,13 @@ import org.junit.Test;
  * by the Text command.
  */
 public class TestTextCommand {
-  private static final String TEST_ROOT_DIR =
-System.getProperty("test.build.data", "build/test/data/") + "/testText";
+  private static final File TEST_ROOT_DIR =
+Paths.get(System.getProperty("test.build.data", "build/test/data"),
+"testText").toFile();
   private static final String AVRO_FILENAME =
-new Path(TEST_ROOT_DIR, "weather.avro").toUri().getPath();
+new File(TEST_ROOT_DIR, "weather.avro").toURI().getPath();
   private static final String TEXT_FILENAME =
-new Path(TEST_ROOT_DIR, "testtextfile.txt").toUri().getPath();
+new File(TEST_ROOT_DIR, "testtextfile.txt").toURI().getPath();
 
   /**
* Tests whether binary Avro data files are displayed correctly.
@@ -126,7 +129,7 @@ public class TestTextCommand {
   }
 
   private void createFile(String fileName, byte[] contents) throws IOException 
{
-(new File(TEST_ROOT_DIR)).mkdir();
+Files.createDirectories(TEST_ROOT_DIR.toPath());
 File file = new File(fileName);
 file.createNewFile();
 FileOutputStream stream = new FileOutputStream(file);



hadoop git commit: HDFS-8995. Flaw in registration bookkeeping can make DN die on reconnect. (Kihwal Lee via yliu)

2015-09-01 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 acb300018 -> 0fb6b6810


HDFS-8995. Flaw in registration bookkeeping can make DN die on reconnect. 
(Kihwal Lee via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0fb6b681
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0fb6b681
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0fb6b681

Branch: refs/heads/branch-2
Commit: 0fb6b68101001221e14c2e86f4a63028074cced0
Parents: acb3000
Author: yliu <y...@apache.org>
Authored: Wed Sep 2 09:01:31 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Wed Sep 2 09:01:31 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/datanode/BPOfferService.java| 3 +--
 .../apache/hadoop/hdfs/server/datanode/BPServiceActor.java| 7 ---
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 5 +
 4 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb6b681/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6cf0d4f..73a93b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -958,6 +958,9 @@ Release 2.7.2 - UNRELEASED
 
 HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
 
+HDFS-8995. Flaw in registration bookkeeping can make DN die on reconnect.
+(Kihwal Lee via yliu)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb6b681/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 5097e4a..b3d363f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -351,9 +351,8 @@ class BPOfferService {
 reg.getStorageInfo().getNamespaceID(), "namespace ID");
 checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
 reg.getStorageInfo().getClusterID(), "cluster ID");
-  } else {
-bpRegistration = reg;
   }
+  bpRegistration = reg;
 
   dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId());
   // Add the initial block token secret keys to the DN's secret manager.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb6b681/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 1817427..85ea6ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -767,15 +767,16 @@ class BPServiceActor implements Runnable {
   void register(NamespaceInfo nsInfo) throws IOException {
 // The handshake() phase loaded the block pool storage
 // off disk - so update the bpRegistration object from that info
-bpRegistration = bpos.createRegistration();
+DatanodeRegistration newBpRegistration = bpos.createRegistration();
 
 LOG.info(this + " beginning handshake with NN");
 
 while (shouldRun()) {
   try {
 // Use returned registration from namenode with updated fields
-bpRegistration = bpNamenode.registerDatanode(bpRegistration);
-bpRegistration.setNamespaceInfo(nsInfo);
+newBpRegistration = bpNamenode.registerDatanode(newBpRegistration);
+newBpRegistration.setNamespaceInfo(nsInfo);
+bpRegistration = newBpRegistration;
 break;
   } catch(EOFException e) {  // namenode might have just restarted
 LOG.info("Problem connecting to server: " + nnAddr + " :"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb6b681/hadoop-hdfs-project/hadoop-hdfs/s
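
The essence of the fix is publish-on-success: the retry loop mutates a local registration object and assigns the shared field only after the namenode accepts it, so a failed reconnect can no longer clobber the last good registration. A minimal sketch of the pattern (types and names illustrative, not the DataNode code):

class RegistrationSketch {
    private volatile String registration = "last-known-good";

    void register() throws InterruptedException {
        String candidate = createCandidate();
        while (true) {
            try {
                candidate = remoteRegister(candidate); // may throw
                registration = candidate;              // publish only on success
                return;
            } catch (RuntimeException e) {
                // registration still holds the last good value here
                Thread.sleep(1000);                    // back off and retry
            }
        }
    }

    private String createCandidate() { return "fresh-candidate"; }

    private String remoteRegister(String reg) { return reg + "-accepted"; }

    public static void main(String[] args) throws InterruptedException {
        new RegistrationSketch().register();
    }
}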

hadoop git commit: HDFS-8995. Flaw in registration bookkeeping can make DN die on reconnect. (Kihwal Lee via yliu)

2015-09-01 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 015696fb8 -> 3ab43acca


HDFS-8995. Flaw in registration bookeeping can make DN die on reconnect. 
(Kihwal Lee via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ab43acc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ab43acc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ab43acc

Branch: refs/heads/branch-2.7
Commit: 3ab43accaf4226fcb2152fa005a75de592fd1f0e
Parents: 015696f
Author: yliu <y...@apache.org>
Authored: Wed Sep 2 09:00:22 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Wed Sep 2 09:00:22 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/datanode/BPOfferService.java| 3 +--
 .../apache/hadoop/hdfs/server/datanode/BPServiceActor.java| 7 ---
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 5 +
 4 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ab43acc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b5bb7b0..2a41a9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -35,6 +35,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-8879. Quota by storage type usage incorrectly initialized upon 
namenode
 restart. (xyao)
 
+HDFS-8995. Flaw in registration bookeeping can make DN die on reconnect.
+(Kihwal Lee via yliu)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ab43acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 36a868e..88b8312 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -350,9 +350,8 @@ class BPOfferService {
 reg.getStorageInfo().getNamespaceID(), "namespace ID");
 checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
 reg.getStorageInfo().getClusterID(), "cluster ID");
-  } else {
-bpRegistration = reg;
   }
+  bpRegistration = reg;
 
   dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId());
   // Add the initial block token secret keys to the DN's secret manager.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ab43acc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 49a1991..ca6cc03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -744,15 +744,16 @@ class BPServiceActor implements Runnable {
   void register(NamespaceInfo nsInfo) throws IOException {
 // The handshake() phase loaded the block pool storage
 // off disk - so update the bpRegistration object from that info
-bpRegistration = bpos.createRegistration();
+DatanodeRegistration newBpRegistration = bpos.createRegistration();
 
 LOG.info(this + " beginning handshake with NN");
 
 while (shouldRun()) {
   try {
 // Use returned registration from namenode with updated fields
-bpRegistration = bpNamenode.registerDatanode(bpRegistration);
-bpRegistration.setNamespaceInfo(nsInfo);
+newBpRegistration = bpNamenode.registerDatanode(newBpRegistration);
+newBpRegistration.setNamespaceInfo(nsInfo);
+bpRegistration = newBpRegistration;
 break;
   } catch(EOFException e) {  // namenode might have just restarted
 LOG.info("Problem connecting to server: " + nnAddr + " :"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ab43acc/hadoop-hdfs-project/hadoop-hdfs/s

hadoop git commit: HDFS-8995. Flaw in registration bookeeping can make DN die on reconnect. (Kihwal Lee via yliu)

2015-09-01 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 462076715 -> 5652131d2


HDFS-8995. Flaw in registration bookeeping can make DN die on reconnect. 
(Kihwal Lee via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5652131d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5652131d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5652131d

Branch: refs/heads/trunk
Commit: 5652131d2ea68c408dd3cd8bee31723642a8cdde
Parents: 4620767
Author: yliu <y...@apache.org>
Authored: Wed Sep 2 08:58:51 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Wed Sep 2 08:58:51 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/datanode/BPOfferService.java| 3 +--
 .../apache/hadoop/hdfs/server/datanode/BPServiceActor.java| 7 ---
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 5 +
 4 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5652131d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 57ddcb2..ea398f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1302,6 +1302,9 @@ Release 2.7.2 - UNRELEASED
 
 HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
 
+HDFS-8995. Flaw in registration bookeeping can make DN die on reconnect.
+(Kihwal Lee via yliu)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5652131d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 92323f1..7aab4f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -351,9 +351,8 @@ class BPOfferService {
 reg.getStorageInfo().getNamespaceID(), "namespace ID");
 checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
 reg.getStorageInfo().getClusterID(), "cluster ID");
-  } else {
-bpRegistration = reg;
   }
+  bpRegistration = reg;
 
   dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId());
   // Add the initial block token secret keys to the DN's secret manager.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5652131d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 1817427..85ea6ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -767,15 +767,16 @@ class BPServiceActor implements Runnable {
   void register(NamespaceInfo nsInfo) throws IOException {
 // The handshake() phase loaded the block pool storage
 // off disk - so update the bpRegistration object from that info
-bpRegistration = bpos.createRegistration();
+DatanodeRegistration newBpRegistration = bpos.createRegistration();
 
 LOG.info(this + " beginning handshake with NN");
 
 while (shouldRun()) {
   try {
 // Use returned registration from namenode with updated fields
-bpRegistration = bpNamenode.registerDatanode(bpRegistration);
-bpRegistration.setNamespaceInfo(nsInfo);
+newBpRegistration = bpNamenode.registerDatanode(newBpRegistration);
+newBpRegistration.setNamespaceInfo(nsInfo);
+bpRegistration = newBpRegistration;
 break;
   } catch(EOFException e) {  // namenode might have just restarted
 LOG.info("Problem connecting to server: " + nnAddr + " :"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5652131d/hadoop-hdfs-project/hadoop-hdfs/s

hadoop git commit: HADOOP-12367. Move TestFileUtil's test resources to resources folder. (wang via yliu)

2015-09-01 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8dbe2a12b -> 2345627ad


HADOOP-12367. Move TestFileUtil's test resources to resources folder. (wang via 
yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2345627a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2345627a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2345627a

Branch: refs/heads/branch-2
Commit: 2345627ad3f81c5854cc6021ea102d1f62633515
Parents: 8dbe2a1
Author: yliu <y...@apache.org>
Authored: Tue Sep 1 16:25:11 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Tue Sep 1 16:25:11 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   9 ++---
 hadoop-common-project/hadoop-common/pom.xml |  19 +--
 .../java/org/apache/hadoop/fs/test-untar.tar| Bin 20480 -> 0 bytes
 .../java/org/apache/hadoop/fs/test-untar.tgz| Bin 2024 -> 0 bytes
 .../src/test/resources/test-untar.tar   | Bin 0 -> 20480 bytes
 .../src/test/resources/test-untar.tgz   | Bin 0 -> 2024 bytes
 6 files changed, 7 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2345627a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d813bed..bb269c8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -175,6 +175,12 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12050. Enable MaxInactiveInterval for hadoop http auth token
 (hzlu via benoyantony)
 
+HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.
+(wang)
+
+HADOOP-12367. Move TestFileUtil's test resources to resources folder.
+(wang via yliu)
+
   BUG FIXES
 
 HADOOP-12124. Add HTrace support for FsShell (cmccabe)
@@ -246,9 +252,6 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12325. RPC Metrics : Add the ability track and log slow RPCs.
 (Anu Engineer via xyao)
 
-HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.
-(wang)
-
  OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2345627a/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 6deadd8..a064a57 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -448,23 +448,6 @@
 
           </execution>
           <execution>
-            <id>copy-test-tarballs</id>
-            <phase>process-test-resources</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target>
-                <copy toDir="${test.cache.data}">
-                  <fileset dir="${basedir}/src/test/java/org/apache/hadoop/fs">
-                    <include name="test-untar.tar"/>
-                    <include name="test-untar.tgz"/>
-                  </fileset>
-                </copy>
-              </target>
-            </configuration>
-          </execution>
-          <execution>
             <id>pre-site</id>
             <goals>
               <goal>run</goal>
@@ -500,7 +483,7 @@
 <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h</exclude>
 <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c</exclude>
 <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h</exclude>
-<exclude>src/test/java/org/apache/hadoop/fs/test-untar.tgz</exclude>
+<exclude>src/test/resources/test-untar.tgz</exclude>
 <exclude>src/test/resources/test.har/_SUCCESS</exclude>
 <exclude>src/test/resources/test.har/_index</exclude>
 <exclude>src/test/resources/test.har/_masterindex</exclude>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2345627a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
deleted file mode 100644
index 949e985..000
Binary files 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2345627a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
deleted file mode 100644
index 9e9ef40..000
Binary files 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/te

hadoop git commit: HADOOP-12367. Move TestFileUtil's test resources to resources folder. (wang via yliu)

2015-09-01 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7ad3556ed -> f4d96be6c


HADOOP-12367. Move TestFileUtil's test resources to resources folder. (wang via 
yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4d96be6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4d96be6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4d96be6

Branch: refs/heads/trunk
Commit: f4d96be6c637ff54903615cff04b365e25bb3229
Parents: 7ad3556
Author: yliu <y...@apache.org>
Authored: Tue Sep 1 16:20:56 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Tue Sep 1 16:20:56 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +++
 hadoop-common-project/hadoop-common/pom.xml |  19 +--
 .../java/org/apache/hadoop/fs/test-untar.tar| Bin 20480 -> 0 bytes
 .../java/org/apache/hadoop/fs/test-untar.tgz| Bin 2024 -> 0 bytes
 .../src/test/resources/test-untar.tar   | Bin 0 -> 20480 bytes
 .../src/test/resources/test-untar.tgz   | Bin 0 -> 2024 bytes
 6 files changed, 4 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0f52d22..14e6fda 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -759,6 +759,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12368. Mark ViewFileSystemBaseTest and ViewFsBaseTest as abstract.
 (wang)
 
+HADOOP-12367. Move TestFileUtil's test resources to resources folder.
+(wang via yliu)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 282735d..3ae09a0 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -454,23 +454,6 @@
 
           </execution>
           <execution>
-            <id>copy-test-tarballs</id>
-            <phase>process-test-resources</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target>
-                <copy toDir="${test.cache.data}">
-                  <fileset dir="${basedir}/src/test/java/org/apache/hadoop/fs">
-                    <include name="test-untar.tar"/>
-                    <include name="test-untar.tgz"/>
-                  </fileset>
-                </copy>
-              </target>
-            </configuration>
-          </execution>
-          <execution>
             <id>pre-site</id>
             <goals>
               <goal>run</goal>
@@ -505,7 +488,7 @@
 <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h</exclude>
 <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c</exclude>
 <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h</exclude>
-<exclude>src/test/java/org/apache/hadoop/fs/test-untar.tgz</exclude>
+<exclude>src/test/resources/test-untar.tgz</exclude>
 <exclude>src/test/resources/test.har/_SUCCESS</exclude>
 <exclude>src/test/resources/test.har/_index</exclude>
 <exclude>src/test/resources/test.har/_masterindex</exclude>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
deleted file mode 100644
index 949e985..000
Binary files 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tar
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
deleted file mode 100644
index 9e9ef40..000
Binary files 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/test-untar.tgz
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d96be6/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar 
b/hadoop-common-project/hadoop-common/src/test/resources/test-untar.tar
new file mode 100644
index 000..949e985
Bi
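
For illustration only (not part of the commit above): files under
src/test/resources are placed on the test classpath by Maven's standard
resource handling, which is why the copy-test-tarballs ant target became
unnecessary. A hedged sketch of how a test could then locate the tarball
(helper name invented for the sketch):

    import java.io.File;
    import java.net.URL;

    class ResourceLookupSketch {
      // Resolve a test resource, e.g. locate("test-untar.tar"),
      // instead of reading it from a hard-coded source-tree path.
      static File locate(String name) {
        URL url = ResourceLookupSketch.class.getResource("/" + name);
        if (url == null) {
          throw new IllegalStateException(name + " is not on the test classpath");
        }
        return new File(url.getFile());
      }
    }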

hadoop git commit: HDFS-8946. Improve choosing datanode storage for block placement. (yliu)

2015-08-31 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 21b478e1b -> d852ec1f7


HDFS-8946. Improve choosing datanode storage for block placement. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d852ec1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d852ec1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d852ec1f

Branch: refs/heads/branch-2
Commit: d852ec1f77bcffa3db84c81ea374c45bcb2b19c1
Parents: 21b478e
Author: yliu <y...@apache.org>
Authored: Tue Sep 1 08:51:32 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Tue Sep 1 08:51:32 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../BlockPlacementPolicyDefault.java| 147 ++-
 .../blockmanagement/DatanodeDescriptor.java |  36 +++--
 .../blockmanagement/TestReplicationPolicy.java  |  26 +++-
 4 files changed, 93 insertions(+), 118 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d852ec1f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 81837f1..bd46606 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -525,6 +525,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8990. Move RemoteBlockReader to hdfs-client module.
 (Mingliang via wheat9)
 
+HDFS-8946. Improve choosing datanode storage for block placement. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d852ec1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 6d7a765..f761150 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -26,12 +26,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
@@ -458,19 +455,18 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes
 .entrySet().iterator(); iter.hasNext(); ) {
   Map.Entry<StorageType, Integer> entry = iter.next();
-  for (DatanodeStorageInfo localStorage : DFSUtil.shuffle(
-  localDatanode.getStorageInfos())) {
-StorageType type = entry.getKey();
-if (addIfIsGoodTarget(localStorage, excludedNodes, blocksize,
-results, type) >= 0) {
-  int num = entry.getValue();
-  if (num == 1) {
-iter.remove();
-  } else {
-entry.setValue(num - 1);
-  }
-  return localStorage;
+  DatanodeStorageInfo localStorage = chooseStorage4Block(
+  localDatanode, blocksize, results, entry.getKey());
+  if (localStorage != null) {
+// add node and related nodes to excludedNode
+addToExcludedNodes(localDatanode, excludedNodes);
+int num = entry.getValue();
+if (num == 1) {
+  iter.remove();
+} else {
+  entry.setValue(num - 1);
 }
+return localStorage;
   }
 }
   } 
@@ -651,7 +647,7 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 boolean avoidStaleNodes,
 EnumMap<StorageType, Integer> storageTypes)
 throws NotEnoughReplicasExceptio
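
For illustration only (not part of the commit above): the refactored
chooseLocalStorage keeps a map of how many replicas of each storage type are
still required, and after a successful placement either removes the type or
decrements its count. A minimal sketch of that bookkeeping, with invented names:

    import java.util.EnumMap;
    import java.util.Iterator;
    import java.util.Map;

    class StorageCountSketch {
      enum StorageType { DISK, SSD }

      // Mirror of the iter.remove() / entry.setValue(num - 1) logic above.
      static void consumeOne(EnumMap<StorageType, Integer> needed,
          StorageType chosen) {
        for (Iterator<Map.Entry<StorageType, Integer>> it =
            needed.entrySet().iterator(); it.hasNext();) {
          Map.Entry<StorageType, Integer> e = it.next();
          if (e.getKey() != chosen) {
            continue;
          }
          int num = e.getValue();
          if (num == 1) {
            it.remove();          // requirement for this type fully satisfied
          } else {
            e.setValue(num - 1);  // one fewer replica of this type needed
          }
          return;
        }
      }
    }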

hadoop git commit: HDFS-8963. Fix incorrect sign extension of xattr length in HDFS-8900. (Colin Patrick McCabe via yliu)

2015-08-27 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 07a19325c -> 5f9b323eb


HDFS-8963. Fix incorrect sign extension of xattr length in HDFS-8900. (Colin 
Patrick McCabe via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f9b323e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f9b323e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f9b323e

Branch: refs/heads/branch-2
Commit: 5f9b323eb09affbccff43d31a7026a911121e89c
Parents: 07a1932
Author: yliu <y...@apache.org>
Authored: Fri Aug 28 10:53:53 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Fri Aug 28 10:53:53 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop/hdfs/server/namenode/XAttrFormat.java  | 14 ++
 .../hadoop/hdfs/server/namenode/TestXAttrFeature.java | 12 
 3 files changed, 25 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f9b323e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a6c0e74..e6e11dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -899,6 +899,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8969. Clean up findbugs warnings for HDFS-8823 and HDFS-8932.
 (Anu Engineer via wheat9)
 
+HDFS-8963. Fix incorrect sign extension of xattr length in HDFS-8900.
+(Colin Patrick McCabe via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f9b323e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
index 6167dac..7e704d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
@@ -61,12 +61,15 @@ class XAttrFormat {
 for (int i = 0; i < attrs.length;) {
   XAttr.Builder builder = new XAttr.Builder();
   // big-endian
-  int v = Ints.fromBytes(attrs[i++], attrs[i++], attrs[i++], attrs[i++]);
+  int v = Ints.fromBytes(attrs[i], attrs[i + 1],
+  attrs[i + 2], attrs[i + 3]);
+  i += 4;
   int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
   int nid = v & XATTR_NAME_MASK;
   builder.setNameSpace(XATTR_NAMESPACE_VALUES[ns]);
   builder.setName(XAttrStorage.getName(nid));
-  int vlen = (attrs[i++] << 8) | attrs[i++];
+  int vlen = ((0xff & attrs[i]) << 8) | (0xff & attrs[i + 1]);
+  i += 2;
   if (vlen > 0) {
 byte[] value = new byte[vlen];
 System.arraycopy(attrs, i, value, 0, vlen);
@@ -94,12 +97,15 @@ class XAttrFormat {
 XAttr xAttr = XAttrHelper.buildXAttr(prefixedName);
 for (int i = 0; i < attrs.length;) {
   // big-endian
-  int v = Ints.fromBytes(attrs[i++], attrs[i++], attrs[i++], attrs[i++]);
+  int v = Ints.fromBytes(attrs[i], attrs[i + 1],
+  attrs[i + 2], attrs[i + 3]);
+  i += 4;
   int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
   int nid = v & XATTR_NAME_MASK;
   XAttr.NameSpace namespace = XATTR_NAMESPACE_VALUES[ns];
   String name = XAttrStorage.getName(nid);
-  int vlen = (attrs[i++] << 8) | attrs[i++];
+  int vlen = ((0xff & attrs[i]) << 8) | (0xff & attrs[i + 1]);
+  i += 2;
   if (xAttr.getNameSpace() == namespace &&
   xAttr.getName().equals(name)) {
 if (vlen > 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f9b323e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
index fcb157e..5b0922d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
@@ -43,6 +43,14 @@ public class TestXAttrFeature {
  static final String name7 = "raw.a7";
   static final byte[] value7 = {0x011, 0x012
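
For illustration only (not part of the commit above): Java bytes are signed,
so composing the 2-byte xattr value length without masking sign-extends the
low byte and can yield a negative length. A self-contained demonstration of
the bug and the fix applied above:

    class SignExtensionDemo {
      public static void main(String[] args) {
        byte hi = 0x01, lo = (byte) 0x90;  // intended length 0x0190 = 400

        int buggy = (hi << 8) | lo;        // lo sign-extends to 0xFFFFFF90
        int fixed = ((0xff & hi) << 8) | (0xff & lo);

        System.out.println(buggy);         // prints -112, unusable as a length
        System.out.println(fixed);         // prints 400
      }
    }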

hadoop git commit: HDFS-8963. Fix incorrect sign extension of xattr length in HDFS-8900. (Colin Patrick McCabe via yliu)

2015-08-27 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 035ed2614 -> e166c038c


HDFS-8963. Fix incorrect sign extension of xattr length in HDFS-8900. (Colin 
Patrick McCabe via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e166c038
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e166c038
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e166c038

Branch: refs/heads/trunk
Commit: e166c038c0aaa57b245f985a1c0fadd5fe33c384
Parents: 035ed26
Author: yliu <y...@apache.org>
Authored: Fri Aug 28 10:54:55 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Fri Aug 28 10:54:55 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop/hdfs/server/namenode/XAttrFormat.java  | 14 ++
 .../hadoop/hdfs/server/namenode/TestXAttrFeature.java | 12 
 3 files changed, 25 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e166c038/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9cc3326..b699fceb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1243,6 +1243,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8969. Clean up findbugs warnings for HDFS-8823 and HDFS-8932.
 (Anu Engineer via wheat9)
 
+HDFS-8963. Fix incorrect sign extension of xattr length in HDFS-8900.
+(Colin Patrick McCabe via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e166c038/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
index 6167dac..7e704d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
@@ -61,12 +61,15 @@ class XAttrFormat {
 for (int i = 0; i < attrs.length;) {
   XAttr.Builder builder = new XAttr.Builder();
   // big-endian
-  int v = Ints.fromBytes(attrs[i++], attrs[i++], attrs[i++], attrs[i++]);
+  int v = Ints.fromBytes(attrs[i], attrs[i + 1],
+  attrs[i + 2], attrs[i + 3]);
+  i += 4;
   int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
   int nid = v & XATTR_NAME_MASK;
   builder.setNameSpace(XATTR_NAMESPACE_VALUES[ns]);
   builder.setName(XAttrStorage.getName(nid));
-  int vlen = (attrs[i++] << 8) | attrs[i++];
+  int vlen = ((0xff & attrs[i]) << 8) | (0xff & attrs[i + 1]);
+  i += 2;
   if (vlen > 0) {
 byte[] value = new byte[vlen];
 System.arraycopy(attrs, i, value, 0, vlen);
@@ -94,12 +97,15 @@ class XAttrFormat {
 XAttr xAttr = XAttrHelper.buildXAttr(prefixedName);
 for (int i = 0; i < attrs.length;) {
   // big-endian
-  int v = Ints.fromBytes(attrs[i++], attrs[i++], attrs[i++], attrs[i++]);
+  int v = Ints.fromBytes(attrs[i], attrs[i + 1],
+  attrs[i + 2], attrs[i + 3]);
+  i += 4;
   int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
   int nid = v & XATTR_NAME_MASK;
   XAttr.NameSpace namespace = XATTR_NAMESPACE_VALUES[ns];
   String name = XAttrStorage.getName(nid);
-  int vlen = (attrs[i++] << 8) | attrs[i++];
+  int vlen = ((0xff & attrs[i]) << 8) | (0xff & attrs[i + 1]);
+  i += 2;
   if (xAttr.getNameSpace() == namespace &&
   xAttr.getName().equals(name)) {
 if (vlen > 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e166c038/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
index fcb157e..5b0922d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
@@ -43,6 +43,14 @@ public class TestXAttrFeature {
  static final String name7 = "raw.a7";
   static final byte[] value7 = {0x011, 0x012, 0x013

hadoop git commit: HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)

2015-08-25 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e99349830 -> df5dbf317


HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df5dbf31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df5dbf31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df5dbf31

Branch: refs/heads/branch-2
Commit: df5dbf317d0fb2d0e35f016e75c230d0f74235fd
Parents: e993498
Author: yliu <y...@apache.org>
Authored: Tue Aug 25 16:14:11 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Tue Aug 25 16:14:11 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 +
 .../org/apache/hadoop/hdfs/XAttrHelper.java |  13 +-
 .../BlockStoragePolicySuite.java|   5 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  29 ++--
 .../hdfs/server/namenode/FSDirectory.java   |  60 ---
 .../server/namenode/FSImageFormatPBINode.java   |   6 +-
 .../hdfs/server/namenode/INodeDirectory.java|  11 +-
 .../server/namenode/SerialNumberManager.java|  44 --
 .../hdfs/server/namenode/SerialNumberMap.java   |  79 ++
 .../hdfs/server/namenode/XAttrFeature.java  |  78 +-
 .../hdfs/server/namenode/XAttrFormat.java   | 155 +++
 .../server/namenode/XAttrPermissionFilter.java  |   6 +-
 .../hdfs/server/namenode/XAttrStorage.java  |  62 +++-
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|   6 +-
 .../src/main/resources/hdfs-default.xml |   4 +-
 .../hdfs/server/namenode/TestStartup.java   |  27 +---
 .../hdfs/server/namenode/TestXAttrFeature.java  | 107 +
 18 files changed, 502 insertions(+), 193 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df5dbf31/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3100fd0..59817d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -488,6 +488,8 @@ Release 2.8.0 - UNRELEASED
 ReplicaUnderConstruction as a separate class and replicas as an array.
 (jing9)
 
+HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df5dbf31/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 963196b..2eefeb0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -314,6 +314,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
   public static final String  DFS_NAMENODE_MAX_XATTR_SIZE_KEY = 
"dfs.namenode.fs-limits.max-xattr-size";
   public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;
+  public static final int DFS_NAMENODE_MAX_XATTR_SIZE_HARD_LIMIT = 32768;
 
 
   //Following keys have no defaults

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df5dbf31/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
index 5cafb3c..2655c40 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
@@ -130,7 +130,7 @@ public class XAttrHelper {
 }
 Map<String, byte[]> xAttrMap = Maps.newHashMap();
 for (XAttr xAttr : xAttrs) {
-  String name = getPrefixName(xAttr);
+  String name = getPrefixedName(xAttr);
   byte[] value = xAttr.getValue();
   if (value == null) {
 value = new byte[0];
@@ -144,13 +144,16 @@ public class XAttrHelper {
   /**
* Get name with prefix from <code>XAttr</code>
*/
-  public static String getPrefixName(XAttr xAttr) {
+  public static String getPrefixedName(XAttr xAttr) {
 if (xAttr == null) {
   return null;
 }
-
-String

hadoop git commit: HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)

2015-08-25 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk af7876787 -> eee0d4563


HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eee0d456
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eee0d456
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eee0d456

Branch: refs/heads/trunk
Commit: eee0d4563c62647cfaaed6605ee713aaf69add78
Parents: af78767
Author: yliu <y...@apache.org>
Authored: Tue Aug 25 16:16:09 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Tue Aug 25 16:16:09 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 +
 .../org/apache/hadoop/hdfs/XAttrHelper.java |  13 +-
 .../BlockStoragePolicySuite.java|   5 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  29 ++--
 .../hdfs/server/namenode/FSDirectory.java   |  60 ---
 .../server/namenode/FSImageFormatPBINode.java   |   6 +-
 .../hdfs/server/namenode/INodeDirectory.java|  11 +-
 .../server/namenode/SerialNumberManager.java|  44 --
 .../hdfs/server/namenode/SerialNumberMap.java   |  79 ++
 .../hdfs/server/namenode/XAttrFeature.java  |  78 +-
 .../hdfs/server/namenode/XAttrFormat.java   | 155 +++
 .../server/namenode/XAttrPermissionFilter.java  |   6 +-
 .../hdfs/server/namenode/XAttrStorage.java  |  62 +++-
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|   6 +-
 .../src/main/resources/hdfs-default.xml |   4 +-
 .../hdfs/server/namenode/TestStartup.java   |  27 +---
 .../hdfs/server/namenode/TestXAttrFeature.java  | 107 +
 18 files changed, 502 insertions(+), 193 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7aadcc6..2c47b50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -833,6 +833,8 @@ Release 2.8.0 - UNRELEASED
 ReplicaUnderConstruction as a separate class and replicas as an array.
 (jing9)
 
+HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9b14168..e6802a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -318,6 +318,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
   public static final String  DFS_NAMENODE_MAX_XATTR_SIZE_KEY = 
"dfs.namenode.fs-limits.max-xattr-size";
   public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;
+  public static final int DFS_NAMENODE_MAX_XATTR_SIZE_HARD_LIMIT = 32768;
 
 
   //Following keys have no defaults

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
index 5cafb3c..2655c40 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
@@ -130,7 +130,7 @@ public class XAttrHelper {
 }
 Map<String, byte[]> xAttrMap = Maps.newHashMap();
 for (XAttr xAttr : xAttrs) {
-  String name = getPrefixName(xAttr);
+  String name = getPrefixedName(xAttr);
   byte[] value = xAttr.getValue();
   if (value == null) {
 value = new byte[0];
@@ -144,13 +144,16 @@ public class XAttrHelper {
   /**
* Get name with prefix from <code>XAttr</code>
*/
-  public static String getPrefixName(XAttr xAttr) {
+  public static String getPrefixedName(XAttr xAttr) {
 if (xAttr == null) {
   return null;
 }
-
-String namespace
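
For illustration only (not part of the commit above): the compact format shown
in XAttrFormat packs the namespace and a name id into one big-endian int,
followed by a 2-byte value length and the raw value bytes. A rough sketch of
that layout; the offset and mask constants here are invented for the sketch,
not the real Hadoop values:

    import java.io.ByteArrayOutputStream;

    class XAttrPackSketch {
      static final int NS_OFFSET = 29;            // assumed for the sketch
      static final int NAME_MASK = (1 << 24) - 1;

      static byte[] pack(int ns, int nameId, byte[] value) {
        int v = (ns << NS_OFFSET) | (nameId & NAME_MASK);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // 4-byte big-endian word packing namespace and name id
        out.write(v >>> 24); out.write(v >>> 16);
        out.write(v >>> 8);  out.write(v);
        // 2-byte big-endian value length, then the value itself
        out.write(value.length >>> 8); out.write(value.length);
        out.write(value, 0, value.length);
        return out.toByteArray();
      }
    }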

hadoop git commit: HDFS-8884. Fail-fast check in BlockPlacementPolicyDefault#chooseTarget. (yliu)

2015-08-20 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 36b1a1e78 -> 80a29906b


HDFS-8884. Fail-fast check in BlockPlacementPolicyDefault#chooseTarget. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80a29906
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80a29906
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80a29906

Branch: refs/heads/trunk
Commit: 80a29906bcd718bbba223fa099e523281d9f3369
Parents: 36b1a1e
Author: yliu <y...@apache.org>
Authored: Thu Aug 20 20:07:18 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Thu Aug 20 20:07:18 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../BlockPlacementPolicyDefault.java| 176 ---
 .../BlockPlacementPolicyWithNodeGroup.java  |  35 +---
 .../TestDefaultBlockPlacementPolicy.java|  49 +-
 4 files changed, 161 insertions(+), 102 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80a29906/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 080f0d4..a0ca52a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -819,6 +819,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8917. Cleanup BlockInfoUnderConstruction from comments and tests.
 (Zhe Zhang via jing9)
 
+HDFS-8884. Fail-fast check in BlockPlacementPolicyDefault#chooseTarget.
+(yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/80a29906/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 9023e0a..3aea5c9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -437,17 +437,11 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 maxNodesPerRack, results, avoidStaleNodes, storageTypes);
 return writer;
   }
-  
-  /**
-   * Choose <i>localMachine</i> as the target.
-   * if <i>localMachine</i> is not available, 
-   * choose a node on the same rack
-   * @return the chosen storage
-   */
+
   protected DatanodeStorageInfo chooseLocalStorage(Node localMachine,
   Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
   List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
-  EnumMap<StorageType, Integer> storageTypes, boolean fallbackToLocalRack)
+  EnumMap<StorageType, Integer> storageTypes)
   throws NotEnoughReplicasException {
 // if no local machine, randomly choose one node
 if (localMachine == null) {
@@ -458,7 +452,9 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
  clusterMap.contains(localMachine)) {
   DatanodeDescriptor localDatanode = (DatanodeDescriptor) localMachine;
   // otherwise try local machine first
-  if (excludedNodes.add(localMachine)) { // was not in the excluded list
+  if (excludedNodes.add(localMachine) // was not in the excluded list
+   && isGoodDatanode(localDatanode, maxNodesPerRack, false,
+  results, avoidStaleNodes)) {
 for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes
 .entrySet().iterator(); iter.hasNext(); ) {
   Map.Entry<StorageType, Integer> entry = iter.next();
@@ -466,7 +462,7 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
   localDatanode.getStorageInfos())) {
 StorageType type = entry.getKey();
 if (addIfIsGoodTarget(localStorage, excludedNodes, blocksize,
-maxNodesPerRack, false, results, avoidStaleNodes, type) >= 0) {
+results, type) >= 0) {
   int num = entry.getValue();
   if (num == 1) {
 iter.remove();
@@ -479,6 +475,26 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 }
   } 
 }
+return null;
+  }
+
+  /**
+   * Choose <i>localMachine</i> as the target.
+   * if <i>localMachine</i> is not available,
+   * choose a node on the same rack
+   * @return
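
For illustration only (not part of the commit above): the fail-fast change
moves the node-level suitability check in front of the per-storage loop, so an
unsuitable datanode is rejected with one check instead of once per storage.
A condensed sketch of that ordering, with invented stand-in types:

    import java.util.List;

    class FailFastSketch {
      interface Node {
        boolean isGood();            // stands in for isGoodDatanode(...)
        List<String> storages();
      }

      static String chooseStorage(Node node) {
        if (!node.isGood()) {        // fail fast: single node-level check
          return null;
        }
        for (String storage : node.storages()) {
          if (storage != null) {     // per-storage checks only for good nodes
            return storage;
          }
        }
        return null;
      }
    }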

hadoop git commit: HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is flawed. (Kihwal Lee via yliu)

2015-08-20 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 80a29906b -> 5e8fe8943


HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is flawed. 
(Kihwal Lee via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e8fe894
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e8fe894
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e8fe894

Branch: refs/heads/trunk
Commit: 5e8fe8943718309b5e39a794360aebccae28b331
Parents: 80a2990
Author: yliu <y...@apache.org>
Authored: Thu Aug 20 20:15:03 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Thu Aug 20 20:15:03 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  7 +-
 .../BlockPlacementPolicyDefault.java|  3 +-
 .../blockmanagement/DatanodeDescriptor.java | 23 --
 .../blockmanagement/TestReplicationPolicy.java  | 80 +++-
 4 files changed, 84 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e8fe894/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a0ca52a..041582f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1196,11 +1196,11 @@ Release 2.7.2 - UNRELEASED
 
   IMPROVEMENTS
 
-  HDFS-8659. Block scanner INFO message is spamming logs. (Yongjun Zhang)
+HDFS-8659. Block scanner INFO message is spamming logs. (Yongjun Zhang)
 
   OPTIMIZATIONS
 
-  HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
+HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
 
   BUG FIXES
 
@@ -1215,6 +1215,9 @@ Release 2.7.2 - UNRELEASED
 
 HDFS-8867. Enable optimized block reports. (Daryn Sharp via jing9)
 
+HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is
+flawed. (Kihwal Lee via yliu)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e8fe894/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 3aea5c9..6d7a765 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -868,7 +868,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 
 final long requiredSize = blockSize * 
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE;
 final long scheduledSize = blockSize * 
node.getBlocksScheduled(storage.getStorageType());
-final long remaining = node.getRemaining(storage.getStorageType());
+final long remaining = node.getRemaining(storage.getStorageType(),
+requiredSize);
 if (requiredSize > remaining - scheduledSize) {
   logNodeIsNotChosen(storage, "the node does not have enough "
   + storage.getStorageType() + " space"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e8fe894/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 9334b5c..7e3c59b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.hdfs.util.EnumCounters;
@@ -662,16 +663,26 @@ public class
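
For illustration only (not part of the commit above): the flaw was that free
space summed across all of a node's volumes can pass the check even though no
single volume can hold the block. A simplified sketch of the corrected
accounting, which only counts volumes that individually have room (types and
numbers invented for the sketch):

    class RemainingSpaceSketch {
      static long remaining(long[] perVolumeFree, long requiredSize) {
        long total = 0;
        for (long free : perVolumeFree) {
          if (free >= requiredSize) {  // a replica must fit on one volume
            total += free;
          }
        }
        return total;
      }

      public static void main(String[] args) {
        long[] volumes = {300, 300};  // 600 bytes total, but no room anywhere
        System.out.println(remaining(volumes, 512));  // 0 => node rejected
      }
    }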

hadoop git commit: HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is flawed. (Kihwal Lee via yliu)

2015-08-20 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 63bdbb779 -> 146db49f7


HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is flawed. 
(Kihwal Lee via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/146db49f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/146db49f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/146db49f

Branch: refs/heads/branch-2
Commit: 146db49f7ff0dec82cd51f366311030404b770d7
Parents: 63bdbb7
Author: yliu <y...@apache.org>
Authored: Thu Aug 20 20:46:45 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Thu Aug 20 20:46:45 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  7 +-
 .../BlockPlacementPolicyDefault.java|  3 +-
 .../blockmanagement/DatanodeDescriptor.java | 23 --
 .../blockmanagement/TestReplicationPolicy.java  | 80 +++-
 4 files changed, 84 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/146db49f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2cf2082..f0a7060 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -852,11 +852,11 @@ Release 2.7.2 - UNRELEASED
 
   IMPROVEMENTS
 
-  HDFS-8659. Block scanner INFO message is spamming logs. (Yongjun Zhang)
+HDFS-8659. Block scanner INFO message is spamming logs. (Yongjun Zhang)
 
   OPTIMIZATIONS
 
-  HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
+HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
 
   BUG FIXES
 
@@ -871,6 +871,9 @@ Release 2.7.2 - UNRELEASED
 
 HDFS-8867. Enable optimized block reports. (Daryn Sharp via jing9)
 
+HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is
+flawed. (Kihwal Lee via yliu)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/146db49f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 3aea5c9..6d7a765 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -868,7 +868,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 
 final long requiredSize = blockSize * 
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE;
 final long scheduledSize = blockSize * 
node.getBlocksScheduled(storage.getStorageType());
-final long remaining = node.getRemaining(storage.getStorageType());
+final long remaining = node.getRemaining(storage.getStorageType(),
+requiredSize);
 if (requiredSize > remaining - scheduledSize) {
   logNodeIsNotChosen(storage, "the node does not have enough "
   + storage.getStorageType() + " space"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/146db49f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 53e1b41..d081640 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.hdfs.util.EnumCounters;
@@ -664,16 +665,26 @@ public

hadoop git commit: HDFS-8908. TestAppendSnapshotTruncate may fail with IOException: Failed to replace a bad datanode. (Tsz Wo Nicholas Sze via yliu)

2015-08-19 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fc6b176e7 -> 0824fa1c4


HDFS-8908. TestAppendSnapshotTruncate may fail with IOException: Failed to 
replace a bad datanode. (Tsz Wo Nicholas Sze via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0824fa1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0824fa1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0824fa1c

Branch: refs/heads/branch-2
Commit: 0824fa1c4fa7c4ac424a043603574667fa14fe6a
Parents: fc6b176
Author: yliu <y...@apache.org>
Authored: Wed Aug 19 21:56:56 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Wed Aug 19 21:56:56 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  3 +++
 .../apache/hadoop/hdfs/TestAppendSnapshotTruncate.java | 13 -
 2 files changed, 11 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0824fa1c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 889025b..b7192ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -830,6 +830,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
 
+HDFS-8908. TestAppendSnapshotTruncate may fail with IOException: Failed to
+replace a bad datanode. (Tsz Wo Nicholas Sze via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0824fa1c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
index d67ceb0..9a09987 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -27,7 +27,6 @@ import java.io.RandomAccessFile;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Random;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -42,6 +41,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -64,10 +64,10 @@ public class TestAppendSnapshotTruncate {
   }
   private static final Log LOG = 
LogFactory.getLog(TestAppendSnapshotTruncate.class);
   private static final int BLOCK_SIZE = 1024;
-  private static final int DATANODE_NUM = 3;
+  private static final int DATANODE_NUM = 4;
   private static final short REPLICATION = 3;
-  private static final int FILE_WORKER_NUM = 3;
-  private static final long TEST_TIME_SECOND = 10;
+  private static final int FILE_WORKER_NUM = 10;
+  private static final long TEST_TIME_SECOND = 20;
   private static final long TEST_TIMEOUT_SECOND = TEST_TIME_SECOND + 60;
 
   static final int SHORT_HEARTBEAT = 1;
@@ -85,6 +85,7 @@ public class TestAppendSnapshotTruncate {
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
 conf.setLong(
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
+conf.setBoolean(ReplaceDatanodeOnFailure.BEST_EFFORT_KEY, true);
 cluster = new MiniDFSCluster.Builder(conf)
 .format(true)
 .numDataNodes(DATANODE_NUM)
@@ -476,7 +477,9 @@ public class TestAppendSnapshotTruncate {
 }
 
 void pause() {
-      Preconditions.checkState(state.compareAndSet(State.RUNNING, State.IDLE));
+      checkErrorState();
+      Preconditions.checkState(state.compareAndSet(State.RUNNING, State.IDLE),
+          "%s: state=%s != %s", name, state.get(), State.RUNNING);
 }
 
 void stop() throws InterruptedException {
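
A note on the fix for readers skimming the diff: with REPLICATION equal to
DATANODE_NUM there is no spare datanode to swap into a failed write
pipeline, which is exactly the "Failed to replace a bad datanode"
IOException in the subject. A minimal sketch of the relevant setup, using
the config key and builder API shown above (the surrounding JUnit
scaffolding is elided and the values are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure;

    Configuration conf = new HdfsConfiguration();
    // Keep writing on a best-effort basis when no replacement datanode
    // can be found, instead of failing the whole write.
    conf.setBoolean(ReplaceDatanodeOnFailure.BEST_EFFORT_KEY, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(4)  // replication 3 plus one spare for pipeline recovery
        .build();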



hadoop git commit: HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)

2015-08-17 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 51a00964d -> 71566e238


HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71566e23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71566e23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71566e23

Branch: refs/heads/trunk
Commit: 71566e23820d33e0110ca55eded3299735e970b9
Parents: 51a0096
Author: yliu y...@apache.org
Authored: Tue Aug 18 09:23:06 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Aug 18 09:23:06 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 2 ++
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java   | 3 +--
 2 files changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71566e23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d1b04dc..132adc1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -820,6 +820,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8845. DiskChecker should not traverse the entire tree (Chang Li via
 Colin P. McCabe)
 
+HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)
+
   BUG FIXES
 
 HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71566e23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index cde6588..aad7fec 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -33,7 +33,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.Set;
-import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicLong;
@@ -204,7 +203,7 @@ public class BlockManager implements BlockStatsMXBean {
* DataNode. We'll eventually remove these extras.
*/
   public final Map<String, LightWeightLinkedSet<Block>> excessReplicateMap =
-    new TreeMap<String, LightWeightLinkedSet<Block>>();
+    new HashMap<>();
 
   /**
* Store set of Blocks that need to be replicated 1 or more times.
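
The essence of the change, as a self-contained illustration (plain JDK
types, not the Hadoop classes): excessReplicateMap is keyed by datanode
UUID and is only ever probed per key, never iterated in sorted order, so
the TreeMap pays O(log n) string comparisons on every access for an
ordering nobody uses.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;

    public class ExcessMapSketch {
      public static void main(String[] args) {
        Map<String, Integer> ordered = new TreeMap<String, Integer>(); // before
        Map<String, Integer> hashed = new HashMap<String, Integer>();  // after
        ordered.put("dn-uuid-42", 1); // O(log n): walks a red-black tree
        hashed.put("dn-uuid-42", 1);  // O(1) expected: hash bucket lookup
        System.out.println(ordered.get("dn-uuid-42") + " " + hashed.get("dn-uuid-42"));
      }
    }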



hadoop git commit: HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)

2015-08-17 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 85cb596e2 -> 076c68878


HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/076c6887
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/076c6887
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/076c6887

Branch: refs/heads/branch-2
Commit: 076c688780ecf66b0f593d7c19ce8944dc4451d4
Parents: 85cb596
Author: yliu y...@apache.org
Authored: Tue Aug 18 09:25:03 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Aug 18 09:25:03 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 2 ++
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java   | 3 +--
 2 files changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/076c6887/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7ab41e8..31b197a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -472,6 +472,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8845. DiskChecker should not traverse the entire tree (Chang Li via
 Colin P. McCabe)
 
+HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)
+
   BUG FIXES
 
 HDFS-8091: ACLStatus and XAttributes should be presented to

http://git-wip-us.apache.org/repos/asf/hadoop/blob/076c6887/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 254a628..abcb2ad 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -33,7 +33,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.Set;
-import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicLong;
@@ -203,7 +202,7 @@ public class BlockManager implements BlockStatsMXBean {
* DataNode. We'll eventually remove these extras.
*/
   public final Map<String, LightWeightLinkedSet<Block>> excessReplicateMap =
-    new TreeMap<String, LightWeightLinkedSet<Block>>();
+    new HashMap<>();
 
   /**
* Store set of Blocks that need to be replicated 1 or more times.



hadoop git commit: HADOOP-12295. Improve NetworkTopology#InnerNode#remove logic. (yliu)

2015-08-13 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 40f815131 -> 53bef9c5b


HADOOP-12295. Improve NetworkTopology#InnerNode#remove logic. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53bef9c5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53bef9c5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53bef9c5

Branch: refs/heads/trunk
Commit: 53bef9c5b98dee87d4ffaf35415bc38e2f876ed8
Parents: 40f8151
Author: yliu y...@apache.org
Authored: Thu Aug 13 16:45:20 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Aug 13 16:45:20 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../org/apache/hadoop/net/NetworkTopology.java  | 38 ++--
 .../apache/hadoop/net/TestNetworkTopology.java  |  1 +
 3 files changed, 21 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53bef9c5/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 78f12e4..c80be05 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -749,6 +749,8 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12318. Expose underlying LDAP exceptions in SaslPlainServer. (Mike
 Yoder via atm)
 
+HADOOP-12295. Improve NetworkTopology#InnerNode#remove logic. (yliu)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53bef9c5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 970ad40..fe6e439 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -166,10 +166,11 @@ public class NetworkTopology {
  * @return true if the node is added; false otherwise
  */
 boolean add(Node n) {
-      if (!isAncestor(n))
-        throw new IllegalArgumentException(n.getName()+", which is located at "
-            +n.getNetworkLocation()+", is not a decendent of "
-            +getPath(this));
+      if (!isAncestor(n)) {
+        throw new IllegalArgumentException(n.getName()
+            + ", which is located at " + n.getNetworkLocation()
+            + ", is not a descendent of " + getPath(this));
+      }
   if (isParent(n)) {
 // this node is the parent of n; add n directly
 n.setParent(this);
@@ -227,12 +228,11 @@ public class NetworkTopology {
  * @return true if the node is deleted; false otherwise
  */
 boolean remove(Node n) {
-      String parent = n.getNetworkLocation();
-      String currentPath = getPath(this);
-      if (!isAncestor(n))
+      if (!isAncestor(n)) {
         throw new IllegalArgumentException(n.getName()
-            +", which is located at "
-            +parent+", is not a descendent of "+currentPath);
+            + ", which is located at " + n.getNetworkLocation()
+            + ", is not a descendent of " + getPath(this));
+      }
   if (isParent(n)) {
 // this node is the parent of n; remove n directly
 if (childrenMap.containsKey(n.getName())) {
@@ -250,15 +250,8 @@ public class NetworkTopology {
   } else {
 // find the next ancestor node: the parent node
 String parentName = getNextAncestorName(n);
-        InnerNode parentNode = null;
-        int i;
-        for(i=0; i<children.size(); i++) {
-          if (children.get(i).getName().equals(parentName)) {
-            parentNode = (InnerNode)children.get(i);
-            break;
-          }
-        }
-        if (parentNode==null) {
+        InnerNode parentNode = (InnerNode)childrenMap.get(parentName);
+        if (parentNode == null) {
           return false;
         }
 // remove n from the parent node
@@ -266,8 +259,13 @@ public class NetworkTopology {
 // if the parent node has no children, remove the parent node too
 if (isRemoved) {
   if (parentNode.getNumOfChildren() == 0) {
-            Node prev = children.remove(i);
-            childrenMap.remove(prev.getName());
+            for(int i = 0; i < children.size(); i++) {
+              if (children.get(i).getName().equals(parentName
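
The remove() cleanup in one self-contained picture (plain JDK types, not
the Hadoop InnerNode): the old code scanned the children list to locate
the parent rack node, while the new code resolves it directly from the
name-keyed childrenMap that NetworkTopology already maintains.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class ChildLookupSketch {
      public static void main(String[] args) {
        List<String> children = new ArrayList<String>();
        Map<String, String> childrenMap = new HashMap<String, String>();
        children.add("rack0");
        childrenMap.put("rack0", "rack0");
        String parentName = "rack0";

        // Before: O(n) linear scan over the children list.
        String scanned = null;
        for (int i = 0; i < children.size(); i++) {
          if (children.get(i).equals(parentName)) {
            scanned = children.get(i);
            break;
          }
        }

        // After: O(1) expected lookup in the name-keyed map.
        String lookedUp = childrenMap.get(parentName);
        System.out.println(scanned + " == " + lookedUp);
      }
    }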

hadoop git commit: HADOOP-12295. Improve NetworkTopology#InnerNode#remove logic. (yliu)

2015-08-13 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 200fadedc -> 073cb16d8


HADOOP-12295. Improve NetworkTopology#InnerNode#remove logic. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/073cb16d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/073cb16d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/073cb16d

Branch: refs/heads/branch-2
Commit: 073cb16d81489239942f8b00f7bb3299ccddd45c
Parents: 200fade
Author: yliu y...@apache.org
Authored: Thu Aug 13 16:42:40 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Aug 13 16:42:40 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../org/apache/hadoop/net/NetworkTopology.java  | 38 ++--
 .../apache/hadoop/net/TestNetworkTopology.java  |  1 +
 3 files changed, 21 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/073cb16d/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5676e6c..5c4def1 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -173,6 +173,8 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11813. releasedocmaker.py should use today's date instead of
 unreleased (Darrell Taylor via aw)
 
+HADOOP-12295. Improve NetworkTopology#InnerNode#remove logic. (yliu)
+
   BUG FIXES
 
 HADOOP-12124. Add HTrace support for FsShell (cmccabe)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/073cb16d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 970ad40..fe6e439 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -166,10 +166,11 @@ public class NetworkTopology {
  * @return true if the node is added; false otherwise
  */
 boolean add(Node n) {
-      if (!isAncestor(n))
-        throw new IllegalArgumentException(n.getName()+", which is located at "
-            +n.getNetworkLocation()+", is not a decendent of "
-            +getPath(this));
+      if (!isAncestor(n)) {
+        throw new IllegalArgumentException(n.getName()
+            + ", which is located at " + n.getNetworkLocation()
+            + ", is not a descendent of " + getPath(this));
+      }
   if (isParent(n)) {
 // this node is the parent of n; add n directly
 n.setParent(this);
@@ -227,12 +228,11 @@ public class NetworkTopology {
  * @return true if the node is deleted; false otherwise
  */
 boolean remove(Node n) {
-      String parent = n.getNetworkLocation();
-      String currentPath = getPath(this);
-      if (!isAncestor(n))
+      if (!isAncestor(n)) {
         throw new IllegalArgumentException(n.getName()
-            +", which is located at "
-            +parent+", is not a descendent of "+currentPath);
+            + ", which is located at " + n.getNetworkLocation()
+            + ", is not a descendent of " + getPath(this));
+      }
   if (isParent(n)) {
 // this node is the parent of n; remove n directly
 if (childrenMap.containsKey(n.getName())) {
@@ -250,15 +250,8 @@ public class NetworkTopology {
   } else {
 // find the next ancestor node: the parent node
 String parentName = getNextAncestorName(n);
-        InnerNode parentNode = null;
-        int i;
-        for(i=0; i<children.size(); i++) {
-          if (children.get(i).getName().equals(parentName)) {
-            parentNode = (InnerNode)children.get(i);
-            break;
-          }
-        }
-        if (parentNode==null) {
+        InnerNode parentNode = (InnerNode)childrenMap.get(parentName);
+        if (parentNode == null) {
           return false;
         }
 // remove n from the parent node
@@ -266,8 +259,13 @@ public class NetworkTopology {
 // if the parent node has no children, remove the parent node too
 if (isRemoved) {
   if (parentNode.getNumOfChildren() == 0) {
-            Node prev = children.remove(i);
-            childrenMap.remove(prev.getName());
+            for(int i = 0; i < children.size(); i++) {
+              if (children.get(i).getName().equals(parentName

hadoop git commit: HDFS-8850. VolumeScanner thread exits with exception if there is no block pool to be scanned but there are suspicious blocks. (Colin Patrick McCabe via yliu)

2015-08-04 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk c3364ca8e -> 9a080


HDFS-8850. VolumeScanner thread exits with exception if there is no block pool 
to be scanned but there are suspicious blocks. (Colin Patrick McCabe via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a08

Branch: refs/heads/trunk
Commit: 9a0806824e8982ec3b47315f9b960474186a
Parents: c3364ca
Author: yliu y...@apache.org
Authored: Tue Aug 4 15:54:23 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Aug 4 15:54:23 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  4 
 .../hadoop/hdfs/server/datanode/VolumeScanner.java  | 12 +++-
 2 files changed, 11 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a08/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a2d7c2f..eeefd84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1109,6 +1109,10 @@ Release 2.8.0 - UNRELEASED
 HDFS-8847. change TestHDFSContractAppend to not override
 testRenameFileBeingAppended method. (Zhihai Xu)
 
+HDFS-8850. VolumeScanner thread exits with exception if there is no block
+pool to be scanned but there are suspicious blocks. (Colin Patrick McCabe
+via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a08/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index ff655c2..212e13b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -536,11 +536,13 @@ public class VolumeScanner extends Thread {
   return 0;
 }
   }
-      long saveDelta = monotonicMs - curBlockIter.getLastSavedMs();
-      if (saveDelta >= conf.cursorSaveMs) {
-        LOG.debug("{}: saving block iterator {} after {} ms.",
-            this, curBlockIter, saveDelta);
-        saveBlockIterator(curBlockIter);
+      if (curBlockIter != null) {
+        long saveDelta = monotonicMs - curBlockIter.getLastSavedMs();
+        if (saveDelta >= conf.cursorSaveMs) {
+          LOG.debug("{}: saving block iterator {} after {} ms.",
+              this, curBlockIter, saveDelta);
+          saveBlockIterator(curBlockIter);
+        }
       }
       bytesScanned = scanBlock(block, conf.targetBytesPerSec);
       if (bytesScanned >= 0) {
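
The failure mode, reduced to a self-contained sketch (BlockIterator here
is a hypothetical stand-in for the datanode's per-block-pool iterator):
when a volume has suspicious blocks queued but no block pool registered
yet, curBlockIter is null, and the old unconditional dereference threw a
NullPointerException that killed the VolumeScanner thread.

    public class NullIteratorSketch {
      interface BlockIterator {
        long getLastSavedMs();
      }

      public static void main(String[] args) {
        BlockIterator curBlockIter = null; // no block pool to scan yet
        long monotonicMs = 12345L;
        if (curBlockIter != null) {        // the guard added by the patch
          long saveDelta = monotonicMs - curBlockIter.getLastSavedMs();
          System.out.println("would save cursor after " + saveDelta + " ms");
        } else {
          System.out.println("skip cursor save; scan the suspicious block");
        }
      }
    }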



hadoop git commit: HDFS-8850. VolumeScanner thread exits with exception if there is no block pool to be scanned but there are suspicious blocks. (Colin Patrick McCabe via yliu)

2015-08-04 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 89ccfd9fc -> d2b941f94


HDFS-8850. VolumeScanner thread exits with exception if there is no block pool 
to be scanned but there are suspicious blocks. (Colin Patrick McCabe via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2b941f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2b941f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2b941f9

Branch: refs/heads/branch-2
Commit: d2b941f94a835f7bdde7714d21a470b505aa582b
Parents: 89ccfd9
Author: yliu y...@apache.org
Authored: Tue Aug 4 15:51:37 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Aug 4 15:51:37 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  4 
 .../hadoop/hdfs/server/datanode/VolumeScanner.java  | 12 +++-
 2 files changed, 11 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2b941f9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f03fb21..2b19fcb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -767,6 +767,10 @@ Release 2.8.0 - UNRELEASED
 HDFS-8847. change TestHDFSContractAppend to not override
 testRenameFileBeingAppended method. (Zhihai Xu)
 
+HDFS-8850. VolumeScanner thread exits with exception if there is no block
+pool to be scanned but there are suspicious blocks. (Colin Patrick McCabe
+via yliu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2b941f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index ff655c2..212e13b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -536,11 +536,13 @@ public class VolumeScanner extends Thread {
   return 0;
 }
   }
-      long saveDelta = monotonicMs - curBlockIter.getLastSavedMs();
-      if (saveDelta >= conf.cursorSaveMs) {
-        LOG.debug("{}: saving block iterator {} after {} ms.",
-            this, curBlockIter, saveDelta);
-        saveBlockIterator(curBlockIter);
+      if (curBlockIter != null) {
+        long saveDelta = monotonicMs - curBlockIter.getLastSavedMs();
+        if (saveDelta >= conf.cursorSaveMs) {
+          LOG.debug("{}: saving block iterator {} after {} ms.",
+              this, curBlockIter, saveDelta);
+          saveBlockIterator(curBlockIter);
+        }
       }
       bytesScanned = scanBlock(block, conf.targetBytesPerSec);
       if (bytesScanned >= 0) {



hadoop git commit: HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)

2015-07-22 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 94c6a4aa8 -> 402532628


HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40253262
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40253262
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40253262

Branch: refs/heads/trunk
Commit: 4025326288c0167ff300d4f7ecc96f84ed141912
Parents: 94c6a4a
Author: yliu y...@apache.org
Authored: Wed Jul 22 15:16:50 2015 +0800
Committer: yliu y...@apache.org
Committed: Wed Jul 22 15:16:50 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
 .../hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java| 5 +++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40253262/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 50803de..66cb89e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -740,6 +740,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8495. Consolidate append() related implementation into a single class.
 (Rakesh R via wheat9)
 
+HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40253262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
index a465f85..c486095 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
@@ -22,9 +22,9 @@ import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.GregorianCalendar;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.TreeMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,6 +36,7 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdfs.DFSUtil;
 
 import com.google.common.annotations.VisibleForTesting;
+
 import org.slf4j.Logger;
 
 /**
@@ -47,7 +48,7 @@ import org.slf4j.Logger;
 class InvalidateBlocks {
   /** Mapping: DatanodeInfo -> Collection of Blocks */
   private final Map<DatanodeInfo, LightWeightHashSet<Block>> node2blocks =
-      new TreeMap<DatanodeInfo, LightWeightHashSet<Block>>();
+      new HashMap<DatanodeInfo, LightWeightHashSet<Block>>();
   /** The total number of blocks in the map. */
   private long numBlocks = 0L;
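
Same theme as HDFS-8862, shown self-contained (DatanodeKey is a
hypothetical stand-in for DatanodeInfo): node2blocks is only ever accessed
per datanode, so a hash map keyed on equals()/hashCode() suffices and
avoids the per-operation comparisons a sorted TreeMap performs.

    import java.util.HashMap;
    import java.util.Map;

    public class Node2BlocksSketch {
      static final class DatanodeKey {
        final String uuid;
        DatanodeKey(String uuid) { this.uuid = uuid; }
        @Override public int hashCode() { return uuid.hashCode(); }
        @Override public boolean equals(Object o) {
          return o instanceof DatanodeKey && uuid.equals(((DatanodeKey) o).uuid);
        }
      }

      public static void main(String[] args) {
        Map<DatanodeKey, Integer> node2blocks = new HashMap<DatanodeKey, Integer>();
        node2blocks.put(new DatanodeKey("dn-1"), 7);
        System.out.println(node2blocks.get(new DatanodeKey("dn-1"))); // 7
      }
    }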
 



hadoop git commit: HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)

2015-07-22 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 03d68b557 -> 930e344d7


HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/930e344d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/930e344d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/930e344d

Branch: refs/heads/branch-2
Commit: 930e344d7885cc392eb3ee01e59b748f175fff21
Parents: 03d68b5
Author: yliu y...@apache.org
Authored: Wed Jul 22 15:15:08 2015 +0800
Committer: yliu y...@apache.org
Committed: Wed Jul 22 15:15:08 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
 .../hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java| 5 +++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/930e344d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ea7bdc4..1383ea9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -397,6 +397,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8495. Consolidate append() related implementation into a single class.
 (Rakesh R via wheat9)
 
+HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/930e344d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
index e357528..c60266e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
@@ -22,9 +22,9 @@ import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.GregorianCalendar;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.TreeMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,6 +36,7 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdfs.DFSUtil;
 
 import com.google.common.annotations.VisibleForTesting;
+
 import org.slf4j.Logger;
 
 /**
@@ -47,7 +48,7 @@ import org.slf4j.Logger;
 class InvalidateBlocks {
   /** Mapping: DatanodeInfo -> Collection of Blocks */
   private final Map<DatanodeInfo, LightWeightHashSet<Block>> node2blocks =
-      new TreeMap<DatanodeInfo, LightWeightHashSet<Block>>();
+      new HashMap<DatanodeInfo, LightWeightHashSet<Block>>();
   /** The total number of blocks in the map. */
   private long numBlocks = 0L;
 



hadoop git commit: HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)

2015-07-20 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3d58c7a70 -> cadd02ad1


HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cadd02ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cadd02ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cadd02ad

Branch: refs/heads/branch-2
Commit: cadd02ad1ddde1644f7241333c8ae446cb8c5f7f
Parents: 3d58c7a
Author: yliu y...@apache.org
Authored: Tue Jul 21 09:18:43 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Jul 21 09:18:43 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  2 ++
 .../blockmanagement/CorruptReplicasMap.java  | 19 ++-
 .../blockmanagement/TestCorruptReplicaInfo.java  | 12 ++--
 3 files changed, 22 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadd02ad/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c4ce009..f9b365b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -386,6 +386,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write
 files rather than the entire DFSClient. (mingma)
 
+HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadd02ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index fc2e234..f83cbaf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -17,12 +17,19 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.Server;
 
-import java.util.*;
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Stores information about all corrupt blocks in the File System.
@@ -46,8 +53,8 @@ public class CorruptReplicasMap{
 CORRUPTION_REPORTED  // client or datanode reported the corruption
   }
 
-  private final SortedMap<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
-    new TreeMap<Block, Map<DatanodeDescriptor, Reason>>();
+  private final Map<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
+    new HashMap<Block, Map<DatanodeDescriptor, Reason>>();
 
   /**
* Mark the block belonging to datanode as corrupt.
@@ -181,13 +188,15 @@ public class CorruptReplicasMap{
* @return Up to numExpectedBlocks blocks from startingBlockId if it exists
*
*/
-  long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
+  @VisibleForTesting
+  long[] getCorruptReplicaBlockIdsForTesting(int numExpectedBlocks,
Long startingBlockId) {
     if (numExpectedBlocks < 0 || numExpectedBlocks > 100) {
   return null;
 }
 
-    Iterator<Block> blockIt = corruptReplicasMap.keySet().iterator();
+    Iterator<Block> blockIt =
+        new TreeMap<>(corruptReplicasMap).keySet().iterator();
 
 // if the starting block id was specified, iterate over keys until
 // we find the matching block. If we find a matching block, break

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadd02ad/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index 21fb54e..4bdaaac 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test
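
The pattern behind getCorruptReplicaBlockIdsForTesting, self-contained
(plain JDK types): keep the hot map unsorted, and build a sorted copy only
on the rare, test-facing path that needs deterministic iteration order.

    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;
    import java.util.TreeMap;

    public class SortedViewSketch {
      public static void main(String[] args) {
        Map<Long, String> corrupt = new HashMap<Long, String>();
        corrupt.put(3L, "c");
        corrupt.put(1L, "a");
        corrupt.put(2L, "b");

        // Sorted view built on demand; the map itself stays a HashMap.
        Iterator<Long> blockIt = new TreeMap<Long, String>(corrupt).keySet().iterator();
        while (blockIt.hasNext()) {
          System.out.print(blockIt.next() + " "); // prints: 1 2 3
        }
        System.out.println();
      }
    }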

hadoop git commit: HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)

2015-07-20 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk ed01dc70b -> d6d58606b


HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6d58606
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6d58606
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6d58606

Branch: refs/heads/trunk
Commit: d6d58606b8adf94b208aed5fc2d054b9dd081db1
Parents: ed01dc7
Author: yliu y...@apache.org
Authored: Tue Jul 21 09:20:22 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Jul 21 09:20:22 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  2 ++
 .../blockmanagement/CorruptReplicasMap.java  | 19 ++-
 .../blockmanagement/TestCorruptReplicaInfo.java  | 12 ++--
 3 files changed, 22 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d58606/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cd32c0e..388b553 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -727,6 +727,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write
 files rather than the entire DFSClient. (mingma)
 
+HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d58606/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index fc2e234..f83cbaf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -17,12 +17,19 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.Server;
 
-import java.util.*;
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Stores information about all corrupt blocks in the File System.
@@ -46,8 +53,8 @@ public class CorruptReplicasMap{
 CORRUPTION_REPORTED  // client or datanode reported the corruption
   }
 
-  private final SortedMap<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
-    new TreeMap<Block, Map<DatanodeDescriptor, Reason>>();
+  private final Map<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
+    new HashMap<Block, Map<DatanodeDescriptor, Reason>>();
 
   /**
* Mark the block belonging to datanode as corrupt.
@@ -181,13 +188,15 @@ public class CorruptReplicasMap{
* @return Up to numExpectedBlocks blocks from startingBlockId if it exists
*
*/
-  long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
+  @VisibleForTesting
+  long[] getCorruptReplicaBlockIdsForTesting(int numExpectedBlocks,
Long startingBlockId) {
     if (numExpectedBlocks < 0 || numExpectedBlocks > 100) {
   return null;
 }
 
-    Iterator<Block> blockIt = corruptReplicasMap.keySet().iterator();
+    Iterator<Block> blockIt =
+        new TreeMap<>(corruptReplicasMap).keySet().iterator();
 
 // if the starting block id was specified, iterate over keys until
 // we find the matching block. If we find a matching block, break

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d58606/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index 21fb54e..4bdaaac 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java

hadoop git commit: HDFS-8619. Erasure Coding: revisit replica counting for striped blocks. (Jing Zhao via yliu)

2015-07-15 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 0a93712f3 -> f32d9a175


HDFS-8619. Erasure Coding: revisit replica counting for striped blocks. (Jing 
Zhao via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f32d9a17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f32d9a17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f32d9a17

Branch: refs/heads/HDFS-7285
Commit: f32d9a175837c5b6c3d008089e46475d27a0935c
Parents: 0a93712
Author: yliu y...@apache.org
Authored: Wed Jul 15 22:35:19 2015 +0800
Committer: yliu y...@apache.org
Committed: Wed Jul 15 22:35:19 2015 +0800

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 +
 .../hdfs/server/blockmanagement/BlockInfo.java  |  3 +
 .../blockmanagement/BlockInfoContiguous.java|  5 ++
 .../blockmanagement/BlockInfoStriped.java   | 11 
 .../server/blockmanagement/BlockManager.java| 42 +++--
 .../hdfs/server/blockmanagement/BlocksMap.java  |  4 +-
 .../hdfs/TestReadStripedFileWithDecoding.java   | 64 
 .../server/namenode/TestAddStripedBlocks.java   | 23 ---
 8 files changed, 127 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32d9a17/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 93c3162..8403d1a 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -353,3 +353,6 @@
 
 HDFS-8734. Erasure Coding: fix one cell need two packets. (Walter Su via
 jing9)
+
+HDFS-8619. Erasure Coding: revisit replica counting for striped blocks.
+(Jing Zhao via yliu)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32d9a17/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 61068b9..82aa348 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -178,6 +178,9 @@ public abstract class BlockInfo extends Block
 
   public abstract boolean isStriped();
 
+  /** @return true if there is no datanode storage associated with the block */
+  abstract boolean hasNoStorage();
+
   /**
* Find specified DatanodeDescriptor.
* @return index or -1 if not found.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32d9a17/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 5199101..dfca8ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -150,4 +150,9 @@ public class BlockInfoContiguous extends BlockInfo {
   public final boolean isStriped() {
 return false;
   }
+
+  @Override
+  final boolean hasNoStorage() {
+return getStorageInfo(0) == null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32d9a17/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 07e29f8..6674510 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -272,4 +272,15 @@ public class BlockInfoStriped extends BlockInfo {
 }
 return ucBlock
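
The shape of the new hasNoStorage() hook, in a self-contained sketch
(simplified hypothetical types, not the real BlockInfo hierarchy): a
contiguous block can answer by checking its first storage slot because
replicas are packed from index 0, while a striped block group has one slot
per internal block and any of them may be the only one occupied.

    public class HasNoStorageSketch {
      static abstract class BlockInfoSketch {
        abstract boolean hasNoStorage();
      }

      static class ContiguousSketch extends BlockInfoSketch {
        Object[] storages = new Object[3];  // replica slots, packed from 0
        @Override boolean hasNoStorage() {
          return storages[0] == null;
        }
      }

      static class StripedSketch extends BlockInfoSketch {
        Object[] storages = new Object[9];  // one slot per internal block
        @Override boolean hasNoStorage() {
          for (Object s : storages) {
            if (s != null) {
              return false;
            }
          }
          return true;
        }
      }

      public static void main(String[] args) {
        System.out.println(new ContiguousSketch().hasNoStorage()); // true
        System.out.println(new StripedSketch().hasNoStorage());    // true
      }
    }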

hadoop git commit: HDFS-8684. Erasure Coding: fix some block number calculation for striped block. (yliu)

2015-07-06 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 0b7af27b9 -> ee01a0950


HDFS-8684. Erasure Coding: fix some block number calculation for striped block. 
(yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee01a095
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee01a095
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee01a095

Branch: refs/heads/HDFS-7285
Commit: ee01a09500224136464f2c3e0a5d9ba53242d93f
Parents: 0b7af27
Author: yliu y...@apache.org
Authored: Mon Jul 6 19:14:18 2015 +0800
Committer: yliu y...@apache.org
Committed: Mon Jul 6 19:14:18 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt |  5 -
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java | 11 ---
 .../apache/hadoop/hdfs/server/namenode/INodeFile.java|  6 +-
 3 files changed, 9 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee01a095/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 7e10753..8f720fc 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -325,4 +325,7 @@
 multiple times.  (Kai Sasaki via szetszwo)
 
 HDFS-8468. 2 RPC calls for every file read in DFSClient#open(..) resulting 
in
-double Audit log entries (vinayakumarb)
\ No newline at end of file
+double Audit log entries (vinayakumarb)
+
+HDFS-8684. Erasure Coding: fix some block number calculation for striped
+block. (yliu)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee01a095/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index dcb0be77..c98bdb0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3109,12 +3109,12 @@ public class BlockManager {
 bc.getStoragePolicyID());
     final List<StorageType> excessTypes = storagePolicy.chooseExcess(
 replication, DatanodeStorageInfo.toStorageTypes(nonExcess));
-if (!storedBlock.isStriped()) {
-  chooseExcessReplicasContiguous(bc, nonExcess, storedBlock,
-  replication, addedNode, delNodeHint, excessTypes);
-} else {
+if (storedBlock.isStriped()) {
   chooseExcessReplicasStriped(bc, nonExcess, storedBlock, delNodeHint,
   excessTypes);
+} else {
+  chooseExcessReplicasContiguous(bc, nonExcess, storedBlock,
+  replication, addedNode, delNodeHint, excessTypes);
 }
   }
 
@@ -3191,9 +3191,6 @@ public class BlockManager {
 assert storedBlock instanceof BlockInfoStriped;
 BlockInfoStriped sblk = (BlockInfoStriped) storedBlock;
 short groupSize = sblk.getTotalBlockNum();
-    if (nonExcess.size() <= groupSize) {
-      return;
-    }
     BlockPlacementPolicy placementPolicy = placementPolicies.getPolicy(true);
     List<DatanodeStorageInfo> empty = new ArrayList<>(0);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee01a095/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 4688001..b2b0fea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -809,13 +809,9 @@ public class INodeFile extends INodeWithAdditionalFields
   if (!includesLastUcBlock) {
 size = 0;
   } else if (usePreferredBlockSize4LastUcBlock) {
-// Striped blocks keeps block group which counts
-// (data blocks num + parity blocks num). When you
-// count actual used size by BlockInfoStripedUC must
-// be multiplied by these blocks number.
 BlockInfoStripedUnderConstruction blockInfoStripedUC
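
A worked example of the block-group arithmetic this patch touches (schema
values are the HDFS-7285 defaults, everything else simplified): an RS-6-3
striped group spans 9 internal blocks, so stored replicas only become
"excess" above that group size.

    public class GroupSizeSketch {
      public static void main(String[] args) {
        short dataBlkNum = 6;
        short parityBlkNum = 3;
        short groupSize = (short) (dataBlkNum + parityBlkNum); // 9
        int stored = 10;  // internal blocks reported for the group
        System.out.println("excess = " + Math.max(0, stored - groupSize)); // 1
      }
    }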

hadoop git commit: HDFS-8559. Erasure Coding: fix non-protobuf fsimage for striped blocks. (Jing Zhao via yliu)

2015-06-14 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 683332b36 -> 49d5cff49


HDFS-8559. Erasure Coding: fix non-protobuf fsimage for striped blocks. (Jing 
Zhao via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49d5cff4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49d5cff4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49d5cff4

Branch: refs/heads/HDFS-7285
Commit: 49d5cff49011cc0878665204e22b5c832bc914ce
Parents: 683332b
Author: yliu y...@apache.org
Authored: Sun Jun 14 15:39:19 2015 +0800
Committer: yliu y...@apache.org
Committed: Sun Jun 14 15:39:19 2015 +0800

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 +
 .../hdfs/server/namenode/FSImageFormat.java | 58 +++
 .../server/namenode/FSImageSerialization.java   | 76 +---
 .../hdfs/server/namenode/TestFSImage.java   | 22 +-
 4 files changed, 33 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49d5cff4/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 2eb8259..1ae3e9b 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -299,3 +299,6 @@
 
 HDFS-8585. Erasure Coding: Remove dataBlockNum and parityBlockNum from
 StripedBlockProto. (Yi Liu via jing9)
+
+HDFS-8559. Erasure Coding: fix non-protobuf fsimage for striped blocks.
+(Jing Zhao via yliu)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49d5cff4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 3083952..d9a74e6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -365,12 +365,6 @@ public class FSImageFormat {
   long maxSequentialBlockId = in.readLong();
   namesystem.getBlockIdManager().setLastAllocatedContiguousBlockId(
   maxSequentialBlockId);
-  if (NameNodeLayoutVersion.supports(
-  NameNodeLayoutVersion.Feature.ERASURE_CODING, imgVersion)) {
-final long maxStripedBlockId = in.readLong();
-namesystem.getBlockIdManager().setLastAllocatedStripedBlockId(
-maxStripedBlockId);
-  }
 } else {
 
   long startingGenStamp = namesystem.getBlockIdManager()
@@ -759,31 +753,16 @@ public class FSImageFormat {
   atime = in.readLong();
 }
 final long blockSize = in.readLong();
-    final boolean isStriped = NameNodeLayoutVersion.supports(
-        NameNodeLayoutVersion.Feature.ERASURE_CODING, imgVersion)
-        && (in.readBoolean());
     final int numBlocks = in.readInt();
-    // TODO: ECSchema can be restored from persisted file (HDFS-7859).
-    final ECSchema schema = isStriped ?
-        ErasureCodingSchemaManager.getSystemDefaultSchema() : null;
 
     if (numBlocks >= 0) {
       // file

       // read blocks
-      Block[] blocks;
-      if (isStriped) {
-        blocks = new Block[numBlocks];
-        for (int j = 0; j < numBlocks; j++) {
-          blocks[j] = new BlockInfoStriped(new Block(), schema);
-          blocks[j].readFields(in);
-        }
-      } else {
-        blocks = new BlockInfoContiguous[numBlocks];
-        for (int j = 0; j < numBlocks; j++) {
-          blocks[j] = new BlockInfoContiguous(replication);
-          blocks[j].readFields(in);
-        }
+      Block[] blocks = new BlockInfoContiguous[numBlocks];
+      for (int j = 0; j < numBlocks; j++) {
+        blocks[j] = new BlockInfoContiguous(replication);
+        blocks[j].readFields(in);
       }

       String clientName = "";
@@ -803,16 +782,8 @@ public class FSImageFormat {
 // convert the last block to BlockUC
     if (blocks.length > 0) {
   Block lastBlk = blocks[blocks.length - 1];
-  if (isStriped){
-BlockInfoStriped lastStripedBlk = (BlockInfoStriped) lastBlk;
-blocks[blocks.length - 1]
-= new BlockInfoStripedUnderConstruction(lastBlk,
-lastStripedBlk.getSchema
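
Why the striped fields had to come out of this code path, as a
self-contained sketch (simplified to one long per block): the non-protobuf
fsimage is a fixed sequential record stream read with DataInput, so reader
and writer must agree field-for-field, and the old layout simply has no
room for per-block striping metadata.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class LegacyImageSketch {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        out.writeInt(2);       // numBlocks
        out.writeLong(1001L);  // block record #1 (simplified)
        out.writeLong(1002L);  // block record #2

        DataInputStream in =
            new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
        int numBlocks = in.readInt();
        for (int j = 0; j < numBlocks; j++) {
          System.out.println("block " + in.readLong());
        }
      }
    }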

hadoop git commit: HDFS-8363. Erasure Coding: DFSStripedInputStream#seekToNewSource. (yliu)

2015-05-12 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 95205a31f -> 64be3d5ba


HDFS-8363. Erasure Coding: DFSStripedInputStream#seekToNewSource. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64be3d5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64be3d5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64be3d5b

Branch: refs/heads/HDFS-7285
Commit: 64be3d5ba7905c8e707997abccf5540918d16ad3
Parents: 95205a3
Author: yliu y...@apache.org
Authored: Wed May 13 08:48:56 2015 +0800
Committer: yliu y...@apache.org
Committed: Wed May 13 08:48:56 2015 +0800

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt |  2 ++
 .../apache/hadoop/hdfs/DFSStripedInputStream.java| 15 ---
 2 files changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64be3d5b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 79ad208..0a2bb9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -204,3 +204,5 @@
 
 HDFS-8368. Erasure Coding: DFS opening a non-existent file need to be 
 handled properly (Rakesh R via zhz)
+
+HDFS-8363. Erasure Coding: DFSStripedInputStream#seekToNewSource. (yliu)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64be3d5b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 7678fae..8f15eda 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -130,12 +130,12 @@ public class DFSStripedInputStream extends DFSInputStream {
 }
   }
 
-  private final short groupSize = HdfsConstants.NUM_DATA_BLOCKS;
-  private final BlockReader[] blockReaders = new BlockReader[groupSize];
-  private final DatanodeInfo[] currentNodes = new DatanodeInfo[groupSize];
+  private final BlockReader[] blockReaders;
+  private final DatanodeInfo[] currentNodes;
   private final int cellSize;
   private final short dataBlkNum;
   private final short parityBlkNum;
+  private final short groupSize;
   /** the buffer for a complete stripe */
   private ByteBuffer curStripeBuf;
   private final ECSchema schema;
@@ -155,6 +155,9 @@ public class DFSStripedInputStream extends DFSInputStream {
 cellSize = schema.getChunkSize();
 dataBlkNum = (short) schema.getNumDataUnits();
 parityBlkNum = (short) schema.getNumParityUnits();
+groupSize = dataBlkNum;
+blockReaders = new BlockReader[groupSize];
+currentNodes = new DatanodeInfo[groupSize];
 curStripeRange = new StripeRange(0, 0);
     readingService =
         new ExecutorCompletionService<>(dfsClient.getStripedReadsThreadPool());
@@ -392,6 +395,12 @@ public class DFSStripedInputStream extends DFSInputStream {
   }
 
   @Override
+  public synchronized boolean seekToNewSource(long targetPos)
+  throws IOException {
+return false;
+  }
+
+  @Override
   protected synchronized int readWithStrategy(ReaderStrategy strategy,
   int off, int len) throws IOException {
 dfsClient.checkOpen();
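
Why the override simply returns false, as a self-contained sketch
(StripedStreamSketch is a hypothetical stand-in for DFSStripedInputStream):
a striped read already fans out to one datanode per cell, so there is no
single alternate replica to fail over to, and the method reports that no
new source was found.

    import java.io.IOException;

    public class SeekToNewSourceSketch {
      static class StripedStreamSketch {
        synchronized boolean seekToNewSource(long targetPos) throws IOException {
          return false; // callers fall back to their normal retry handling
        }
      }

      public static void main(String[] args) throws IOException {
        System.out.println(new StripedStreamSketch().seekToNewSource(1024L)); // false
      }
    }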



hadoop git commit: HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication. (Contributed by Zhe Zhang)

2015-05-12 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8badd82ce -> 6d5da9484


HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication. 
(Contributed by Zhe Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d5da948
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d5da948
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d5da948

Branch: refs/heads/trunk
Commit: 6d5da9484185ca9f585195d6da069b9cd5be4044
Parents: 8badd82
Author: yliu y...@apache.org
Authored: Tue May 12 21:29:22 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue May 12 21:29:22 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../server/blockmanagement/BlockCollection.java |  2 +-
 .../blockmanagement/BlockInfoContiguous.java|  2 +-
 .../server/blockmanagement/BlockManager.java| 16 ++---
 .../blockmanagement/DecommissionManager.java| 10 
 .../hdfs/server/namenode/FSDirAttrOp.java   |  4 ++--
 .../hdfs/server/namenode/FSDirConcatOp.java |  4 ++--
 .../hdfs/server/namenode/FSDirectory.java   |  4 ++--
 .../hdfs/server/namenode/FSEditLogLoader.java   |  7 +++---
 .../hdfs/server/namenode/FSNamesystem.java  |  4 ++--
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  8 +++
 .../hdfs/server/namenode/NamenodeFsck.java  |  9 ---
 .../snapshot/FileWithSnapshotFeature.java   |  5 ++--
 .../blockmanagement/TestBlockManager.java   |  6 ++---
 .../blockmanagement/TestReplicationPolicy.java  |  4 ++--
 .../snapshot/TestFileWithSnapshotFeature.java   |  2 +-
 .../namenode/snapshot/TestSnapshotDeletion.java |  4 ++--
 .../snapshot/TestSnapshotReplication.java   | 25 
 18 files changed, 66 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d5da948/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7cff8d4..cd477af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -543,6 +543,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8357. Consolidate parameters of INode.CleanSubtree() into a parameter
 objects. (Li Lu via wheat9)
 
+HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication.
+(Contributed by Zhe Zhang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d5da948/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index e9baf85..c0a959c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -58,7 +58,7 @@ public interface BlockCollection {
* Get block replication for the collection 
* @return block replication value
*/
-  public short getBlockReplication();
+  public short getPreferredBlockReplication();
 
   /** 
* @return the storage policy ID.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d5da948/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index df27882..1ba3536 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -380,7 +380,7 @@ public class BlockInfoContiguous extends Block
 if(isComplete()) {
   BlockInfoContiguousUnderConstruction ucBlock =
   new BlockInfoContiguousUnderConstruction(this,
-  getBlockCollection().getBlockReplication(), s, targets);
+  getBlockCollection().getPreferredBlockReplication(), s, targets);
   ucBlock.setBlockCollection(getBlockCollection());
   return ucBlock;
 }

http
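
The rename is about intent rather than behavior: the returned value is the replication factor the file is configured to have, which can differ from the number of replicas that actually exist at a given moment (for example while re-replication is catching up, or for files referenced by snapshots). A hedged sketch of a call-site migration; the local variable name is illustrative:

    // 'bc' is a BlockCollection, per the interface diff above.
    short preferred = bc.getPreferredBlockReplication();  // was: bc.getBlockReplication()
    // The new name makes explicit that this is the target replication factor
    // for the file, not a live count of replicas for any particular block.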

hadoop git commit: HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication. (Contributed by Zhe Zhang)

2015-05-12 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f887243f8 -> c31c6fbda


HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication. 
(Contributed by Zhe Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c31c6fbd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c31c6fbd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c31c6fbd

Branch: refs/heads/branch-2
Commit: c31c6fbda7e5b9e6f97c1d78ddc40e5a6795020c
Parents: f887243
Author: yliu y...@apache.org
Authored: Tue May 12 21:26:57 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue May 12 21:26:57 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../server/blockmanagement/BlockCollection.java |  2 +-
 .../blockmanagement/BlockInfoContiguous.java|  2 +-
 .../server/blockmanagement/BlockManager.java| 16 ++---
 .../blockmanagement/DecommissionManager.java| 10 
 .../hdfs/server/namenode/FSDirAttrOp.java   |  4 ++--
 .../hdfs/server/namenode/FSDirConcatOp.java |  4 ++--
 .../hdfs/server/namenode/FSDirectory.java   |  4 ++--
 .../hdfs/server/namenode/FSEditLogLoader.java   |  7 +++---
 .../hdfs/server/namenode/FSNamesystem.java  |  4 ++--
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  8 +++
 .../hdfs/server/namenode/NamenodeFsck.java  |  9 ---
 .../snapshot/FileWithSnapshotFeature.java   |  5 ++--
 .../blockmanagement/TestBlockManager.java   |  6 ++---
 .../snapshot/TestFileWithSnapshotFeature.java   |  2 +-
 .../namenode/snapshot/TestSnapshotDeletion.java |  4 ++--
 .../snapshot/TestSnapshotReplication.java   | 25 
 17 files changed, 64 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c31c6fbd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3d1ab84..c369419 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -210,6 +210,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8357. Consolidate parameters of INode.CleanSubtree() into a parameter
 objects. (Li Lu via wheat9)
 
+HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication.
+(Contributed by Zhe Zhang) 
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c31c6fbd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index e9baf85..c0a959c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -58,7 +58,7 @@ public interface BlockCollection {
* Get block replication for the collection 
* @return block replication value
*/
-  public short getBlockReplication();
+  public short getPreferredBlockReplication();
 
   /** 
* @return the storage policy ID.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c31c6fbd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 695eadf..4ab75e8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -383,7 +383,7 @@ public class BlockInfoContiguous extends Block
 if(isComplete()) {
   BlockInfoContiguousUnderConstruction ucBlock =
   new BlockInfoContiguousUnderConstruction(this,
-  getBlockCollection().getBlockReplication(), s, targets);
+  getBlockCollection().getPreferredBlockReplication(), s, targets);
   ucBlock.setBlockCollection(getBlockCollection());
   return ucBlock;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob

hadoop git commit: HDFS-7997. The first non-existing xattr should also throw IOException. (zhouyingchao via yliu)

2015-03-31 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 99b825569 -> e4cd67e9b


HDFS-7997. The first non-existing xattr should also throw IOException. 
(zhouyingchao via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4cd67e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4cd67e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4cd67e9

Branch: refs/heads/branch-2
Commit: e4cd67e9b0bcc4e5aedb8c189dc780326c69b032
Parents: 99b8255
Author: yliu y...@apache.org
Authored: Tue Mar 31 21:16:13 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Mar 31 21:16:13 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java | 3 ++-
 .../hadoop-hdfs/src/test/resources/testXAttrConf.xml | 4 ++--
 3 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4cd67e9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3d7690e..da3d729 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -77,6 +77,9 @@ Release 2.8.0 - UNRELEASED
 DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin
 P. McCabe)
 
+HDFS-7997. The first non-existing xattr should also throw IOException.
+(zhouyingchao via yliu)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4cd67e9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 45e63f2..d5c9124 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -108,7 +108,8 @@ class FSDirXAttrOp {
   return filteredAll;
 }
 if (filteredAll == null || filteredAll.isEmpty()) {
-  return null;
+      throw new IOException(
+          "At least one of the attributes provided was not found.");
     }
     List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
 for (XAttr xAttr : xAttrs) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4cd67e9/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
index 9c66cba..c2e836c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
@@ -420,8 +420,8 @@
      </cleanup-commands>
      <comparators>
        <comparator>
-          <type>ExactComparator</type>
-          <expected-output># file: /file1#LF#</expected-output>
+          <type>SubstringComparator</type>
+          <expected-output>At least one of the attributes provided was not found</expected-output>
        </comparator>
      </comparators>
    </test>
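
The behavioral effect: a getXAttrs request in which none of the named attributes exist now fails fast with an IOException instead of returning null. A hedged client-side sketch; the path and attribute name are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;

    // 'fs' is an initialized FileSystem.
    try {
      byte[] value = fs.getXAttr(new Path("/file1"), "user.missing");
    } catch (IOException e) {
      // After HDFS-7997: "At least one of the attributes provided was not found."
    }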



hadoop git commit: HDFS-7997. The first non-existing xattr should also throw IOException. (zhouyingchao via yliu)

2015-03-31 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk b5a22e983 -> 3df61f303


HDFS-7997. The first non-existing xattr should also throw IOException. 
(zhouyingchao via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3df61f30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3df61f30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3df61f30

Branch: refs/heads/trunk
Commit: 3df61f303a8c0f5105661dc86fc3a34a60c49066
Parents: b5a22e9
Author: yliu y...@apache.org
Authored: Tue Mar 31 21:17:44 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Mar 31 21:17:44 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java | 3 ++-
 .../hadoop-hdfs/src/test/resources/testXAttrConf.xml | 4 ++--
 3 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3df61f30/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0bea916..e8075e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -392,6 +392,9 @@ Release 2.8.0 - UNRELEASED
 DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin
 P. McCabe)
 
+HDFS-7997. The first non-existing xattr should also throw IOException.
+(zhouyingchao via yliu)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3df61f30/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 45e63f2..d5c9124 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -108,7 +108,8 @@ class FSDirXAttrOp {
   return filteredAll;
 }
 if (filteredAll == null || filteredAll.isEmpty()) {
-  return null;
+      throw new IOException(
+          "At least one of the attributes provided was not found.");
     }
     List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
 for (XAttr xAttr : xAttrs) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3df61f30/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
index 9c66cba..c2e836c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
@@ -420,8 +420,8 @@
      </cleanup-commands>
      <comparators>
        <comparator>
-          <type>ExactComparator</type>
-          <expected-output># file: /file1#LF#</expected-output>
+          <type>SubstringComparator</type>
+          <expected-output>At least one of the attributes provided was not found</expected-output>
        </comparator>
      </comparators>
    </test>



hadoop git commit: HDFS-7962. Remove duplicated logs in BlockManager. (yliu)

2015-03-20 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3f0c9e5fe -> 3fac95f48


HDFS-7962. Remove duplicated logs in BlockManager. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fac95f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fac95f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fac95f4

Branch: refs/heads/branch-2
Commit: 3fac95f48d3e2768e12f17d850a87f819f95c6fb
Parents: 3f0c9e5
Author: yliu y...@apache.org
Authored: Fri Mar 20 05:31:08 2015 +0800
Committer: yliu y...@apache.org
Committed: Fri Mar 20 05:31:08 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../server/blockmanagement/BlockManager.java| 21 
 2 files changed, 10 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fac95f4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 44fbfcf..f9c4258 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -462,6 +462,8 @@ Release 2.7.0 - UNRELEASED
HDFS-7849. Update documentation for enabling a new feature in rolling
 upgrade ( J.Andreina via vinayakumarb )
 
+HDFS-7962. Remove duplicated logs in BlockManager. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fac95f4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 73c1425..e582f11 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -365,10 +365,10 @@ public class BlockManager {
 
 if (!isEnabled) {
   if (UserGroupInformation.isSecurityEnabled()) {
-      LOG.error("Security is enabled but block access tokens " +
-                "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
-                "aren't enabled. This may cause issues " +
-                "when clients attempt to talk to a DataNode.");
+        LOG.error("Security is enabled but block access tokens " +
+            "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
+            "aren't enabled. This may cause issues " +
+            "when clients attempt to talk to a DataNode.");
   }
   return null;
 }
@@ -1223,10 +1223,6 @@ public class BlockManager {
 NumberReplicas nr = countNodes(b.stored);
     if (nr.replicasOnStaleNodes() > 0) {
       blockLog.info("BLOCK* invalidateBlocks: postponing " +
-          "invalidation of " + b + " on " + dn + " because " +
-          nr.replicasOnStaleNodes() + " replica(s) are located on nodes " +
-          "with potentially out-of-date block reports");
-      blockLog.info("BLOCK* invalidateBlocks: postponing " +
           "invalidation of {} on {} because {} replica(s) are located on " +
           "nodes with potentially out-of-date block reports", b, dn,
           nr.replicasOnStaleNodes());
@@ -1499,8 +1495,10 @@ public class BlockManager {
 }
   }
 }
-    blockLog.debug("BLOCK* neededReplications = {} pendingReplications = {}",
-        neededReplications.size(), pendingReplications.size());
+    if (blockLog.isDebugEnabled()) {
+      blockLog.debug("BLOCK* neededReplications = {} pendingReplications = {}",
+          neededReplications.size(), pendingReplications.size());
+    }
 
 return scheduledWork;
   }
@@ -2480,9 +2478,6 @@ public class BlockManager {
   }
 } else if (result == AddBlockResult.REPLACED) {
   curReplicaDelta = 0;
-      blockLog.warn("BLOCK* addStoredBlock: " + "block " + storedBlock
-          + " moved to storageType " + storageInfo.getStorageType()
-          + " on node " + node);
       blockLog.warn("BLOCK* addStoredBlock: block {} moved to storageType " +
           "{} on node {}", storedBlock, storageInfo.getStorageType(), node);
 } else {
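
The cleanup above converges on one SLF4J-style parameterized call per message and keeps the isDebugEnabled() guard where the arguments themselves are costly to compute. A minimal sketch of the pattern, assuming slf4j on the classpath; the class and message text are illustrative:

    import java.util.List;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LoggingSketch {
      private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

      void report(List<?> needed, List<?> pending) {
        // The {} form defers message formatting, but the arguments are still
        // evaluated eagerly, so hot paths guard the call level explicitly.
        if (LOG.isDebugEnabled()) {
          LOG.debug("neededReplications = {} pendingReplications = {}",
              needed.size(), pending.size());
        }
      }
    }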



hadoop git commit: HDFS-7962. Remove duplicated logs in BlockManager. (yliu)

2015-03-20 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4e886eb9c -> 978ef11f2


HDFS-7962. Remove duplicated logs in BlockManager. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/978ef11f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/978ef11f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/978ef11f

Branch: refs/heads/trunk
Commit: 978ef11f26794c22c7289582653b32268478e23e
Parents: 4e886eb
Author: yliu y...@apache.org
Authored: Fri Mar 20 05:29:25 2015 +0800
Committer: yliu y...@apache.org
Committed: Fri Mar 20 05:29:25 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../server/blockmanagement/BlockManager.java| 21 
 2 files changed, 10 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/978ef11f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b8353ad..d6fc88b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -767,6 +767,8 @@ Release 2.7.0 - UNRELEASED
HDFS-7849. Update documentation for enabling a new feature in rolling
 upgrade ( J.Andreina via vinayakumarb )
 
+HDFS-7962. Remove duplicated logs in BlockManager. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/978ef11f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7ad8649..a2fe165 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -365,10 +365,10 @@ public class BlockManager {
 
 if (!isEnabled) {
   if (UserGroupInformation.isSecurityEnabled()) {
-      LOG.error("Security is enabled but block access tokens " +
-                "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
-                "aren't enabled. This may cause issues " +
-                "when clients attempt to talk to a DataNode.");
+        LOG.error("Security is enabled but block access tokens " +
+            "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
+            "aren't enabled. This may cause issues " +
+            "when clients attempt to talk to a DataNode.");
   }
   return null;
 }
@@ -1223,10 +1223,6 @@ public class BlockManager {
 NumberReplicas nr = countNodes(b.stored);
     if (nr.replicasOnStaleNodes() > 0) {
       blockLog.info("BLOCK* invalidateBlocks: postponing " +
-          "invalidation of " + b + " on " + dn + " because " +
-          nr.replicasOnStaleNodes() + " replica(s) are located on nodes " +
-          "with potentially out-of-date block reports");
-      blockLog.info("BLOCK* invalidateBlocks: postponing " +
           "invalidation of {} on {} because {} replica(s) are located on " +
           "nodes with potentially out-of-date block reports", b, dn,
           nr.replicasOnStaleNodes());
@@ -1496,8 +1492,10 @@ public class BlockManager {
 }
   }
 }
-    blockLog.debug("BLOCK* neededReplications = {} pendingReplications = {}",
-        neededReplications.size(), pendingReplications.size());
+    if (blockLog.isDebugEnabled()) {
+      blockLog.debug("BLOCK* neededReplications = {} pendingReplications = {}",
+          neededReplications.size(), pendingReplications.size());
+    }
 
 return scheduledWork;
   }
@@ -2477,9 +2475,6 @@ public class BlockManager {
   }
 } else if (result == AddBlockResult.REPLACED) {
   curReplicaDelta = 0;
-      blockLog.warn("BLOCK* addStoredBlock: " + "block " + storedBlock
-          + " moved to storageType " + storageInfo.getStorageType()
-          + " on node " + node);
       blockLog.warn("BLOCK* addStoredBlock: block {} moved to storageType " +
           "{} on node {}", storedBlock, storageInfo.getStorageType(), node);
 } else {



hadoop git commit: HDFS-7962. Remove duplicated logs in BlockManager. (yliu)

2015-03-20 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 e67bd31ac -> faeb8b9b5


HDFS-7962. Remove duplicated logs in BlockManager. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/faeb8b9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/faeb8b9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/faeb8b9b

Branch: refs/heads/branch-2.7
Commit: faeb8b9b53a6a8858a7ca31f930189949f6026c7
Parents: e67bd31
Author: yliu y...@apache.org
Authored: Fri Mar 20 05:32:19 2015 +0800
Committer: yliu y...@apache.org
Committed: Fri Mar 20 05:32:19 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../server/blockmanagement/BlockManager.java| 21 
 2 files changed, 10 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/faeb8b9b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ee5c665..79785d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -442,6 +442,8 @@ Release 2.7.0 - UNRELEASED
HDFS-7849. Update documentation for enabling a new feature in rolling
 upgrade ( J.Andreina via vinayakumarb )
 
+HDFS-7962. Remove duplicated logs in BlockManager. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/faeb8b9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 73c1425..e582f11 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -365,10 +365,10 @@ public class BlockManager {
 
 if (!isEnabled) {
   if (UserGroupInformation.isSecurityEnabled()) {
-      LOG.error("Security is enabled but block access tokens " +
-                "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
-                "aren't enabled. This may cause issues " +
-                "when clients attempt to talk to a DataNode.");
+        LOG.error("Security is enabled but block access tokens " +
+            "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
+            "aren't enabled. This may cause issues " +
+            "when clients attempt to talk to a DataNode.");
   }
   return null;
 }
@@ -1223,10 +1223,6 @@ public class BlockManager {
 NumberReplicas nr = countNodes(b.stored);
     if (nr.replicasOnStaleNodes() > 0) {
       blockLog.info("BLOCK* invalidateBlocks: postponing " +
-          "invalidation of " + b + " on " + dn + " because " +
-          nr.replicasOnStaleNodes() + " replica(s) are located on nodes " +
-          "with potentially out-of-date block reports");
-      blockLog.info("BLOCK* invalidateBlocks: postponing " +
           "invalidation of {} on {} because {} replica(s) are located on " +
           "nodes with potentially out-of-date block reports", b, dn,
           nr.replicasOnStaleNodes());
@@ -1499,8 +1495,10 @@ public class BlockManager {
 }
   }
 }
-    blockLog.debug("BLOCK* neededReplications = {} pendingReplications = {}",
-        neededReplications.size(), pendingReplications.size());
+    if (blockLog.isDebugEnabled()) {
+      blockLog.debug("BLOCK* neededReplications = {} pendingReplications = {}",
+          neededReplications.size(), pendingReplications.size());
+    }
 
 return scheduledWork;
   }
@@ -2480,9 +2478,6 @@ public class BlockManager {
   }
 } else if (result == AddBlockResult.REPLACED) {
   curReplicaDelta = 0;
-      blockLog.warn("BLOCK* addStoredBlock: " + "block " + storedBlock
-          + " moved to storageType " + storageInfo.getStorageType()
-          + " on node " + node);
       blockLog.warn("BLOCK* addStoredBlock: block {} moved to storageType " +
           "{} on node {}", storedBlock, storageInfo.getStorageType(), node);
 } else {



hadoop git commit: HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)

2015-03-19 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 0be2461df -> 0da3fcb4b


HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0da3fcb4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0da3fcb4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0da3fcb4

Branch: refs/heads/branch-2.7
Commit: 0da3fcb4b0e6e7bff80c59d16b58bb4178093669
Parents: 0be2461
Author: yliu y...@apache.org
Authored: Thu Mar 19 23:26:26 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Mar 19 23:26:26 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../server/blockmanagement/BlockManager.java| 40 
 .../hdfs/server/namenode/FSNamesystem.java  |  8 +++-
 .../hdfs/server/namenode/TestFileTruncate.java  | 10 +
 4 files changed, 50 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da3fcb4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7e2348c..ee5c665 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -895,6 +895,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7932. Speed up the shutdown of datanode during rolling 
upgrade.(kihwal)
 
+HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da3fcb4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8f22558..73c1425 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1955,6 +1955,46 @@ public class BlockManager {
   }
 
   /**
+   * Mark block replicas as corrupt except those on the storages in 
+   * newStorages list.
+   */
+  public void markBlockReplicasAsCorrupt(BlockInfoContiguous block, 
+  long oldGenerationStamp, long oldNumBytes, 
+  DatanodeStorageInfo[] newStorages) throws IOException {
+assert namesystem.hasWriteLock();
+BlockToMarkCorrupt b = null;
+if (block.getGenerationStamp() != oldGenerationStamp) {
+      b = new BlockToMarkCorrupt(block, oldGenerationStamp,
+          "genstamp does not match " + oldGenerationStamp
+          + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
+    } else if (block.getNumBytes() != oldNumBytes) {
+      b = new BlockToMarkCorrupt(block,
+          "length does not match " + oldNumBytes
+          + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH);
+} else {
+  return;
+}
+
+for (DatanodeStorageInfo storage : getStorages(block)) {
+  boolean isCorrupt = true;
+  if (newStorages != null) {
+for (DatanodeStorageInfo newStorage : newStorages) {
+          if (newStorage != null && storage.equals(newStorage)) {
+isCorrupt = false;
+break;
+  }
+}
+  }
+  if (isCorrupt) {
+        blockLog.info("BLOCK* markBlockReplicasAsCorrupt: mark block replica" +
+            " {} on {} as corrupt because the dn is not in the new committed " +
+            "storage list.", b, storage.getDatanodeDescriptor());
+markBlockAsCorrupt(b, storage, storage.getDatanodeDescriptor());
+  }
+}
+  }
+
+  /**
* processFirstBlockReport is intended only for processing initial block
* reports, the first block report received from a DN after it registers.
* It just adds all the valid replicas to the datanode, without calculating 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da3fcb4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a8072a9..ad17e8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org

hadoop git commit: HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)

2015-03-19 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 91baca145 -> e37ca221b


HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e37ca221
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e37ca221
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e37ca221

Branch: refs/heads/trunk
Commit: e37ca221bf4e9ae5d5e667d8ca284df9fdb33199
Parents: 91baca1
Author: yliu y...@apache.org
Authored: Thu Mar 19 23:23:19 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Mar 19 23:23:19 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../server/blockmanagement/BlockManager.java| 40 
 .../hdfs/server/namenode/FSNamesystem.java  |  8 +++-
 .../hdfs/server/namenode/TestFileTruncate.java  | 10 +
 4 files changed, 50 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37ca221/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7392964..b8353ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1218,6 +1218,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7932. Speed up the shutdown of datanode during rolling 
upgrade.(kihwal)
 
+HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37ca221/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f155375..7ad8649 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1952,6 +1952,46 @@ public class BlockManager {
   }
 
   /**
+   * Mark block replicas as corrupt except those on the storages in 
+   * newStorages list.
+   */
+  public void markBlockReplicasAsCorrupt(BlockInfoContiguous block, 
+  long oldGenerationStamp, long oldNumBytes, 
+  DatanodeStorageInfo[] newStorages) throws IOException {
+assert namesystem.hasWriteLock();
+BlockToMarkCorrupt b = null;
+if (block.getGenerationStamp() != oldGenerationStamp) {
+      b = new BlockToMarkCorrupt(block, oldGenerationStamp,
+          "genstamp does not match " + oldGenerationStamp
+          + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
+    } else if (block.getNumBytes() != oldNumBytes) {
+      b = new BlockToMarkCorrupt(block,
+          "length does not match " + oldNumBytes
+          + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH);
+} else {
+  return;
+}
+
+for (DatanodeStorageInfo storage : getStorages(block)) {
+  boolean isCorrupt = true;
+  if (newStorages != null) {
+for (DatanodeStorageInfo newStorage : newStorages) {
+          if (newStorage != null && storage.equals(newStorage)) {
+isCorrupt = false;
+break;
+  }
+}
+  }
+  if (isCorrupt) {
+        blockLog.info("BLOCK* markBlockReplicasAsCorrupt: mark block replica" +
+            " {} on {} as corrupt because the dn is not in the new committed " +
+            "storage list.", b, storage.getDatanodeDescriptor());
+markBlockAsCorrupt(b, storage, storage.getDatanodeDescriptor());
+  }
+}
+  }
+
+  /**
* processFirstBlockReport is intended only for processing initial block
* reports, the first block report received from a DN after it registers.
* It just adds all the valid replicas to the datanode, without calculating 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e37ca221/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 8822965..3aefbf6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache

hadoop git commit: HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)

2015-03-19 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cabe676d6 -> 90164ffd8


HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90164ffd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90164ffd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90164ffd

Branch: refs/heads/branch-2
Commit: 90164ffd84f6ef56e9f8f99dcc7424a8d115dbae
Parents: cabe676
Author: yliu y...@apache.org
Authored: Thu Mar 19 23:24:55 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Mar 19 23:24:55 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../server/blockmanagement/BlockManager.java| 40 
 .../hdfs/server/namenode/FSNamesystem.java  |  8 +++-
 .../hdfs/server/namenode/TestFileTruncate.java  | 10 +
 4 files changed, 50 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90164ffd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ead8912..44fbfcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -915,6 +915,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7932. Speed up the shutdown of datanode during rolling 
upgrade.(kihwal)
 
+HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90164ffd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8f22558..73c1425 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1955,6 +1955,46 @@ public class BlockManager {
   }
 
   /**
+   * Mark block replicas as corrupt except those on the storages in 
+   * newStorages list.
+   */
+  public void markBlockReplicasAsCorrupt(BlockInfoContiguous block, 
+  long oldGenerationStamp, long oldNumBytes, 
+  DatanodeStorageInfo[] newStorages) throws IOException {
+assert namesystem.hasWriteLock();
+BlockToMarkCorrupt b = null;
+if (block.getGenerationStamp() != oldGenerationStamp) {
+      b = new BlockToMarkCorrupt(block, oldGenerationStamp,
+          "genstamp does not match " + oldGenerationStamp
+          + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
+    } else if (block.getNumBytes() != oldNumBytes) {
+      b = new BlockToMarkCorrupt(block,
+          "length does not match " + oldNumBytes
+          + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH);
+} else {
+  return;
+}
+
+for (DatanodeStorageInfo storage : getStorages(block)) {
+  boolean isCorrupt = true;
+  if (newStorages != null) {
+for (DatanodeStorageInfo newStorage : newStorages) {
+          if (newStorage != null && storage.equals(newStorage)) {
+isCorrupt = false;
+break;
+  }
+}
+  }
+  if (isCorrupt) {
+        blockLog.info("BLOCK* markBlockReplicasAsCorrupt: mark block replica" +
+            " {} on {} as corrupt because the dn is not in the new committed " +
+            "storage list.", b, storage.getDatanodeDescriptor());
+markBlockAsCorrupt(b, storage, storage.getDatanodeDescriptor());
+  }
+}
+  }
+
+  /**
* processFirstBlockReport is intended only for processing initial block
* reports, the first block report received from a DN after it registers.
* It just adds all the valid replicas to the datanode, without calculating 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90164ffd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a8072a9..ad17e8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org
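
The core of the new helper is a containment test: a replica is marked corrupt unless its storage appears in the committed newStorages list. A hedged sketch of that test with deliberately simplified types (the real code iterates DatanodeStorageInfo objects inline, as in the diff above):

    // Types simplified to Object for illustration only.
    static boolean inNewStorages(Object storage, Object[] newStorages) {
      if (newStorages == null) {
        return false;                    // no committed list: nothing survives
      }
      for (Object s : newStorages) {
        if (s != null && storage.equals(s)) {
          return true;                   // replica is on a committed storage
        }
      }
      return false;                      // absent: the caller marks it corrupt
    }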

hadoop git commit: HDFS-7838. Expose truncate API for libhdfs. (yliu)

2015-03-17 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk ef9946cd5 -> 48c2db34e


HDFS-7838. Expose truncate API for libhdfs. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48c2db34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48c2db34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48c2db34

Branch: refs/heads/trunk
Commit: 48c2db34eff376c0f3a72587a5540b1e3dffafd2
Parents: ef9946c
Author: yliu y...@apache.org
Authored: Tue Mar 17 07:22:17 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Mar 17 07:22:17 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../src/contrib/libwebhdfs/src/hdfs_web.c   |  6 
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.c  | 37 
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.h  | 15 
 4 files changed, 60 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48c2db34/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9339b97..ad3e880 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -364,6 +364,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-6488. Support HDFS superuser in NFS gateway. (brandonli)
 
+HDFS-7838. Expose truncate API for libhdfs. (yliu)
+
   IMPROVEMENTS
 
 HDFS-7752. Improve description for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48c2db34/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
index deb11ef..86b4faf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
@@ -1124,6 +1124,12 @@ done:
 return file;
 }
 
+int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength)
+{
+errno = ENOTSUP;
+return -1;
+}
+
 tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer, tSize length)
 {
 if (length == 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48c2db34/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
index 34a..504d47e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
@@ -1037,6 +1037,43 @@ done:
 return file;
 }
 
+int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength)
+{
+jobject jFS = (jobject)fs;
+jthrowable jthr;
+jvalue jVal;
+jobject jPath = NULL;
+
+JNIEnv *env = getJNIEnv();
+
+if (!env) {
+errno = EINTERNAL;
+return -1;
+}
+
+/* Create an object of org.apache.hadoop.fs.Path */
+    jthr = constructNewObjectOfPath(env, path, &jPath);
+    if (jthr) {
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "hdfsTruncateFile(%s): constructNewObjectOfPath", path);
+        return -1;
+    }
+
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+        "truncate", JMETHOD2(JPARAM(HADOOP_PATH), "J", "Z"),
+        jPath, newlength);
+    destroyLocalReference(env, jPath);
+    if (jthr) {
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "hdfsTruncateFile(%s): FileSystem#truncate", path);
+        return -1;
+    }
+if (jVal.z == JNI_TRUE) {
+return 1;
+}
+return 0;
+}
+
 int hdfsUnbufferFile(hdfsFile file)
 {
 int ret;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48c2db34/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
index 64889ed..5b7bc1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
@@ -396,6 +396,21 @@ extern "C" {
   int bufferSize, short replication, tSize blocksize);
 
 /**
+ * hdfsTruncateFile - Truncate a hdfs file to given length.
+ * @param fs The configured filesystem handle.
+ * @param path The full path to the file.
+ * @param newlength
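
On the Java side the new wrapper invokes FileSystem#truncate and maps its boolean result to the C return value. A hedged sketch of the equivalent Java call; the path and length are illustrative:

    import org.apache.hadoop.fs.Path;

    // 'fs' is an initialized FileSystem.
    boolean done = fs.truncate(new Path("/user/demo/file"), 1024L);
    // true  -> truncate completed immediately (hdfsTruncateFile returns 1)
    // false -> truncate is waiting on block recovery (returns 0); the last
    //          block becomes writable again once recovery finishes.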

hadoop git commit: HDFS-7838. Expose truncate API for libhdfs. (yliu)

2015-03-17 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 77297017d -> 991ac04af


HDFS-7838. Expose truncate API for libhdfs. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/991ac04a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/991ac04a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/991ac04a

Branch: refs/heads/branch-2
Commit: 991ac04afc3a7cea59993a304b7c6b1286ac8c4f
Parents: 7729701
Author: yliu y...@apache.org
Authored: Tue Mar 17 07:24:20 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Mar 17 07:24:20 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../src/contrib/libwebhdfs/src/hdfs_web.c   |  6 
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.c  | 37 
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.h  | 15 
 4 files changed, 60 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/991ac04a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f788a9b..8e1a696 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -53,6 +53,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-6488. Support HDFS superuser in NFS gateway. (brandonli)
 
+HDFS-7838. Expose truncate API for libhdfs. (yliu)
+
   IMPROVEMENTS
 
 HDFS-7752. Improve description for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/991ac04a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
index deb11ef..86b4faf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
@@ -1124,6 +1124,12 @@ done:
 return file;
 }
 
+int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength)
+{
+errno = ENOTSUP;
+return -1;
+}
+
 tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer, tSize length)
 {
 if (length == 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/991ac04a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
index 27a2809..5c39dde 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
@@ -1037,6 +1037,43 @@ done:
 return file;
 }
 
+int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength)
+{
+jobject jFS = (jobject)fs;
+jthrowable jthr;
+jvalue jVal;
+jobject jPath = NULL;
+
+JNIEnv *env = getJNIEnv();
+
+if (!env) {
+errno = EINTERNAL;
+return -1;
+}
+
+/* Create an object of org.apache.hadoop.fs.Path */
+    jthr = constructNewObjectOfPath(env, path, &jPath);
+    if (jthr) {
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "hdfsTruncateFile(%s): constructNewObjectOfPath", path);
+        return -1;
+    }
+
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+        "truncate", JMETHOD2(JPARAM(HADOOP_PATH), "J", "Z"),
+        jPath, newlength);
+    destroyLocalReference(env, jPath);
+    if (jthr) {
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "hdfsTruncateFile(%s): FileSystem#truncate", path);
+        return -1;
+    }
+if (jVal.z == JNI_TRUE) {
+return 1;
+}
+return 0;
+}
+
 int hdfsUnbufferFile(hdfsFile file)
 {
 int ret;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/991ac04a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
index 64889ed..5b7bc1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
@@ -396,6 +396,21 @@ extern "C" {
   int bufferSize, short replication, tSize blocksize);
 
 /**
+ * hdfsTruncateFile - Truncate a hdfs file to given length.
+ * @param fs The configured filesystem handle.
+ * @param path The full path to the file.
+ * @param newlength

hadoop git commit: HDFS-7838. Expose truncate API for libhdfs. (yliu)

2015-03-17 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 51c374ac1 -> ef9d46dcb


HDFS-7838. Expose truncate API for libhdfs. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef9d46dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef9d46dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef9d46dc

Branch: refs/heads/branch-2.7
Commit: ef9d46dcb6bc71f1ad6ce5b2e439cd443b589224
Parents: 51c374a
Author: yliu y...@apache.org
Authored: Tue Mar 17 07:25:58 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Mar 17 07:25:58 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../src/contrib/libwebhdfs/src/hdfs_web.c   |  6 
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.c  | 37 
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.h  | 15 
 4 files changed, 60 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef9d46dc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 91d3459..3f5da9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -39,6 +39,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-6488. Support HDFS superuser in NFS gateway. (brandonli)
 
+HDFS-7838. Expose truncate API for libhdfs. (yliu)
+
   IMPROVEMENTS
 
 HDFS-7752. Improve description for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef9d46dc/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
index deb11ef..86b4faf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
@@ -1124,6 +1124,12 @@ done:
 return file;
 }
 
+int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength)
+{
+errno = ENOTSUP;
+return -1;
+}
+
 tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer, tSize length)
 {
 if (length == 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef9d46dc/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
index 27a2809..5c39dde 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
@@ -1037,6 +1037,43 @@ done:
 return file;
 }
 
+int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength)
+{
+jobject jFS = (jobject)fs;
+jthrowable jthr;
+jvalue jVal;
+jobject jPath = NULL;
+
+JNIEnv *env = getJNIEnv();
+
+if (!env) {
+errno = EINTERNAL;
+return -1;
+}
+
+/* Create an object of org.apache.hadoop.fs.Path */
+    jthr = constructNewObjectOfPath(env, path, &jPath);
+    if (jthr) {
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "hdfsTruncateFile(%s): constructNewObjectOfPath", path);
+        return -1;
+    }
+
+    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+        "truncate", JMETHOD2(JPARAM(HADOOP_PATH), "J", "Z"),
+        jPath, newlength);
+    destroyLocalReference(env, jPath);
+    if (jthr) {
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "hdfsTruncateFile(%s): FileSystem#truncate", path);
+        return -1;
+    }
+if (jVal.z == JNI_TRUE) {
+return 1;
+}
+return 0;
+}
+
 int hdfsUnbufferFile(hdfsFile file)
 {
 int ret;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef9d46dc/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
index 64889ed..5b7bc1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
@@ -396,6 +396,21 @@ extern "C" {
   int bufferSize, short replication, tSize blocksize);
 
 /**
+ * hdfsTruncateFile - Truncate a hdfs file to given length.
+ * @param fs The configured filesystem handle.
+ * @param path The full path to the file.
+ * @param newlength

hadoop git commit: HADOOP-11710. Make CryptoOutputStream behave like DFSOutputStream wrt synchronization. (Sean Busbey via yliu)

2015-03-12 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 32e62cb3d -> 813c93cb2


HADOOP-11710. Make CryptoOutputStream behave like DFSOutputStream wrt 
synchronization. (Sean Busbey via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/813c93cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/813c93cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/813c93cb

Branch: refs/heads/branch-2
Commit: 813c93cb250d6d556604fe98845b979970bd5e18
Parents: 32e62cb
Author: yliu y...@apache.org
Authored: Fri Mar 13 02:26:16 2015 +0800
Committer: yliu y...@apache.org
Committed: Fri Mar 13 02:26:16 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../apache/hadoop/crypto/CryptoOutputStream.java | 19 ---
 2 files changed, 15 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/813c93cb/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3317dfb..2745e18 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -683,6 +683,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11693. Azure Storage FileSystem rename operations are throttled too
 aggressively to complete HBase WAL archiving. (Duo Xu via cnauroth)
 
+HADOOP-11710. Make CryptoOutputStream behave like DFSOutputStream wrt
+synchronization. (Sean Busbey via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/813c93cb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
index 120b378..1753019 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
@@ -40,6 +40,9 @@ import com.google.common.base.Preconditions;
  * padding = pos%(algorithm blocksize); 
 * <p/>
  * The underlying stream offset is maintained as state.
+ *
+ * Note that while some of this class' methods are synchronized, this is just to
+ * match the threadsafety behavior of DFSOutputStream. See HADOOP-11710.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -126,7 +129,7 @@ public class CryptoOutputStream extends FilterOutputStream implements
* @throws IOException
*/
   @Override
-  public void write(byte[] b, int off, int len) throws IOException {
+  public synchronized void write(byte[] b, int off, int len) throws IOException {
 checkStream();
 if (b == null) {
   throw new NullPointerException();
@@ -213,14 +216,16 @@ public class CryptoOutputStream extends FilterOutputStream implements
   }
   
   @Override
-  public void close() throws IOException {
+  public synchronized void close() throws IOException {
 if (closed) {
   return;
 }
-
-super.close();
-freeBuffers();
-closed = true;
+try {
+  super.close();
+  freeBuffers();
+} finally {
+  closed = true;
+}
   }
   
   /**
@@ -228,7 +233,7 @@ public class CryptoOutputStream extends FilterOutputStream implements
* underlying stream, then do the flush.
*/
   @Override
-  public void flush() throws IOException {
+  public synchronized void flush() throws IOException {
 checkStream();
 encrypt();
 super.flush();



hadoop git commit: HADOOP-11710. Make CryptoOutputStream behave like DFSOutputStream wrt synchronization. (Sean Busbey via yliu)

2015-03-12 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 821287741 -> a85291003


HADOOP-11710. Make CryptoOutputStream behave like DFSOutputStream wrt 
synchronization. (Sean Busbey via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8529100
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8529100
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8529100

Branch: refs/heads/trunk
Commit: a85291003cf3e3fd79b6addcf59d4f43dc72d356
Parents: 8212877
Author: yliu y...@apache.org
Authored: Fri Mar 13 02:25:02 2015 +0800
Committer: yliu y...@apache.org
Committed: Fri Mar 13 02:25:02 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../apache/hadoop/crypto/CryptoOutputStream.java | 19 ---
 2 files changed, 15 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8529100/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6970bad..55028cb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1097,6 +1097,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11693. Azure Storage FileSystem rename operations are throttled too
 aggressively to complete HBase WAL archiving. (Duo Xu via cnauroth)
 
+HADOOP-11710. Make CryptoOutputStream behave like DFSOutputStream wrt
+synchronization. (Sean Busbey via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8529100/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
index f1ea0fc..bc09b8c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
@@ -40,6 +40,9 @@ import com.google.common.base.Preconditions;
  * padding = pos%(algorithm blocksize); 
 * <p/>
  * The underlying stream offset is maintained as state.
+ *
+ * Note that while some of this class' methods are synchronized, this is just to
+ * match the threadsafety behavior of DFSOutputStream. See HADOOP-11710.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -126,7 +129,7 @@ public class CryptoOutputStream extends FilterOutputStream 
implements
* @throws IOException
*/
   @Override
-  public void write(byte[] b, int off, int len) throws IOException {
+  public synchronized void write(byte[] b, int off, int len) throws IOException {
 checkStream();
 if (b == null) {
   throw new NullPointerException();
@@ -213,14 +216,16 @@ public class CryptoOutputStream extends 
FilterOutputStream implements
   }
   
   @Override
-  public void close() throws IOException {
+  public synchronized void close() throws IOException {
 if (closed) {
   return;
 }
-
-super.close();
-freeBuffers();
-closed = true;
+try {
+  super.close();
+  freeBuffers();
+} finally {
+  closed = true;
+}
   }
   
   /**
@@ -228,7 +233,7 @@ public class CryptoOutputStream extends FilterOutputStream 
implements
* underlying stream, then do the flush.
*/
   @Override
-  public void flush() throws IOException {
+  public synchronized void flush() throws IOException {
 checkStream();
 encrypt();
 super.flush();
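
For readers skimming the patch: the race being closed is between one thread
filling the stream's internal plaintext buffer via write() and another thread
calling flush()/hflush(), which runs encrypt() over that same buffer. The
sketch below is hypothetical driver code, not part of the patch; a plain
ByteArrayOutputStream stands in for the crypto stream so it runs anywhere.
With CryptoOutputStream, this access pattern is only safe once write(),
flush() and close() share the stream's monitor, which is what the patch does.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class ConcurrentFlushSketch {
  public static void main(String[] args) throws InterruptedException {
    // Stand-in for a CryptoOutputStream wrapping a DFSOutputStream.
    final OutputStream out = new ByteArrayOutputStream();

    Thread writer = new Thread(() -> {
      try {
        for (int i = 0; i < 10000; i++) {
          out.write(new byte[] {1, 2, 3}, 0, 3); // fills the internal buffer
        }
      } catch (IOException e) {
        e.printStackTrace();
      }
    });

    Thread flusher = new Thread(() -> {
      try {
        for (int i = 0; i < 10000; i++) {
          out.flush(); // in CryptoOutputStream this encrypts the buffer
        }
      } catch (IOException e) {
        e.printStackTrace();
      }
    });

    writer.start();
    flusher.start();
    writer.join();
    flusher.join();
  }
}

The try/finally added to close() is a separate hardening: closed is set even
if the underlying close() throws, so a retried close() cannot free the
direct buffers twice.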



hadoop git commit: HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream should be non static. (Sean Busbey via yliu)

2015-03-04 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f805d48b1 -> b569c3ab1


HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream should be 
non static. (Sean Busbey via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b569c3ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b569c3ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b569c3ab

Branch: refs/heads/branch-2
Commit: b569c3ab1cb7e328dde822f6b2405d24b9560e3a
Parents: f805d48
Author: yliu y...@apache.org
Authored: Thu Mar 5 06:39:58 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Mar 5 06:39:58 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java | 2 +-
 .../main/java/org/apache/hadoop/crypto/CryptoOutputStream.java| 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b569c3ab/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d7b0980..0159a0d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -634,6 +634,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11666. Revert the format change of du output introduced by
 HADOOP-6857. (Byron Wong via aajisaka)
 
+HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream
+should be non static. (Sean Busbey via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b569c3ab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
index f3e5b90..2e87f91 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
@@ -60,7 +60,7 @@ public class CryptoInputStream extends FilterInputStream 
implements
 Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, 
 CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess, 
 ReadableByteChannel {
-  private static final byte[] oneByteBuf = new byte[1];
+  private final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Decryptor decryptor;
   private final int bufferSize;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b569c3ab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
index ce72700..120b378 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
@@ -45,7 +45,7 @@ import com.google.common.base.Preconditions;
 @InterfaceStability.Evolving
 public class CryptoOutputStream extends FilterOutputStream implements 
 Syncable, CanSetDropBehind {
-  private static final byte[] oneByteBuf = new byte[1];
+  private final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Encryptor encryptor;
   private final int bufferSize;



hadoop git commit: HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream should be non static. (Sean Busbey via yliu)

2015-03-04 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 74a4754d1 -> 5e9b8144d


HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream should be 
non static. (Sean Busbey via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e9b8144
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e9b8144
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e9b8144

Branch: refs/heads/trunk
Commit: 5e9b8144d54f586803212a0bdd8b1c25bdbb1e97
Parents: 74a4754
Author: yliu y...@apache.org
Authored: Thu Mar 5 06:38:45 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Mar 5 06:38:45 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java | 2 +-
 .../main/java/org/apache/hadoop/crypto/CryptoOutputStream.java| 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e9b8144/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 92af646..65c6d85 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1041,6 +1041,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11605. FilterFileSystem#create with ChecksumOpt should propagate it
 to wrapped FS. (gera)
 
+HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream
+should be non static. (Sean Busbey via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e9b8144/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
index f3e5b90..2e87f91 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
@@ -60,7 +60,7 @@ public class CryptoInputStream extends FilterInputStream 
implements
 Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, 
 CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess, 
 ReadableByteChannel {
-  private static final byte[] oneByteBuf = new byte[1];
+  private final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Decryptor decryptor;
   private final int bufferSize;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e9b8144/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
index 876ffd6..f1ea0fc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
@@ -45,7 +45,7 @@ import com.google.common.base.Preconditions;
 @InterfaceStability.Evolving
 public class CryptoOutputStream extends FilterOutputStream implements 
 Syncable, CanSetDropBehind {
-  private static final byte[] oneByteBuf = new byte[1];
+  private final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Encryptor encryptor;
   private final int bufferSize;
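
The one-byte scratch buffer backs the single-byte write()/read() paths. The
following is a minimal sketch of the pattern (a hypothetical class that
mirrors CryptoOutputStream rather than quoting it), showing why a static
field was a cross-instance race:

import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;

// Were oneByteBuf static, every stream instance in the JVM would share this
// scratch byte, so two unrelated streams writing concurrently could clobber
// each other's data. An instance field removes the race without changing
// the per-stream behavior (a single stream is still not thread-safe for
// interleaved single-byte writes).
class OneByteBufSketch extends FilterOutputStream {
  private final byte[] oneByteBuf = new byte[1]; // per-instance, as in HADOOP-11674

  OneByteBufSketch(OutputStream out) {
    super(out);
  }

  @Override
  public void write(int b) throws IOException {
    oneByteBuf[0] = (byte) b;     // stage the single byte
    out.write(oneByteBuf, 0, 1);  // delegate to the array variant
  }
}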



hadoop git commit: HDFS-7740. Test truncate with DataNodes restarting. (yliu)

2015-02-20 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 199a853ed -> eca1588db


HDFS-7740. Test truncate with DataNodes restarting. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eca1588d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eca1588d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eca1588d

Branch: refs/heads/branch-2
Commit: eca1588db82a40dbd071bac6be1361168902b9e9
Parents: 199a853
Author: yliu y...@apache.org
Authored: Sat Feb 21 06:33:58 2015 +0800
Committer: yliu y...@apache.org
Committed: Sat Feb 21 06:33:58 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hdfs/server/namenode/TestFileTruncate.java  | 221 +++
 2 files changed, 223 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eca1588d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 365a1e2..9bdccfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -367,6 +367,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7773. Additional metrics in HDFS to be accessed via jmx.
 (Anu Engineer via cnauroth)
 
+HDFS-7740. Test truncate with DataNodes restarting. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eca1588d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 253727d..19b5cde 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -93,6 +94,8 @@ public class TestFileTruncate {
 conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
 conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
+conf.setLong(
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
 cluster = new MiniDFSCluster.Builder(conf)
 .format(true)
 .numDataNodes(DATANODE_NUM)
@@ -623,6 +626,224 @@ public class TestFileTruncate {
   }
 
   /**
+   * The last block is truncated at mid. (non copy-on-truncate)
+   * dn0 is shutdown before truncate and restart after truncate successful.
+   */
+  @Test(timeout=60000)
+  public void testTruncateWithDataNodesRestart() throws Exception {
+int startingFileSize = 3 * BLOCK_SIZE;
+byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
+final Path parent = new Path("/test");
+final Path p = new Path(parent, "testTruncateWithDataNodesRestart");
+
+writeContents(contents, startingFileSize, p);
+LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
+
+int dn = 0;
+int toTruncateLength = 1;
+int newLength = startingFileSize - toTruncateLength;
+cluster.getDataNodes().get(dn).shutdown();
+try {
+  boolean isReady = fs.truncate(p, newLength);
+  assertFalse(isReady);
+} finally {
+  cluster.restartDataNode(dn);
+  cluster.waitActive();
+  cluster.triggerBlockReports();
+}
+
+LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
+/*
+ * For non copy-on-truncate, the truncated block id is the same, but the 
+ * GS should increase.
+ * We trigger block report for dn0 after it restarts, since the GS 
+ * of replica for the last block on it is old, so the reported last block
+ * from dn0 should be marked corrupt on nn and the replicas of last block 
+ * on nn should decrease 1, then the truncated block will be replicated 
+ * to dn0.
+ */
+assertEquals(newBlock.getBlock().getBlockId(), oldBlock.getBlock().getBlockId());

hadoop git commit: HDFS-7740. Test truncate with DataNodes restarting. (yliu)

2015-02-20 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6f0133039 -> 737bad02d


HDFS-7740. Test truncate with DataNodes restarting. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/737bad02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/737bad02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/737bad02

Branch: refs/heads/trunk
Commit: 737bad02d4cf879fa7d20b7c0e083d9dc59f604c
Parents: 6f01330
Author: yliu y...@apache.org
Authored: Sat Feb 21 06:32:34 2015 +0800
Committer: yliu y...@apache.org
Committed: Sat Feb 21 06:32:34 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hdfs/server/namenode/TestFileTruncate.java  | 221 +++
 2 files changed, 223 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/737bad02/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c47b89d..c8b6610 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -666,6 +666,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7773. Additional metrics in HDFS to be accessed via jmx.
 (Anu Engineer via cnauroth)
 
+HDFS-7740. Test truncate with DataNodes restarting. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/737bad02/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 253727d..19b5cde 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -93,6 +94,8 @@ public class TestFileTruncate {
 conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
 conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
+conf.setLong(
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
 cluster = new MiniDFSCluster.Builder(conf)
 .format(true)
 .numDataNodes(DATANODE_NUM)
@@ -623,6 +626,224 @@ public class TestFileTruncate {
   }
 
   /**
+   * The last block is truncated at mid. (non copy-on-truncate)
+   * dn0 is shutdown before truncate and restart after truncate successful.
+   */
+  @Test(timeout=60000)
+  public void testTruncateWithDataNodesRestart() throws Exception {
+int startingFileSize = 3 * BLOCK_SIZE;
+byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
+final Path parent = new Path("/test");
+final Path p = new Path(parent, "testTruncateWithDataNodesRestart");
+
+writeContents(contents, startingFileSize, p);
+LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
+
+int dn = 0;
+int toTruncateLength = 1;
+int newLength = startingFileSize - toTruncateLength;
+cluster.getDataNodes().get(dn).shutdown();
+try {
+  boolean isReady = fs.truncate(p, newLength);
+  assertFalse(isReady);
+} finally {
+  cluster.restartDataNode(dn);
+  cluster.waitActive();
+  cluster.triggerBlockReports();
+}
+
+LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
+/*
+ * For non copy-on-truncate, the truncated block id is the same, but the 
+ * GS should increase.
+ * We trigger block report for dn0 after it restarts, since the GS 
+ * of replica for the last block on it is old, so the reported last block
+ * from dn0 should be marked corrupt on nn and the replicas of last block 
+ * on nn should decrease 1, then the truncated block will be replicated 
+ * to dn0.
+ */
+assertEquals(newBlock.getBlock().getBlockId(), oldBlock.getBlock().getBlockId());
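
The contract this test exercises: FileSystem#truncate returns true only when
the file is immediately usable, and false when the last block still needs
recovery (the non copy-on-truncate case above). A minimal client-side sketch,
assuming fs.defaultFS points at an HDFS cluster and using a hypothetical path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class TruncateWaitSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem fs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    Path p = new Path("/tmp/truncate-demo"); // hypothetical path
    boolean isReady = fs.truncate(p, 1024L);
    // false: block recovery is in progress and the file stays open until
    // the NameNode finishes, so poll isFileClosed() before appending again.
    while (!isReady && !fs.isFileClosed(p)) {
      Thread.sleep(300);
    }
  }
}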

hadoop git commit: HADOOP-11595. Add default implementation for AbstractFileSystem#truncate. (yliu)

2015-02-19 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk a19820f2f -> 64a837563


HADOOP-11595. Add default implementation for AbstractFileSystem#truncate. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64a83756
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64a83756
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64a83756

Branch: refs/heads/trunk
Commit: 64a83756350d9d0f07b72c84f2719e82cf78ee49
Parents: a19820f
Author: yliu y...@apache.org
Authored: Thu Feb 19 08:26:42 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Feb 19 08:26:42 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../main/java/org/apache/hadoop/fs/AbstractFileSystem.java| 7 +--
 .../src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java  | 6 --
 3 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64a83756/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index e6d560a..c01e3d6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -970,6 +970,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11545. ArrayIndexOutOfBoundsException is thrown with hadoop
 credential list -provider. (Brahma Reddy Battula via aajisaka)
 
+HADOOP-11595. Add default implementation for AbstractFileSystem#truncate.
+(yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64a83756/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 975cc3c..959d9d5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -642,9 +642,12 @@ public abstract class AbstractFileSystem {
* {@link FileContext#truncate(Path, long)} except that Path f must be for
* this file system.
*/
-  public abstract boolean truncate(Path f, long newLength)
+  public boolean truncate(Path f, long newLength)
   throws AccessControlException, FileNotFoundException,
-  UnresolvedLinkException, IOException;
+  UnresolvedLinkException, IOException {
+throw new UnsupportedOperationException(getClass().getSimpleName()
++ " doesn't support truncate");
+  }
 
   /**
* The specification of this method matches that of

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64a83756/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
index 6b9378d..3bd14f1 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
@@ -141,12 +141,6 @@ public class TestAfsCheckPath {
 }
 
 @Override
-public boolean truncate(Path f, long newLength) throws IOException {
-  // deliberately empty
-  return false;
-}
-
-@Override
 public void renameInternal(Path src, Path dst) throws IOException {
   // deliberately empty
 }
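
With the default implementation in place, an AbstractFileSystem subclass that
cannot truncate no longer needs the kind of boilerplate override deleted from
TestAfsCheckPath above; it simply inherits the UnsupportedOperationException
behavior. A sketch (MyFs is hypothetical):

import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.fs.AbstractFileSystem;

abstract class MyFs extends AbstractFileSystem {
  MyFs() throws URISyntaxException {
    super(new URI("myfs:///"), "myfs", false, 0);
  }
  // No truncate override required: callers now get
  // UnsupportedOperationException("MyFs doesn't support truncate")
  // from the inherited default.
}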



hadoop git commit: HDFS-7656. Expose truncate API for HDFS httpfs. (yliu)

2015-02-19 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 64a837563 -> 2fd02afec


HDFS-7656. Expose truncate API for HDFS httpfs. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fd02afe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fd02afe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fd02afe

Branch: refs/heads/trunk
Commit: 2fd02afeca3710f487b6a039a65c1a666322b229
Parents: 64a8375
Author: yliu y...@apache.org
Authored: Thu Feb 19 08:36:31 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Feb 19 08:36:31 2015 +0800

--
 .../hadoop/fs/http/client/HttpFSFileSystem.java | 24 ++-
 .../hadoop/fs/http/server/FSOperations.java | 43 +++-
 .../http/server/HttpFSParametersProvider.java   | 20 +
 .../hadoop/fs/http/server/HttpFSServer.java | 10 +
 .../fs/http/client/BaseTestHttpFSWith.java  | 40 --
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 6 files changed, 133 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fd02afe/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 5b079e9..20b212e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -109,12 +109,15 @@ public class HttpFSFileSystem extends FileSystem
   public static final String XATTR_VALUE_PARAM = "xattr.value";
   public static final String XATTR_SET_FLAG_PARAM = "flag";
   public static final String XATTR_ENCODING_PARAM = "encoding";
+  public static final String NEW_LENGTH_PARAM = "newlength";
 
   public static final Short DEFAULT_PERMISSION = 0755;
   public static final String ACLSPEC_DEFAULT = "";
 
   public static final String RENAME_JSON = "boolean";
 
+  public static final String TRUNCATE_JSON = "boolean";
+
   public static final String DELETE_JSON = "boolean";
 
   public static final String MKDIRS_JSON = "boolean";
@@ -191,7 +194,7 @@ public class HttpFSFileSystem extends FileSystem
 GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
 GETFILECHECKSUM(HTTP_GET),  GETFILEBLOCKLOCATIONS(HTTP_GET),
 INSTRUMENTATION(HTTP_GET), GETACLSTATUS(HTTP_GET),
-APPEND(HTTP_POST), CONCAT(HTTP_POST),
+APPEND(HTTP_POST), CONCAT(HTTP_POST), TRUNCATE(HTTP_POST),
 CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
 SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
 MODIFYACLENTRIES(HTTP_PUT), REMOVEACLENTRIES(HTTP_PUT),
@@ -568,6 +571,25 @@ public class HttpFSFileSystem extends FileSystem
   }
 
   /**
+   * Truncate a file.
+   * 
+   * @param f the file to be truncated.
+   * @param newLength The size the file is to be truncated to.
+   *
+   * @throws IOException
+   */
+  @Override
+  public boolean truncate(Path f, long newLength) throws IOException {
+Map<String, String> params = new HashMap<String, String>();
+params.put(OP_PARAM, Operation.TRUNCATE.toString());
+params.put(NEW_LENGTH_PARAM, Long.toString(newLength));
+HttpURLConnection conn = getConnection(Operation.TRUNCATE.getMethod(),
+params, f, true);
+JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+return (Boolean) json.get(TRUNCATE_JSON);
+  }
+
+  /**
* Concat existing files together.
* @param f the path to the target destination.
* @param psrcs the paths to the sources to use for the concatenation.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fd02afe/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 4b72a51..bc290a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -364,7 +364,7 @@ public class FSOperations {
   }
 
   /**
-   * Executor that performs an append FileSystemAccess files system operation.
+   * Executor that performs a concat
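
On the wire, the new operation follows the HttpFS conventions visible in the
diff: TRUNCATE is an HTTP POST carrying the newlength parameter, and the
server answers with a JSON boolean. A rough sketch of a raw call follows;
the host, path and user are hypothetical, and 14000 is assumed to be the
default HttpFS port:

import java.net.HttpURLConnection;
import java.net.URL;

public class HttpFSTruncateSketch {
  public static void main(String[] args) throws Exception {
    // POST .../webhdfs/v1/<path>?op=TRUNCATE&newlength=<bytes>
    URL url = new URL("http://httpfs-host:14000/webhdfs/v1/user/foo/f"
        + "?op=TRUNCATE&newlength=1024&user.name=foo");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("POST"); // TRUNCATE is mapped to HTTP_POST
    System.out.println("HTTP " + conn.getResponseCode());
    // A successful response carries a body like {"boolean": true}.
  }
}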

hadoop git commit: HDFS-7656. Expose truncate API for HDFS httpfs. (yliu)

2015-02-19 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 000b9e6c2 -> fee29e4a4


HDFS-7656. Expose truncate API for HDFS httpfs. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fee29e4a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fee29e4a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fee29e4a

Branch: refs/heads/branch-2
Commit: fee29e4a4f34c2dd1202411cdc25cd7d3a9cab41
Parents: 000b9e6
Author: yliu y...@apache.org
Authored: Thu Feb 19 08:38:25 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Feb 19 08:38:25 2015 +0800

--
 .../hadoop/fs/http/client/HttpFSFileSystem.java | 24 ++-
 .../hadoop/fs/http/server/FSOperations.java | 43 +++-
 .../http/server/HttpFSParametersProvider.java   | 20 +
 .../hadoop/fs/http/server/HttpFSServer.java | 10 +
 .../fs/http/client/BaseTestHttpFSWith.java  | 40 --
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 6 files changed, 133 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fee29e4a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 74e5be8..358f0e4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -109,12 +109,15 @@ public class HttpFSFileSystem extends FileSystem
   public static final String XATTR_VALUE_PARAM = "xattr.value";
   public static final String XATTR_SET_FLAG_PARAM = "flag";
   public static final String XATTR_ENCODING_PARAM = "encoding";
+  public static final String NEW_LENGTH_PARAM = "newlength";
 
   public static final Short DEFAULT_PERMISSION = 0755;
   public static final String ACLSPEC_DEFAULT = "";
 
   public static final String RENAME_JSON = "boolean";
 
+  public static final String TRUNCATE_JSON = "boolean";
+
   public static final String DELETE_JSON = "boolean";
 
   public static final String MKDIRS_JSON = "boolean";
@@ -191,7 +194,7 @@ public class HttpFSFileSystem extends FileSystem
 GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
 GETFILECHECKSUM(HTTP_GET),  GETFILEBLOCKLOCATIONS(HTTP_GET),
 INSTRUMENTATION(HTTP_GET), GETACLSTATUS(HTTP_GET),
-APPEND(HTTP_POST), CONCAT(HTTP_POST),
+APPEND(HTTP_POST), CONCAT(HTTP_POST), TRUNCATE(HTTP_POST),
 CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
 SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
 MODIFYACLENTRIES(HTTP_PUT), REMOVEACLENTRIES(HTTP_PUT),
@@ -569,6 +572,25 @@ public class HttpFSFileSystem extends FileSystem
   }
 
   /**
+   * Truncate a file.
+   * 
+   * @param f the file to be truncated.
+   * @param newLength The size the file is to be truncated to.
+   *
+   * @throws IOException
+   */
+  @Override
+  public boolean truncate(Path f, long newLength) throws IOException {
+Map<String, String> params = new HashMap<String, String>();
+params.put(OP_PARAM, Operation.TRUNCATE.toString());
+params.put(NEW_LENGTH_PARAM, Long.toString(newLength));
+HttpURLConnection conn = getConnection(Operation.TRUNCATE.getMethod(),
+params, f, true);
+JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+return (Boolean) json.get(TRUNCATE_JSON);
+  }
+
+  /**
* Concat existing files together.
* @param f the path to the target destination.
* @param psrcs the paths to the sources to use for the concatenation.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fee29e4a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index e7d92f5..633589c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -363,7 +363,7 @@ public class FSOperations {
   }
 
   /**
-   * Executor that performs an append FileSystemAccess files system operation.
+   * Executor that performs a concat

hadoop git commit: HADOOP-11595. Add default implementation for AbstractFileSystem#truncate. (yliu)

2015-02-19 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 32d215dc8 -> 000b9e6c2


HADOOP-11595. Add default implementation for AbstractFileSystem#truncate. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/000b9e6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/000b9e6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/000b9e6c

Branch: refs/heads/branch-2
Commit: 000b9e6c245e3a922136a05c4e5a6a62629c9c48
Parents: 32d215d
Author: yliu y...@apache.org
Authored: Thu Feb 19 08:28:15 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Feb 19 08:28:15 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../main/java/org/apache/hadoop/fs/AbstractFileSystem.java| 7 +--
 .../src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java  | 6 --
 3 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/000b9e6c/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 21be30a..b93a801 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -562,6 +562,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11545. ArrayIndexOutOfBoundsException is thrown with hadoop
 credential list -provider. (Brahma Reddy Battula via aajisaka)
 
+HADOOP-11595. Add default implementation for AbstractFileSystem#truncate.
+(yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/000b9e6c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 975cc3c..959d9d5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -642,9 +642,12 @@ public abstract class AbstractFileSystem {
* {@link FileContext#truncate(Path, long)} except that Path f must be for
* this file system.
*/
-  public abstract boolean truncate(Path f, long newLength)
+  public boolean truncate(Path f, long newLength)
   throws AccessControlException, FileNotFoundException,
-  UnresolvedLinkException, IOException;
+  UnresolvedLinkException, IOException {
+throw new UnsupportedOperationException(getClass().getSimpleName()
++ " doesn't support truncate");
+  }
 
   /**
* The specification of this method matches that of

http://git-wip-us.apache.org/repos/asf/hadoop/blob/000b9e6c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
index 6b9378d..3bd14f1 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
@@ -141,12 +141,6 @@ public class TestAfsCheckPath {
 }
 
 @Override
-public boolean truncate(Path f, long newLength) throws IOException {
-  // deliberately empty
-  return false;
-}
-
-@Override
 public void renameInternal(Path src, Path dst) throws IOException {
   // deliberately empty
 }



hadoop git commit: HDFS-7798. Checkpointing failure caused by shared KerberosAuthenticator. (Chengbing Liu via yliu)

2015-02-17 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 441dfa486 -> 54e33baaf


HDFS-7798. Checkpointing failure caused by shared KerberosAuthenticator. 
(Chengbing Liu via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54e33baa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54e33baa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54e33baa

Branch: refs/heads/branch-2
Commit: 54e33baaf68ed48865d1d082a79cbcfc52494e1f
Parents: 441dfa4
Author: yliu y...@apache.org
Authored: Tue Feb 17 07:50:14 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Feb 17 07:50:14 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java | 7 ++-
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54e33baa/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6391b34..6bfa34c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -660,6 +660,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-4625. BKJM doesn't take advantage of speculative reads. (Rakesh R
 via aajisaka)
 
+HDFS-7798. Checkpointing failure caused by shared KerberosAuthenticator.
+(Chengbing Liu via yliu)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54e33baa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
index 00e9e98..8a743b6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
@@ -49,9 +49,6 @@ import com.google.common.annotations.VisibleForTesting;
 public class URLConnectionFactory {
   private static final Log LOG = LogFactory.getLog(URLConnectionFactory.class);
 
-  /** SPNEGO authenticator */
-  private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
-
   /**
* Timeout for socket connects and reads
*/
@@ -161,8 +158,8 @@ public class URLConnectionFactory {
   }
   UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab();
   final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
-  return new AuthenticatedURL(AUTH, connConfigurator).openConnection(url,
-  authToken);
+  return new AuthenticatedURL(new KerberosUgiAuthenticator(),
+  connConfigurator).openConnection(url, authToken);
 } else {
   if (LOG.isDebugEnabled()) {
  LOG.debug("open URL connection");



hadoop git commit: HDFS-7798. Checkpointing failure caused by shared KerberosAuthenticator. (Chengbing Liu via yliu)

2015-02-17 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9729b244d -> 500e6a0f4


HDFS-7798. Checkpointing failure caused by shared KerberosAuthenticator. 
(Chengbing Liu via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/500e6a0f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/500e6a0f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/500e6a0f

Branch: refs/heads/trunk
Commit: 500e6a0f46d14a591d0ec082b6d26ee59bdfdf76
Parents: 9729b24
Author: yliu y...@apache.org
Authored: Tue Feb 17 07:46:33 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Feb 17 07:46:33 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java | 7 ++-
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/500e6a0f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cc24dc4..8b234fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -957,6 +957,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-4625. BKJM doesn't take advantage of speculative reads. (Rakesh R
 via aajisaka)
 
+HDFS-7798. Checkpointing failure caused by shared KerberosAuthenticator.
+(Chengbing Liu via yliu)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/500e6a0f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
index 00e9e98..8a743b6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
@@ -49,9 +49,6 @@ import com.google.common.annotations.VisibleForTesting;
 public class URLConnectionFactory {
   private static final Log LOG = LogFactory.getLog(URLConnectionFactory.class);
 
-  /** SPNEGO authenticator */
-  private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
-
   /**
* Timeout for socket connects and reads
*/
@@ -161,8 +158,8 @@ public class URLConnectionFactory {
   }
   UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab();
   final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
-  return new AuthenticatedURL(AUTH, connConfigurator).openConnection(url,
-  authToken);
+  return new AuthenticatedURL(new KerberosUgiAuthenticator(),
+  connConfigurator).openConnection(url, authToken);
 } else {
   if (LOG.isDebugEnabled()) {
  LOG.debug("open URL connection");
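
The root cause: KerberosAuthenticator carries per-connection handshake state
and is not safe to share, so a single static instance used by concurrent
image transfers could fail checkpointing. A sketch of the pattern the fix
adopts (the helper class is hypothetical):

import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;

public class PerConnectionAuthSketch {
  public static HttpURLConnection open(URL url) throws Exception {
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    // A fresh authenticator per connection, mirroring the HDFS-7798 change;
    // the small allocation cost buys thread safety.
    return new AuthenticatedURL(new KerberosAuthenticator())
        .openConnection(url, token);
  }
}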



hadoop git commit: HDFS-7761. cleanup unnecssary code logic in LocatedBlock. (yliu)

2015-02-11 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 67efab935 -> 8a54384a0


HDFS-7761. cleanup unnecssary code logic in LocatedBlock. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a54384a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a54384a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a54384a

Branch: refs/heads/trunk
Commit: 8a54384a0a85b466284fe5717b1dea0a2f29ec8d
Parents: 67efab9
Author: yliu y...@apache.org
Authored: Thu Feb 12 03:08:16 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Feb 12 03:08:16 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../hdfs/protocol/DatanodeInfoWithStorage.java  |  4 ++
 .../hadoop/hdfs/protocol/LocatedBlock.java  | 47 ++--
 .../server/blockmanagement/DatanodeManager.java |  4 +-
 4 files changed, 22 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a54384a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5d0a6f3..53a70b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -619,6 +619,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7760. Document truncate for WebHDFS. (shv)
 
+HDFS-7761. cleanup unnecssary code logic in LocatedBlock. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a54384a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
index db2c2e7..54ffec65 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
@@ -33,6 +33,10 @@ public class DatanodeInfoWithStorage extends DatanodeInfo {
 super(from);
 this.storageID = storageID;
 this.storageType = storageType;
+setSoftwareVersion(from.getSoftwareVersion());
+setDependentHostNames(from.getDependentHostNames());
+setLevel(from.getLevel());
+setParent(from.getParent());
   }
 
   public String getStorageID() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a54384a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index 4ff24b9..f14c8da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -43,8 +43,6 @@ public class LocatedBlock {
   private final ExtendedBlock b;
   private long offset;  // offset of the first byte of the block in the file
   private final DatanodeInfoWithStorage[] locs;
-  private final boolean hasStorageIDs;
-  private final boolean hasStorageTypes;
   /** Cached storage ID for each replica */
   private String[] storageIDs;
   /** Cached storage type for each replica, if reported. */
@@ -104,16 +102,11 @@ public class LocatedBlock {
 DatanodeInfoWithStorage storage = new DatanodeInfoWithStorage(di,
 storageIDs != null ? storageIDs[i] : null,
 storageTypes != null ? storageTypes[i] : null);
-storage.setDependentHostNames(di.getDependentHostNames());
-storage.setLevel(di.getLevel());
-storage.setParent(di.getParent());
 this.locs[i] = storage;
   }
 }
 this.storageIDs = storageIDs;
 this.storageTypes = storageTypes;
-this.hasStorageIDs = storageIDs != null;
-this.hasStorageTypes = storageTypes != null;
 
 if (cachedLocs == null || cachedLocs.length == 0) {
   this.cachedLocs = EMPTY_LOCS;
@@ -137,48 +130,36 @@ public class LocatedBlock {
   /**
* Returns the locations associated with this block. The returned array is 
not
* expected to be modified. If it is, caller must immediately invoke
-   * {@link 
org.apache.hadoop.hdfs.protocol.LocatedBlock#invalidateCachedStorageInfo

hadoop git commit: HDFS-7761. cleanup unnecssary code logic in LocatedBlock. (yliu)

2015-02-11 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c9266132e -> f8d0825f3


HDFS-7761. cleanup unnecssary code logic in LocatedBlock. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8d0825f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8d0825f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8d0825f

Branch: refs/heads/branch-2
Commit: f8d0825f39f2938703e179b9b572177a6322201f
Parents: c926613
Author: yliu y...@apache.org
Authored: Thu Feb 12 03:09:38 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Feb 12 03:09:38 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../hdfs/protocol/DatanodeInfoWithStorage.java  |  4 ++
 .../hadoop/hdfs/protocol/LocatedBlock.java  | 47 ++--
 .../server/blockmanagement/DatanodeManager.java |  4 +-
 4 files changed, 22 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8d0825f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7fe580e..f57f074 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -320,6 +320,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7760. Document truncate for WebHDFS. (shv)
 
+HDFS-7761. cleanup unnecssary code logic in LocatedBlock. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8d0825f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
index db2c2e7..54ffec65 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
@@ -33,6 +33,10 @@ public class DatanodeInfoWithStorage extends DatanodeInfo {
 super(from);
 this.storageID = storageID;
 this.storageType = storageType;
+setSoftwareVersion(from.getSoftwareVersion());
+setDependentHostNames(from.getDependentHostNames());
+setLevel(from.getLevel());
+setParent(from.getParent());
   }
 
   public String getStorageID() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8d0825f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index 4ff24b9..f14c8da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -43,8 +43,6 @@ public class LocatedBlock {
   private final ExtendedBlock b;
   private long offset;  // offset of the first byte of the block in the file
   private final DatanodeInfoWithStorage[] locs;
-  private final boolean hasStorageIDs;
-  private final boolean hasStorageTypes;
   /** Cached storage ID for each replica */
   private String[] storageIDs;
   /** Cached storage type for each replica, if reported. */
@@ -104,16 +102,11 @@ public class LocatedBlock {
 DatanodeInfoWithStorage storage = new DatanodeInfoWithStorage(di,
 storageIDs != null ? storageIDs[i] : null,
 storageTypes != null ? storageTypes[i] : null);
-storage.setDependentHostNames(di.getDependentHostNames());
-storage.setLevel(di.getLevel());
-storage.setParent(di.getParent());
 this.locs[i] = storage;
   }
 }
 this.storageIDs = storageIDs;
 this.storageTypes = storageTypes;
-this.hasStorageIDs = storageIDs != null;
-this.hasStorageTypes = storageTypes != null;
 
 if (cachedLocs == null || cachedLocs.length == 0) {
   this.cachedLocs = EMPTY_LOCS;
@@ -137,48 +130,36 @@ public class LocatedBlock {
   /**
* Returns the locations associated with this block. The returned array is 
not
* expected to be modified. If it is, caller must immediately invoke
-   * {@link 
org.apache.hadoop.hdfs.protocol.LocatedBlock#invalidateCachedStorageInfo
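
The shape of the cleanup: field propagation from the wrapped DatanodeInfo
moves into the DatanodeInfoWithStorage constructor, so every construction
site shrinks to one line and cannot forget a field, and the two has* booleans
that merely mirrored whether the cached arrays were null are dropped. A toy
sketch of the constructor-copy pattern (class names hypothetical):

// Before: callers built the wrapper, then copied level/parent/etc. by hand.
// After: the constructor copies everything the wrapper does not override.
class Info {
  String softwareVersion;
  int level;
  Object parent;
}

class InfoWithStorage extends Info {
  final String storageID;

  InfoWithStorage(Info from, String storageID) {
    this.softwareVersion = from.softwareVersion; // copied here, not by callers
    this.level = from.level;
    this.parent = from.parent;
    this.storageID = storageID;
  }
}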

hadoop git commit: HADOOP-11510. Expose truncate API via FileContext. (yliu)

2015-02-09 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4d4442cb3 -> ae316705b


HADOOP-11510. Expose truncate API via FileContext. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae316705
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae316705
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae316705

Branch: refs/heads/branch-2
Commit: ae316705bb79479038a13b80bab6febbe8f3c75f
Parents: 4d4442c
Author: yliu y...@apache.org
Authored: Tue Feb 10 01:43:08 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Feb 10 01:43:08 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 +
 .../apache/hadoop/fs/AbstractFileSystem.java|  9 
 .../java/org/apache/hadoop/fs/ChecksumFs.java   |  5 +++
 .../apache/hadoop/fs/DelegateToFileSystem.java  |  6 +++
 .../java/org/apache/hadoop/fs/FileContext.java  | 43 
 .../java/org/apache/hadoop/fs/FilterFs.java |  8 
 .../org/apache/hadoop/fs/viewfs/ChRootedFs.java |  6 +++
 .../org/apache/hadoop/fs/viewfs/ViewFs.java | 17 +++-
 .../org/apache/hadoop/fs/TestAfsCheckPath.java  |  6 +++
 .../main/java/org/apache/hadoop/fs/Hdfs.java|  6 +++
 .../fs/TestHDFSFileContextMainOperations.java   | 32 ++-
 11 files changed, 138 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae316705/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8d1047f..a61c349 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -31,6 +31,8 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11045. Introducing a tool to detect flaky tests of hadoop jenkins 
testing
 job. (Yongjun Zhang and Todd Lipcon via ozawa)
 
+HADOOP-11510. Expose truncate API via FileContext. (yliu)
+
   IMPROVEMENTS
 
 HADOOP-11483. HardLink.java should use the jdk7 createLink method 
(aajisaka)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae316705/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 92d4eca..975cc3c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -639,6 +639,15 @@ public abstract class AbstractFileSystem {
 
   /**
* The specification of this method matches that of
+   * {@link FileContext#truncate(Path, long)} except that Path f must be for
+   * this file system.
+   */
+  public abstract boolean truncate(Path f, long newLength)
+  throws AccessControlException, FileNotFoundException,
+  UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
* {@link FileContext#setReplication(Path, short)} except that Path f must be
* for this file system.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae316705/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index ab5cd13..7dc4a80 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -297,6 +297,11 @@ public abstract class ChecksumFs extends FilterFs {
 
   }
 
+  @Override
+  public boolean truncate(Path f, long newLength) throws IOException {
+throw new IOException("Not supported");
+  }
+
   /**
* Opens an FSDataInputStream at the indicated Path.
* @param f the file name to open

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae316705/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
index 1cdcb27..09707c6 100644
--- 
a/hadoop-common-project

hadoop git commit: HADOOP-11510. Expose truncate API via FileContext. (yliu)

2015-02-09 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 00a748d24 -> 1b56d1ce3


HADOOP-11510. Expose truncate API via FileContext. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b56d1ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b56d1ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b56d1ce

Branch: refs/heads/trunk
Commit: 1b56d1ce324165688d40c238858e1e19a1e60f7e
Parents: 00a748d
Author: yliu y...@apache.org
Authored: Tue Feb 10 01:45:29 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Feb 10 01:45:29 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 +
 .../apache/hadoop/fs/AbstractFileSystem.java|  9 
 .../java/org/apache/hadoop/fs/ChecksumFs.java   |  5 +++
 .../apache/hadoop/fs/DelegateToFileSystem.java  |  6 +++
 .../java/org/apache/hadoop/fs/FileContext.java  | 43 
 .../java/org/apache/hadoop/fs/FilterFs.java |  8 
 .../org/apache/hadoop/fs/viewfs/ChRootedFs.java |  6 +++
 .../org/apache/hadoop/fs/viewfs/ViewFs.java | 17 +++-
 .../org/apache/hadoop/fs/TestAfsCheckPath.java  |  6 +++
 .../main/java/org/apache/hadoop/fs/Hdfs.java|  6 +++
 .../fs/TestHDFSFileContextMainOperations.java   | 32 ++-
 11 files changed, 138 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b56d1ce/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 55baf8a..aa86360 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -411,6 +411,8 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11045. Introducing a tool to detect flaky tests of hadoop jenkins 
testing
 job. (Yongjun Zhang and Todd Lipcon via ozawa)
 
+HADOOP-11510. Expose truncate API via FileContext. (yliu)
+
   IMPROVEMENTS
 
 HADOOP-11483. HardLink.java should use the jdk7 createLink method 
(aajisaka)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b56d1ce/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 92d4eca..975cc3c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -639,6 +639,15 @@ public abstract class AbstractFileSystem {
 
   /**
* The specification of this method matches that of
+   * {@link FileContext#truncate(Path, long)} except that Path f must be for
+   * this file system.
+   */
+  public abstract boolean truncate(Path f, long newLength)
+  throws AccessControlException, FileNotFoundException,
+  UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
* {@link FileContext#setReplication(Path, short)} except that Path f must be
* for this file system.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b56d1ce/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index ab5cd13..7dc4a80 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -297,6 +297,11 @@ public abstract class ChecksumFs extends FilterFs {
 
   }
 
+  @Override
+  public boolean truncate(Path f, long newLength) throws IOException {
+throw new IOException("Not supported");
+  }
+
   /**
* Opens an FSDataInputStream at the indicated Path.
* @param f the file name to open

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b56d1ce/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
index 1cdcb27..09707c6 100644
--- 
a/hadoop-common-project/hadoop

hadoop git commit: HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted Yu via yliu)

2015-02-09 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1b56d1ce3 -> 260b5e32c


HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted Yu 
via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/260b5e32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/260b5e32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/260b5e32

Branch: refs/heads/trunk
Commit: 260b5e32c427d54c8c74b9f84432700317d1f282
Parents: 1b56d1c
Author: yliu y...@apache.org
Authored: Tue Feb 10 01:57:51 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Feb 10 01:57:51 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/protocol/DatanodeInfoWithStorage.java  | 63 
 .../hadoop/hdfs/protocol/LocatedBlock.java  |  4 +-
 .../protocol/DatanodeInfoWithStorage.java   | 59 --
 .../blockmanagement/TestDatanodeManager.java|  8 +--
 5 files changed, 72 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/260b5e32/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5a77829..a841c7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -887,6 +887,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7647. DatanodeManager.sortLocatedBlocks sorts DatanodeInfos
 but not StorageIDs. (Milan Desai via Arpit Agarwal)
 
+HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted
+Yu via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/260b5e32/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
new file mode 100644
index 000..db2c2e7
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DatanodeInfoWithStorage extends DatanodeInfo {
+  private final String storageID;
+  private final StorageType storageType;
+
+  public DatanodeInfoWithStorage(DatanodeInfo from, String storageID,
+ StorageType storageType) {
+super(from);
+this.storageID = storageID;
+this.storageType = storageType;
+  }
+
+  public String getStorageID() {
+return storageID;
+  }
+
+  public StorageType getStorageType() {
+return storageType;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+// allows this class to be used interchangeably with DatanodeInfo
+return super.equals(o);
+  }
+
+  @Override
+  public int hashCode() {
+// allows this class to be used interchangeably with DatanodeInfo
+return super.hashCode();
+  }
+
+  @Override
+  public String toString() {
+return "DatanodeInfoWithStorage[" + super.toString() + "," + storageID +
+"," + storageType + "]";
+  }
+}
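
A short sketch, not from this patch, of the compatibility point the change
restores: getLocations() again returns DatanodeInfo[], while the elements
remain DatanodeInfoWithStorage instances that callers may downcast:

    // Imports assumed: org.apache.hadoop.hdfs.protocol.{LocatedBlock,
    // DatanodeInfo, DatanodeInfoWithStorage}.
    static void printStorages(LocatedBlock blk) {
      for (DatanodeInfo di : blk.getLocations()) {  // restored signature
        if (di instanceof DatanodeInfoWithStorage) {
          DatanodeInfoWithStorage d = (DatanodeInfoWithStorage) di;
          System.out.println(d.getStorageID() + " / " + d.getStorageType());
        }
      }
    }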

http://git-wip-us.apache.org/repos/asf/hadoop/blob/260b5e32/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop

hadoop git commit: HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted Yu via yliu)

2015-02-09 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ae316705b -> cc0668ebe


HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted Yu 
via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc0668eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc0668eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc0668eb

Branch: refs/heads/branch-2
Commit: cc0668ebe67078878d51d8158ac948383a3e351e
Parents: ae31670
Author: yliu y...@apache.org
Authored: Tue Feb 10 02:00:32 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Feb 10 02:00:32 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/protocol/DatanodeInfoWithStorage.java  | 63 
 .../hadoop/hdfs/protocol/LocatedBlock.java  |  4 +-
 .../protocol/DatanodeInfoWithStorage.java   | 59 --
 .../blockmanagement/TestDatanodeManager.java|  8 +--
 5 files changed, 72 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc0668eb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3278dd8..8d49f3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -587,6 +587,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7647. DatanodeManager.sortLocatedBlocks sorts DatanodeInfos
 but not StorageIDs. (Milan Desai via Arpit Agarwal)
 
+HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted
+Yu via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc0668eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
new file mode 100644
index 000..db2c2e7
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DatanodeInfoWithStorage extends DatanodeInfo {
+  private final String storageID;
+  private final StorageType storageType;
+
+  public DatanodeInfoWithStorage(DatanodeInfo from, String storageID,
+ StorageType storageType) {
+super(from);
+this.storageID = storageID;
+this.storageType = storageType;
+  }
+
+  public String getStorageID() {
+return storageID;
+  }
+
+  public StorageType getStorageType() {
+return storageType;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+// allows this class to be used interchangeably with DatanodeInfo
+return super.equals(o);
+  }
+
+  @Override
+  public int hashCode() {
+// allows this class to be used interchangeably with DatanodeInfo
+return super.hashCode();
+  }
+
+  @Override
+  public String toString() {
+return "DatanodeInfoWithStorage[" + super.toString() + "," + storageID +
+"," + storageType + "]";
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc0668eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache

hadoop git commit: MAPREDUCE-6227. DFSIO for truncate. (shv via yliu)

2015-02-07 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c5f18ba65 -> 693397514


MAPREDUCE-6227. DFSIO for truncate. (shv via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69339751
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69339751
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69339751

Branch: refs/heads/branch-2
Commit: 69339751438ed6ab46618965b6b2a95c97ad5551
Parents: c5f18ba
Author: yliu y...@apache.org
Authored: Sun Feb 8 02:42:10 2015 +0800
Committer: yliu y...@apache.org
Committed: Sun Feb 8 02:42:10 2015 +0800

--
 hadoop-mapreduce-project/CHANGES.txt|  2 +
 .../java/org/apache/hadoop/fs/TestDFSIO.java| 91 ++--
 2 files changed, 84 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69339751/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index b8e46f4..91f9be7 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -42,6 +42,8 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-5800. Use Job#getInstance instead of deprecated constructors
 (aajisaka)
 
+MAPREDUCE-6227. DFSIO for truncate. (shv via yliu)
+
   OPTIMIZATIONS
 
 MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69339751/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
index 2384ff1..f85a2ee 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
@@ -31,7 +31,6 @@ import java.io.PrintStream;
 import java.util.Date;
 import java.util.Random;
 import java.util.StringTokenizer;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -92,13 +91,13 @@ public class TestDFSIO implements Tool {
  private static final String BASE_FILE_NAME = "test_io_";
  private static final String DEFAULT_RES_FILE_NAME = "TestDFSIO_results.log";
   private static final long MEGA = ByteMultiple.MB.value();
-  private static final int DEFAULT_NR_BYTES = 1;
+  private static final int DEFAULT_NR_BYTES = 128;
   private static final int DEFAULT_NR_FILES = 4;
  private static final String USAGE =
"Usage: " + TestDFSIO.class.getSimpleName() +
" [genericOptions]" +
" -read [-random | -backward | -skip [-skipSize Size]] |" +
-" -write | -append | -clean" +
+" -write | -append | -truncate | -clean" +
" [-compression codecClassName]" +
" [-nrFiles N]" +
" [-size Size[B|KB|MB|GB|TB]]" +
@@ -121,7 +120,8 @@ public class TestDFSIO implements Tool {
 TEST_TYPE_APPEND("append"),
 TEST_TYPE_READ_RANDOM("random read"),
 TEST_TYPE_READ_BACKWARD("backward read"),
-TEST_TYPE_READ_SKIP("skip read");
+TEST_TYPE_READ_SKIP("skip read"),
+TEST_TYPE_TRUNCATE("truncate");
 
 private String type;
 
@@ -192,6 +192,9 @@ public class TestDFSIO implements Tool {
   private static Path getRandomReadDir(Configuration conf) {
 return new Path(getBaseDir(conf), "io_random_read");
   }
+  private static Path getTruncateDir(Configuration conf) {
+return new Path(getBaseDir(conf), "io_truncate");
+  }
   private static Path getDataDir(Configuration conf) {
 return new Path(getBaseDir(conf), "io_data");
   }
@@ -203,6 +206,7 @@ public class TestDFSIO implements Tool {
   public static void beforeClass() throws Exception {
 bench = new TestDFSIO();
 bench.getConf().setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
+bench.getConf().setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 cluster = new MiniDFSCluster.Builder(bench.getConf())
 .numDataNodes(2)
 .format(true)
@@ -279,6 +283,16 @@ public class TestDFSIO implements Tool {
 bench.analyzeResult(fs, TestType.TEST_TYPE_APPEND, execTime);
   }
 
+  @Test (timeout = 6)
+  public
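
A hedged sketch, not in the patch, of driving the new workload
programmatically through the Tool interface; the flag values are
hypothetical, and the usual route is the command line shown in the USAGE
string above:

    // Write some files first, then exercise the new -truncate operation.
    String[] writeArgs = { "-write", "-nrFiles", "4", "-size", "64MB" };
    String[] truncArgs = { "-truncate", "-nrFiles", "4", "-size", "32MB" };
    org.apache.hadoop.util.ToolRunner.run(new TestDFSIO(), writeArgs);
    org.apache.hadoop.util.ToolRunner.run(new TestDFSIO(), truncArgs);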

hadoop git commit: HDFS-7741. Remove unnecessary synchronized in FSDataInputStream and HdfsDataInputStream. (yliu)

2015-02-05 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 995f9809c -> e2803f5a5


HDFS-7741. Remove unnecessary synchronized in FSDataInputStream and 
HdfsDataInputStream. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2803f5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2803f5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2803f5a

Branch: refs/heads/branch-2
Commit: e2803f5a5103119388bc8cfddeb7616e6b33f1ca
Parents: 995f980
Author: yliu y...@apache.org
Authored: Fri Feb 6 06:29:52 2015 +0800
Committer: yliu y...@apache.org
Committed: Fri Feb 6 06:29:52 2015 +0800

--
 .../src/main/java/org/apache/hadoop/fs/FSDataInputStream.java | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java   | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2803f5a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
index c8609d4..6d39d1e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
@@ -58,7 +58,7 @@ public class FSDataInputStream extends DataInputStream
* @param desired offset to seek to
*/
   @Override
-  public synchronized void seek(long desired) throws IOException {
+  public void seek(long desired) throws IOException {
 ((Seekable)in).seek(desired);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2803f5a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fe9ddb6..011087e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -569,6 +569,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7698. Fix locking on HDFS read statistics and add a method for
 clearing them. (Colin P. McCabe via yliu)
 
+HDFS-7741. Remove unnecessary synchronized in FSDataInputStream and
+HdfsDataInputStream. (yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2803f5a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
index 72c57a8..e8ac686 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
@@ -83,7 +83,7 @@ public class HdfsDataInputStream extends FSDataInputStream {
   /**
* Get the collection of blocks that has already been located.
*/
-  public synchronized List<LocatedBlock> getAllBlocks() throws IOException {
+  public List<LocatedBlock> getAllBlocks() throws IOException {
 return getDFSInputStream().getAllBlocks();
   }
 



hadoop git commit: HDFS-7741. Remove unnecessary synchronized in FSDataInputStream and HdfsDataInputStream. (yliu)

2015-02-05 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9d9106911 -> 7b10ef0c3


HDFS-7741. Remove unnecessary synchronized in FSDataInputStream and 
HdfsDataInputStream. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b10ef0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b10ef0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b10ef0c

Branch: refs/heads/trunk
Commit: 7b10ef0c3bfec9cdf20d6e2385b6d218809a37b9
Parents: 9d91069
Author: yliu y...@apache.org
Authored: Fri Feb 6 06:31:03 2015 +0800
Committer: yliu y...@apache.org
Committed: Fri Feb 6 06:31:03 2015 +0800

--
 .../src/main/java/org/apache/hadoop/fs/FSDataInputStream.java | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java   | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b10ef0c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
index c8609d4..6d39d1e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
@@ -58,7 +58,7 @@ public class FSDataInputStream extends DataInputStream
* @param desired offset to seek to
*/
   @Override
-  public synchronized void seek(long desired) throws IOException {
+  public void seek(long desired) throws IOException {
 ((Seekable)in).seek(desired);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b10ef0c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 756b70f..78aa992 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -851,6 +851,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7698. Fix locking on HDFS read statistics and add a method for
 clearing them. (Colin P. McCabe via yliu)
 
+HDFS-7741. Remove unnecessary synchronized in FSDataInputStream and
+HdfsDataInputStream. (yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b10ef0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
index 72c57a8..e8ac686 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
@@ -83,7 +83,7 @@ public class HdfsDataInputStream extends FSDataInputStream {
   /**
* Get the collection of blocks that has already been located.
*/
-  public synchronized List<LocatedBlock> getAllBlocks() throws IOException {
+  public List<LocatedBlock> getAllBlocks() throws IOException {
 return getDFSInputStream().getAllBlocks();
   }
 



hadoop git commit: HDFS-7655. Expose truncate API for Web HDFS. (yliu)

2015-02-05 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4641196fe -> 03f7ed382


HDFS-7655. Expose truncate API for Web HDFS. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03f7ed38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03f7ed38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03f7ed38

Branch: refs/heads/trunk
Commit: 03f7ed382b2c06f075811b29096d5bf79f26a5e5
Parents: 4641196
Author: yliu y...@apache.org
Authored: Thu Feb 5 23:45:06 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Feb 5 23:45:06 2015 +0800

--
 .../apache/hadoop/fs/FileSystemTestHelper.java  | 27 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../web/resources/NamenodeWebHdfsMethods.java   | 30 
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  8 
 .../hdfs/web/resources/NewLengthParam.java  | 49 
 .../hadoop/hdfs/web/resources/PostOpParam.java  |  2 +
 .../hdfs/web/TestFSMainOperationsWebHdfs.java   | 29 
 7 files changed, 129 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03f7ed38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
index a5d8403..4a88c51 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
@@ -22,7 +22,6 @@ import java.io.FileNotFoundException;
 import java.net.URI;
 import java.util.Random;
 
-
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.token.Token;
@@ -127,28 +126,36 @@ public class FileSystemTestHelper {
*/
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
   int blockSize, short numRepl, boolean createParent) throws IOException {
-FSDataOutputStream out = 
-  fSys.create(path, false, 4096, numRepl, blockSize );
+return createFile(fSys, path, getFileData(numBlocks, blockSize),
+blockSize, numRepl);
+  }
 
-byte[] data = getFileData(numBlocks, blockSize);
-out.write(data, 0, data.length);
-out.close();
+  public static long createFile(FileSystem fSys, Path path, byte[] data,
+  int blockSize, short numRepl) throws IOException {
+FSDataOutputStream out = 
+fSys.create(path, false, 4096, numRepl, blockSize);
+try {
+  out.write(data, 0, data.length);
+} finally {
+  out.close();
+}
 return data.length;
   }
 
-
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
   int blockSize, boolean createParent) throws IOException {
-  return createFile(fSys, path, numBlocks, blockSize, 
fSys.getDefaultReplication(path), true);
+return createFile(fSys, path, numBlocks, blockSize,
+fSys.getDefaultReplication(path), true);
   }
 
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
   int blockSize) throws IOException {
-  return createFile(fSys, path, numBlocks, blockSize, true);
+return createFile(fSys, path, numBlocks, blockSize, true);
   }
 
   public static long createFile(FileSystem fSys, Path path) throws IOException 
{
-return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE, 
DEFAULT_NUM_REPL, true);
+return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE,
+DEFAULT_NUM_REPL, true);
   }
 
   public long createFile(FileSystem fSys, String name) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03f7ed38/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 62ab1f9..ac73ab9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -306,6 +306,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-6673. Add delimited format support to PB OIV tool. (Eddy Xu via wang)
 
+HDFS-7655. Expose truncate API for Web HDFS. (yliu)
+
   IMPROVEMENTS
 
 HDFS-7055. Add tracing to DFSInputStream (cmccabe)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03f7ed38/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
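
An end-to-end sketch of the new operation through the webhdfs:// scheme, as
an illustration only; host, port, path, and length are hypothetical:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WebHdfsTruncate {
      public static void main(String[] args) throws Exception {
        FileSystem webFs = FileSystem.get(
            URI.create("webhdfs://namenode:50070"), new Configuration());
        // Issues an HTTP POST with op=TRUNCATE and the new newlength
        // parameter (see NewLengthParam and PostOpParam in this change).
        boolean done = webFs.truncate(new Path("/tmp/data.log"), 1024L);
        System.out.println("ready: " + done);
      }
    }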

hadoop git commit: HDFS-7655. Expose truncate API for Web HDFS. (yliu)

2015-02-05 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b10905114 -> 3ffe5a1ed


HDFS-7655. Expose truncate API for Web HDFS. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ffe5a1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ffe5a1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ffe5a1e

Branch: refs/heads/branch-2
Commit: 3ffe5a1ed26a746cbeb56a6827e3c299bb40181b
Parents: b109051
Author: yliu y...@apache.org
Authored: Thu Feb 5 23:48:55 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Feb 5 23:48:55 2015 +0800

--
 .../apache/hadoop/fs/FileSystemTestHelper.java  | 27 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../web/resources/NamenodeWebHdfsMethods.java   | 30 
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  8 
 .../hdfs/web/resources/NewLengthParam.java  | 49 
 .../hadoop/hdfs/web/resources/PostOpParam.java  |  2 +
 .../hdfs/web/TestFSMainOperationsWebHdfs.java   | 29 
 7 files changed, 129 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ffe5a1e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
index a5d8403..4a88c51 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
@@ -22,7 +22,6 @@ import java.io.FileNotFoundException;
 import java.net.URI;
 import java.util.Random;
 
-
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.token.Token;
@@ -127,28 +126,36 @@ public class FileSystemTestHelper {
*/
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
   int blockSize, short numRepl, boolean createParent) throws IOException {
-FSDataOutputStream out = 
-  fSys.create(path, false, 4096, numRepl, blockSize );
+return createFile(fSys, path, getFileData(numBlocks, blockSize),
+blockSize, numRepl);
+  }
 
-byte[] data = getFileData(numBlocks, blockSize);
-out.write(data, 0, data.length);
-out.close();
+  public static long createFile(FileSystem fSys, Path path, byte[] data,
+  int blockSize, short numRepl) throws IOException {
+FSDataOutputStream out = 
+fSys.create(path, false, 4096, numRepl, blockSize);
+try {
+  out.write(data, 0, data.length);
+} finally {
+  out.close();
+}
 return data.length;
   }
 
-
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
   int blockSize, boolean createParent) throws IOException {
-  return createFile(fSys, path, numBlocks, blockSize, 
fSys.getDefaultReplication(path), true);
+return createFile(fSys, path, numBlocks, blockSize,
+fSys.getDefaultReplication(path), true);
   }
 
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
   int blockSize) throws IOException {
-  return createFile(fSys, path, numBlocks, blockSize, true);
+return createFile(fSys, path, numBlocks, blockSize, true);
   }
 
   public static long createFile(FileSystem fSys, Path path) throws IOException 
{
-return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE, 
DEFAULT_NUM_REPL, true);
+return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE,
+DEFAULT_NUM_REPL, true);
   }
 
   public long createFile(FileSystem fSys, String name) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ffe5a1e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 038fbdd..767bb51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -25,6 +25,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-6673. Add delimited format support to PB OIV tool. (Eddy Xu via wang)
 
+HDFS-7655. Expose truncate API for Web HDFS. (yliu)
+
   IMPROVEMENTS
 
 HDFS-7055. Add tracing to DFSInputStream (cmccabe)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ffe5a1e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

hadoop git commit: HDFS-7698. Fix locking on HDFS read statistics and add a method for clearing them. (Colin P. McCabe via yliu)

2015-02-05 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3ffe5a1ed -> 44d800b35


HDFS-7698. Fix locking on HDFS read statistics and add a method for clearing 
them. (Colin P. McCabe via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44d800b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44d800b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44d800b3

Branch: refs/heads/branch-2
Commit: 44d800b353a56dd43c793b70f6a08146d38d4dd5
Parents: 3ffe5a1
Author: yliu y...@apache.org
Authored: Thu Feb 5 23:57:36 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Feb 5 23:57:36 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 73 
 .../hadoop/hdfs/client/HdfsDataInputStream.java |  6 +-
 .../src/main/native/libhdfs/exception.c |  5 ++
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.c  | 33 -
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.h  | 13 
 .../main/native/libhdfs/test_libhdfs_threaded.c |  4 ++
 7 files changed, 108 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44d800b3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 767bb51..fe9ddb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -566,6 +566,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7709. Fix findbug warnings in httpfs. (Rakesh R via ozawa)
 
+HDFS-7698. Fix locking on HDFS read statistics and add a method for
+clearing them. (Colin P. McCabe via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44d800b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index c9b86d0..9e75333 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -131,10 +131,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 
   public static class ReadStatistics {
 public ReadStatistics() {
-  this.totalBytesRead = 0;
-  this.totalLocalBytesRead = 0;
-  this.totalShortCircuitBytesRead = 0;
-  this.totalZeroCopyBytesRead = 0;
+  clear();
 }
 
 public ReadStatistics(ReadStatistics rhs) {
@@ -203,6 +200,13 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   this.totalShortCircuitBytesRead += amt;
   this.totalZeroCopyBytesRead += amt;
 }
+
+void clear() {
+  this.totalBytesRead = 0;
+  this.totalLocalBytesRead = 0;
+  this.totalShortCircuitBytesRead = 0;
+  this.totalZeroCopyBytesRead = 0;
+}
 
 private long totalBytesRead;
 
@@ -412,7 +416,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   /**
* Return collection of blocks that has already been located.
*/
-  public synchronized List<LocatedBlock> getAllBlocks() throws IOException {
+  public List<LocatedBlock> getAllBlocks() throws IOException {
 return getBlockRange(0, getFileLength());
   }
 
@@ -700,26 +704,28 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
* strategy-agnostic.
*/
   private interface ReaderStrategy {
-public int doRead(BlockReader blockReader, int off, int len,
-ReadStatistics readStatistics) throws ChecksumException, IOException;
+public int doRead(BlockReader blockReader, int off, int len)
+throws ChecksumException, IOException;
   }
 
-  private static void updateReadStatistics(ReadStatistics readStatistics, 
+  private void updateReadStatistics(ReadStatistics readStatistics, 
 int nRead, BlockReader blockReader) {
 if (nRead <= 0) return;
-if (blockReader.isShortCircuit()) {
-  readStatistics.addShortCircuitBytes(nRead);
-} else if (blockReader.isLocal()) {
-  readStatistics.addLocalBytes(nRead);
-} else {
-  readStatistics.addRemoteBytes(nRead);
+synchronized(infoLock) {
+  if (blockReader.isShortCircuit()) {
+readStatistics.addShortCircuitBytes(nRead);
+  } else if (blockReader.isLocal()) {
+readStatistics.addLocalBytes(nRead);
+  } else {
+readStatistics.addRemoteBytes(nRead
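
A usage sketch, not from the patch, assuming an open DistributedFileSystem
fs and a Path path: statistics are read per stream through
HdfsDataInputStream, and the clearing method resets them; the method name
below is assumed from the commit summary:

    // Imports assumed: org.apache.hadoop.hdfs.DFSInputStream and
    // org.apache.hadoop.hdfs.client.HdfsDataInputStream.
    HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
    in.read(new byte[4096]);
    DFSInputStream.ReadStatistics stats = in.getReadStatistics();
    System.out.println("total=" + stats.getTotalBytesRead()
        + " local=" + stats.getTotalLocalBytesRead()
        + " short-circuit=" + stats.getTotalShortCircuitBytesRead());
    in.clearReadStatistics();  // assumed new API added by this change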

hadoop git commit: HDFS-7698. Fix locking on HDFS read statistics and add a method for clearing them. (Colin P. McCabe via yliu)

2015-02-05 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 03f7ed382 -> 45ea53f93


HDFS-7698. Fix locking on HDFS read statistics and add a method for clearing 
them. (Colin P. McCabe via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45ea53f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45ea53f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45ea53f9

Branch: refs/heads/trunk
Commit: 45ea53f9388e6bff1ac0aa3989a1dad56a611fd3
Parents: 03f7ed3
Author: yliu y...@apache.org
Authored: Thu Feb 5 23:56:29 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Feb 5 23:56:29 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 73 
 .../hadoop/hdfs/client/HdfsDataInputStream.java |  6 +-
 .../src/main/native/libhdfs/exception.c |  5 ++
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.c  | 33 -
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.h  | 13 
 .../main/native/libhdfs/test_libhdfs_threaded.c |  4 ++
 7 files changed, 108 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ea53f9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ac73ab9..756b70f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -848,6 +848,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7709. Fix findbug warnings in httpfs. (Rakesh R via ozawa)
 
+HDFS-7698. Fix locking on HDFS read statistics and add a method for
+clearing them. (Colin P. McCabe via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ea53f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index c9b86d0..9e75333 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -131,10 +131,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 
   public static class ReadStatistics {
 public ReadStatistics() {
-  this.totalBytesRead = 0;
-  this.totalLocalBytesRead = 0;
-  this.totalShortCircuitBytesRead = 0;
-  this.totalZeroCopyBytesRead = 0;
+  clear();
 }
 
 public ReadStatistics(ReadStatistics rhs) {
@@ -203,6 +200,13 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   this.totalShortCircuitBytesRead += amt;
   this.totalZeroCopyBytesRead += amt;
 }
+
+void clear() {
+  this.totalBytesRead = 0;
+  this.totalLocalBytesRead = 0;
+  this.totalShortCircuitBytesRead = 0;
+  this.totalZeroCopyBytesRead = 0;
+}
 
 private long totalBytesRead;
 
@@ -412,7 +416,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   /**
* Return collection of blocks that has already been located.
*/
-  public synchronized List<LocatedBlock> getAllBlocks() throws IOException {
+  public List<LocatedBlock> getAllBlocks() throws IOException {
 return getBlockRange(0, getFileLength());
   }
 
@@ -700,26 +704,28 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
* strategy-agnostic.
*/
   private interface ReaderStrategy {
-public int doRead(BlockReader blockReader, int off, int len,
-ReadStatistics readStatistics) throws ChecksumException, IOException;
+public int doRead(BlockReader blockReader, int off, int len)
+throws ChecksumException, IOException;
   }
 
-  private static void updateReadStatistics(ReadStatistics readStatistics, 
+  private void updateReadStatistics(ReadStatistics readStatistics, 
 int nRead, BlockReader blockReader) {
 if (nRead <= 0) return;
-if (blockReader.isShortCircuit()) {
-  readStatistics.addShortCircuitBytes(nRead);
-} else if (blockReader.isLocal()) {
-  readStatistics.addLocalBytes(nRead);
-} else {
-  readStatistics.addRemoteBytes(nRead);
+synchronized(infoLock) {
+  if (blockReader.isShortCircuit()) {
+readStatistics.addShortCircuitBytes(nRead);
+  } else if (blockReader.isLocal()) {
+readStatistics.addLocalBytes(nRead);
+  } else {
+readStatistics.addRemoteBytes(nRead

hadoop git commit: HDFS-7423. various typos and message formatting fixes in nfs daemon and doc. (Charles Lamb via yliu)

2015-01-28 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 43b3b43ce -> dc7950ef1


HDFS-7423. various typos and message formatting fixes in nfs daemon and doc. 
(Charles Lamb via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc7950ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc7950ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc7950ef

Branch: refs/heads/branch-2
Commit: dc7950ef186cf928674fc3067ea7871ea443c322
Parents: 43b3b43
Author: yliu y...@apache.org
Authored: Thu Jan 29 04:20:14 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Jan 29 04:20:14 2015 +0800

--
 .../hadoop/hdfs/nfs/nfs3/AsyncDataService.java  |  16 +--
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java   |  78 +--
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java| 138 +--
 .../hadoop/hdfs/nfs/nfs3/WriteManager.java  |  26 ++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  10 +-
 6 files changed, 136 insertions(+), 135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc7950ef/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
index 429a457..ee3f90a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
@@ -22,12 +22,11 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
 /**
- * This class is a thread pool to easily schedule async data operations.Current
+ * This class is a thread pool to easily schedule async data operations. 
Current
  * async data operation is write back operation. In the future, we could use it
  * for readahead operations too.
  */
@@ -69,8 +68,8 @@ public class AsyncDataService {
 }
 if (LOG.isDebugEnabled()) {
   LOG.debug("Current active thread number: " + executor.getActiveCount()
-  + " queue size:" + executor.getQueue().size()
-  + " scheduled task number:" + executor.getTaskCount());
+  + " queue size: " + executor.getQueue().size()
+  + " scheduled task number: " + executor.getTaskCount());
 }
 executor.execute(task);
   }
@@ -105,10 +104,9 @@ public class AsyncDataService {
   }
 
   /**
-   * A task for write data back to HDFS for a file. Since only one thread can
-   * write for a file, any time there should be only one task(in queue or
-   * executing) for one file existing, and this should be guaranteed by the
-   * caller.
+   * A task to write data back to HDFS for a file. Since only one thread can
+   * write to a file, there should only be one task at any time for a file
+   * (in queue or executing), and this should be guaranteed by the caller.
*/
   static class WriteBackTask implements Runnable {
 
@@ -135,7 +133,7 @@ public class AsyncDataService {
   try {
 openFileCtx.executeWriteBack();
   } catch (Throwable t) {
-LOG.error("Asyn data service got error:", t);
+LOG.error("Async data service got error: ", t);
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc7950ef/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index ad09f51..eb14820 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -211,7 +211,7 @@ class OpenFileCtx {
   private long updateNonSequentialWriteInMemory(long count) {
 long newValue = nonSequentialWriteInMemory.addAndGet(count);
 if (LOG.isDebugEnabled()) {
-  LOG.debug("Update nonSequentialWriteInMemory by " + count + " new value:"
+  LOG.debug("Update nonSequentialWriteInMemory by " + count + " new value: "
   + newValue);
 }
 
@@ -314,7 +314,7 @@ class

hadoop git commit: HDFS-7423. various typos and message formatting fixes in nfs daemon and doc. (Charles Lamb via yliu)

2015-01-28 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5a0051f4d -> f37849188


HDFS-7423. various typos and message formatting fixes in nfs daemon and doc. 
(Charles Lamb via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3784918
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3784918
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3784918

Branch: refs/heads/trunk
Commit: f37849188b05a6251584de1aed5e66d5dfa7da4f
Parents: 5a0051f
Author: yliu y...@apache.org
Authored: Thu Jan 29 04:56:04 2015 +0800
Committer: yliu y...@apache.org
Committed: Thu Jan 29 04:56:04 2015 +0800

--
 .../hadoop/hdfs/nfs/nfs3/AsyncDataService.java  |  16 +--
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java   |  78 +--
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java| 138 +--
 .../hadoop/hdfs/nfs/nfs3/WriteManager.java  |  26 ++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  10 +-
 6 files changed, 136 insertions(+), 135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3784918/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
index 429a457..ee3f90a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
@@ -22,12 +22,11 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
 /**
- * This class is a thread pool to easily schedule async data operations.Current
+ * This class is a thread pool to easily schedule async data operations. Current
  * async data operation is write back operation. In the future, we could use it
  * for readahead operations too.
  */
@@ -69,8 +68,8 @@ public class AsyncDataService {
 }
 if (LOG.isDebugEnabled()) {
   LOG.debug("Current active thread number: " + executor.getActiveCount()
-  + " queue size:" + executor.getQueue().size()
-  + " scheduled task number:" + executor.getTaskCount());
+  + " queue size: " + executor.getQueue().size()
+  + " scheduled task number: " + executor.getTaskCount());
 }
 executor.execute(task);
   }
@@ -105,10 +104,9 @@ public class AsyncDataService {
   }
 
   /**
-   * A task for write data back to HDFS for a file. Since only one thread can
-   * write for a file, any time there should be only one task(in queue or
-   * executing) for one file existing, and this should be guaranteed by the
-   * caller.
+   * A task to write data back to HDFS for a file. Since only one thread can
+   * write to a file, there should only be one task at any time for a file
+   * (in queue or executing), and this should be guaranteed by the caller.
*/
   static class WriteBackTask implements Runnable {
 
@@ -135,7 +133,7 @@ public class AsyncDataService {
   try {
 openFileCtx.executeWriteBack();
   } catch (Throwable t) {
-LOG.error("Asyn data service got error:", t);
+LOG.error("Async data service got error: ", t);
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3784918/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index a06d1c5..9610f48 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -211,7 +211,7 @@ class OpenFileCtx {
   private long updateNonSequentialWriteInMemory(long count) {
 long newValue = nonSequentialWriteInMemory.addAndGet(count);
 if (LOG.isDebugEnabled()) {
-  LOG.debug("Update nonSequentialWriteInMemory by " + count + " new value:"
+  LOG.debug("Update nonSequentialWriteInMemory by " + count + " new value: "
   + newValue);
 }
 
@@ -312,7 +312,7 @@ class OpenFileCtx

hadoop git commit: HDFS-7677. DistributedFileSystem#truncate should resolve symlinks. (yliu)

2015-01-27 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4718af435 -> d483ba25d


HDFS-7677. DistributedFileSystem#truncate should resolve symlinks. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d483ba25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d483ba25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d483ba25

Branch: refs/heads/branch-2
Commit: d483ba25d7e90ec140a86c526c7e60cc6015f210
Parents: 4718af4
Author: yliu y...@apache.org
Authored: Tue Jan 27 23:47:52 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Jan 27 23:47:52 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../hadoop/hdfs/DistributedFileSystem.java  | 14 -
 .../hdfs/server/namenode/TestFileTruncate.java  | 30 
 3 files changed, 45 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d483ba25/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7ebc31e..2c73143 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -514,6 +514,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7566. Remove obsolete entries from hdfs-default.xml (Ray Chiang
 via aw)
 
+HDFS-7677. DistributedFileSystem#truncate should resolve symlinks. (yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d483ba25/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index c7f8b7f..97ef2f5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -630,7 +630,19 @@ public class DistributedFileSystem extends FileSystem {
   @Override
   public boolean truncate(Path f, final long newLength) throws IOException {
 statistics.incrementWriteOps(1);
-return dfs.truncate(getPathName(f), newLength);
+Path absF = fixRelativePart(f);
+return new FileSystemLinkResolver<Boolean>() {
+  @Override
+  public Boolean doCall(final Path p)
+  throws IOException, UnresolvedLinkException {
+return dfs.truncate(getPathName(p), newLength);
+  }
+  @Override
+  public Boolean next(final FileSystem fs, final Path p)
+  throws IOException {
+return fs.truncate(p, newLength);
+  }
+}.resolve(this, absF);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d483ba25/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index e8250a2..579e718 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -759,6 +759,36 @@ public class TestFileTruncate {
 }
   }
 
+  @Test
+  public void testTruncate4Symlink() throws IOException {
+    final int fileLength = 3 * BLOCK_SIZE;
+
+    final Path parent = new Path("/test");
+    fs.mkdirs(parent);
+    final byte[] contents = AppendTestUtil.initBuffer(fileLength);
+    final Path file = new Path(parent, "testTruncate4Symlink");
+    writeContents(contents, fileLength, file);
+
+    final Path link = new Path(parent, "link");
+    fs.createSymlink(file, link, false);
+
+    final int newLength = fileLength/3;
+    boolean isReady = fs.truncate(link, newLength);
+
+    assertTrue("Recovery is not expected.", isReady);
+
+    FileStatus fileStatus = fs.getFileStatus(file);
+    assertThat(fileStatus.getLen(), is((long) newLength));
+
+    ContentSummary cs = fs.getContentSummary(parent);
+    assertEquals("Bad disk space usage",
+        cs.getSpaceConsumed(), newLength * REPLICATION);
+    // validate the file content
+    checkFullFile(file, newLength, contents);
+
+    fs.delete(parent, true);
+  }
+
   static void writeContents(byte[] contents, int fileLength, Path
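
With the FileSystemLinkResolver wrapper above, truncate() called on a symlink is redirected to the link target instead of failing, which is what testTruncate4Symlink verifies. A small usage sketch, assuming a 2.7.0+ HDFS client, a running cluster, and symlinks enabled (the paths and length here are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateViaSymlink {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at an HDFS cluster.
    FileSystem fs = FileSystem.get(new Configuration());

    Path file = new Path("/test/data");
    Path link = new Path("/test/dataLink");
    fs.createSymlink(file, link, false);

    // Resolves the link and truncates the target; returns true when the
    // truncation completed without needing block recovery.
    boolean done = fs.truncate(link, 1024L);
    System.out.println("truncated without recovery: " + done);
  }
}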

hadoop git commit: HADOOP-11469. KMS should skip default.key.acl and whitelist.key.acl when loading key acl. (Dian Fu via yliu)

2015-01-27 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9ca565e97 -> ee1e06a3a


HADOOP-11469. KMS should skip default.key.acl and whitelist.key.acl when 
loading key acl. (Dian Fu via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee1e06a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee1e06a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee1e06a3

Branch: refs/heads/trunk
Commit: ee1e06a3ab9136a3cd32b44c5535dfd2443bfad6
Parents: 9ca565e
Author: yliu <y...@apache.org>
Authored: Wed Jan 28 00:07:21 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Wed Jan 28 00:07:21 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../hadoop/crypto/key/kms/server/KMSACLs.java |  7 +++++--
 .../crypto/key/kms/server/KMSConfiguration.java   |  1 +
 .../hadoop/crypto/key/kms/server/TestKMSACLs.java | 18 +++++++++++++++---
 4 files changed, 24 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee1e06a3/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0396e7d..b87c9ae 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -774,6 +774,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11509. Change parsing sequence in GenericOptionsParser to parse -D
 parameters before -files. (xgong)
 
+HADOOP-11469. KMS should skip default.key.acl and whitelist.key.acl when
+loading key acl. (Dian Fu via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee1e06a3/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
index c33dd4b..5b67950 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
@@ -36,6 +36,8 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Pattern;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Provides access to the <code>AccessControlList</code>s used by KMS,
  * hot-reloading them if the <code>kms-acls.xml</code> file where the ACLs
@@ -70,7 +72,8 @@ public class KMSACLs implements Runnable, KeyACLs {
 
   private volatile Map<Type, AccessControlList> acls;
   private volatile Map<Type, AccessControlList> blacklistedAcls;
-  private volatile Map<String, HashMap<KeyOpType, AccessControlList>> keyAcls;
+  @VisibleForTesting
+  volatile Map<String, HashMap<KeyOpType, AccessControlList>> keyAcls;
   private final Map<KeyOpType, AccessControlList> defaultKeyAcls =
       new HashMap<KeyOpType, AccessControlList>();
   private final Map<KeyOpType, AccessControlList> whitelistKeyAcls =
@@ -112,7 +115,7 @@ public class KMSACLs implements Runnable, KeyACLs {
     Map<String, HashMap<KeyOpType, AccessControlList>> tempKeyAcls =
         new HashMap<String, HashMap<KeyOpType, AccessControlList>>();
     Map<String, String> allKeyACLS =
-        conf.getValByRegex(Pattern.quote(KMSConfiguration.KEY_ACL_PREFIX));
+        conf.getValByRegex(KMSConfiguration.KEY_ACL_PREFIX_REGEX);
     for (Map.Entry<String, String> keyAcl : allKeyACLS.entrySet()) {
       String k = keyAcl.getKey();
       // this should be of type key.acl.<KEY_NAME>.<OP_TYPE>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee1e06a3/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index a67c68e..23c983f 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -38,6 +38,7 @@ public class KMSConfiguration {
   public static final String CONFIG_PREFIX = "hadoop.kms.";
 
   public static final String KEY_ACL_PREFIX = "key.acl.";
+  public static final String
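
The root cause is visible in the KMSACLs hunk: Pattern.quote(KEY_ACL_PREFIX) produces an unanchored literal, and Configuration#getValByRegex matches it anywhere in a property name, so default.key.acl.* and whitelist.key.acl.* entries were swept in as if they were per-key ACLs. Anchoring the pattern at the start of the name excludes them. A standalone sketch of the difference (the definition of KEY_ACL_PREFIX_REGEX is cut off above, so the anchored pattern below is an assumption about its shape):

import java.util.regex.Pattern;

public class KeyAclPrefixDemo {
  public static void main(String[] args) {
    String[] names = {
        "key.acl.mykey.READ",           // genuine per-key ACL
        "default.key.acl.READ",         // default ACL, must be skipped
        "whitelist.key.acl.MANAGEMENT"  // whitelist ACL, must be skipped
    };
    // Old: quoted literal, unanchored; find() matches all three names.
    Pattern quoted = Pattern.compile(Pattern.quote("key.acl."));
    // Fixed: anchored at the start of the name, so only real per-key ACL
    // entries match (assumed shape of KEY_ACL_PREFIX_REGEX).
    Pattern anchored = Pattern.compile("^key\\.acl\\..+");
    for (String n : names) {
      System.out.printf("%-30s quoted=%b anchored=%b%n",
          n, quoted.matcher(n).find(), anchored.matcher(n).find());
    }
  }
}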

hadoop git commit: HADOOP-11469. KMS should skip default.key.acl and whitelist.key.acl when loading key acl. (Dian Fu via yliu)

2015-01-27 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d483ba25d -> 3ac8f8898


HADOOP-11469. KMS should skip default.key.acl and whitelist.key.acl when 
loading key acl. (Dian Fu via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ac8f889
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ac8f889
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ac8f889

Branch: refs/heads/branch-2
Commit: 3ac8f889892cdc0633b1206ee5b83784e26e093f
Parents: d483ba2
Author: yliu <y...@apache.org>
Authored: Tue Jan 27 23:55:52 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Tue Jan 27 23:55:52 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../hadoop/crypto/key/kms/server/KMSACLs.java |  7 +++++--
 .../crypto/key/kms/server/KMSConfiguration.java   |  1 +
 .../hadoop/crypto/key/kms/server/TestKMSACLs.java | 18 +++++++++++++++---
 4 files changed, 24 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac8f889/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9b841ce..9ba15ba 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -418,6 +418,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11509. Change parsing sequence in GenericOptionsParser to parse -D
 parameters before -files. (xgong)
 
+HADOOP-11469. KMS should skip default.key.acl and whitelist.key.acl when
+loading key acl. (Dian Fu via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac8f889/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
index c33dd4b..5b67950 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
@@ -36,6 +36,8 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Pattern;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Provides access to the <code>AccessControlList</code>s used by KMS,
  * hot-reloading them if the <code>kms-acls.xml</code> file where the ACLs
@@ -70,7 +72,8 @@ public class KMSACLs implements Runnable, KeyACLs {
 
   private volatile Map<Type, AccessControlList> acls;
   private volatile Map<Type, AccessControlList> blacklistedAcls;
-  private volatile Map<String, HashMap<KeyOpType, AccessControlList>> keyAcls;
+  @VisibleForTesting
+  volatile Map<String, HashMap<KeyOpType, AccessControlList>> keyAcls;
   private final Map<KeyOpType, AccessControlList> defaultKeyAcls =
       new HashMap<KeyOpType, AccessControlList>();
   private final Map<KeyOpType, AccessControlList> whitelistKeyAcls =
@@ -112,7 +115,7 @@ public class KMSACLs implements Runnable, KeyACLs {
     Map<String, HashMap<KeyOpType, AccessControlList>> tempKeyAcls =
         new HashMap<String, HashMap<KeyOpType, AccessControlList>>();
     Map<String, String> allKeyACLS =
-        conf.getValByRegex(Pattern.quote(KMSConfiguration.KEY_ACL_PREFIX));
+        conf.getValByRegex(KMSConfiguration.KEY_ACL_PREFIX_REGEX);
     for (Map.Entry<String, String> keyAcl : allKeyACLS.entrySet()) {
       String k = keyAcl.getKey();
       // this should be of type key.acl.<KEY_NAME>.<OP_TYPE>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac8f889/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index a67c68e..23c983f 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -38,6 +38,7 @@ public class KMSConfiguration {
   public static final String CONFIG_PREFIX = "hadoop.kms.";
 
   public static final String KEY_ACL_PREFIX = "key.acl.";
+  public static final String

hadoop git commit: HDFS-7660. BlockReceiver#close() might be called multiple times, which causes the fsvolume reference being released incorrectly. (Lei Xu via yliu)

2015-01-22 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk aee450061 -> 5f124efb3


HDFS-7660. BlockReceiver#close() might be called multiple times, which causes 
the fsvolume reference being released incorrectly. (Lei Xu via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f124efb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f124efb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f124efb

Branch: refs/heads/trunk
Commit: 5f124efb3e090f96f217bee22f3c8897f9772f14
Parents: aee4500
Author: yliu <y...@apache.org>
Authored: Fri Jan 23 02:37:44 2015 +0800
Committer: yliu <y...@apache.org>
Committed: Fri Jan 23 02:37:44 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 4 ++++
 .../org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java| 3 ++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f124efb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 74eb160..9176ec7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -768,6 +768,10 @@ Release 2.7.0 - UNRELEASED
 HDFS-3519. Checkpoint upload may interfere with a concurrent saveNamespace.
 (Ming Ma via cnauroth)
 
+HDFS-7660. BlockReceiver#close() might be called multiple times, which
+causes the fsvolume reference being released incorrectly. (Lei Xu via
+yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f124efb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 12041a6..3d37df5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -123,7 +123,7 @@ class BlockReceiver implements Closeable {
   private boolean syncOnClose;
   private long restartBudget;
   /** the reference of the volume where the block receiver writes to */
-  private final ReplicaHandler replicaHandler;
+  private ReplicaHandler replicaHandler;
 
   /**
* for replaceBlock response
@@ -334,6 +334,7 @@ class BlockReceiver implements Closeable {
 }
 if (replicaHandler != null) {
   IOUtils.cleanup(null, replicaHandler);
+  replicaHandler = null;
 }
 if (measuredFlushTime) {
   datanode.metrics.addFlushNanos(flushTotalNanos);
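
The one-line fix above makes close() idempotent: once the volume reference has been released, the field is cleared so a second close() cannot release it again. A self-contained sketch of the pattern with simplified stand-ins (these are not the Hadoop classes):

import java.io.Closeable;
import java.io.IOException;

class VolumeReference implements Closeable {
  private int refCount = 1;

  @Override
  public void close() {
    // A second release on the same reference would corrupt the count.
    if (--refCount < 0) {
      throw new IllegalStateException("volume reference released twice");
    }
  }
}

class Receiver implements Closeable {
  private VolumeReference replicaHandler = new VolumeReference();

  @Override
  public void close() throws IOException {
    if (replicaHandler != null) {
      replicaHandler.close();
      replicaHandler = null;  // guard: repeated close() is now a no-op
    }
  }

  public static void main(String[] args) throws IOException {
    Receiver r = new Receiver();
    r.close();
    r.close();  // safe after the fix; previously double-released the ref
    System.out.println("double close handled safely");
  }
}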


