hadoop git commit: MAPREDUCE-5817. Mappers get rescheduled on node transition even after all reducers are completed. (Sangjin Lee via kasha) (cherry picked from commit 27d24f96ab8d17e839a1ef0d7076efc78d28724a)

2016-04-18 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 2ec5bb0fe -> cc6ae6fa7


MAPREDUCE-5817. Mappers get rescheduled on node transition even after all 
reducers are completed. (Sangjin Lee via kasha)
(cherry picked from commit 27d24f96ab8d17e839a1ef0d7076efc78d28724a)

(cherry picked from commit b826168173c3386738acc12a5d62577f12aa06e9)

Conflicts:
hadoop-mapreduce-project/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc6ae6fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc6ae6fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc6ae6fa

Branch: refs/heads/branch-2.7
Commit: cc6ae6fa7e59689a4aaefeafcd7a37d8fc578e77
Parents: 2ec5bb0
Author: Karthik Kambatla 
Authored: Fri Aug 14 12:29:50 2015 -0700
Committer: Wangda Tan 
Committed: Mon Apr 18 21:58:53 2016 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../mapreduce/v2/app/job/impl/JobImpl.java  |  38 --
 .../mapreduce/v2/app/job/impl/TestJobImpl.java  | 130 ++-
 3 files changed, 156 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc6ae6fa/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 69e5d5c..84a5e8c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -75,6 +75,9 @@ Release 2.7.3 - UNRELEASED
 MAPREDUCE-6670. TestJobListCache#testEviction sometimes fails on Windows
 with timeout. (Gergely Novák via junping_du)
 
+MAPREDUCE-5817. Mappers get rescheduled on node transition even after all
+reducers are completed. (Sangjin Lee via kasha)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc6ae6fa/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index dff5ef4..b349ca8 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -130,6 +130,7 @@ import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
 import org.apache.hadoop.yarn.util.Clock;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /** Implementation of Job interface. Maintains the state machines of Job.
@@ -1328,15 +1329,21 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
   
   private void actOnUnusableNode(NodeId nodeId, NodeState nodeState) {
 // rerun previously successful map tasks
-List<TaskAttemptId> taskAttemptIdList =
-    nodesToSucceededTaskAttempts.get(nodeId);
-if(taskAttemptIdList != null) {
-  String mesg = "TaskAttempt killed because it ran on unusable node "
-  + nodeId;
-  for(TaskAttemptId id : taskAttemptIdList) {
-if(TaskType.MAP == id.getTaskId().getTaskType()) {
-  // reschedule only map tasks because their outputs maybe unusable
-  LOG.info(mesg + ". AttemptId:" + id);
-  eventHandler.handle(new TaskAttemptKillEvent(id, mesg));
+// do this only if the job is still in the running state and there are
+// running reducers
+if (getInternalState() == JobStateInternal.RUNNING &&
+!allReducersComplete()) {
+  List<TaskAttemptId> taskAttemptIdList =
+  nodesToSucceededTaskAttempts.get(nodeId);
+  if (taskAttemptIdList != null) {
+String mesg = "TaskAttempt killed because it ran on unusable node "
++ nodeId;
+for (TaskAttemptId id : taskAttemptIdList) {
+  if (TaskType.MAP == id.getTaskId().getTaskType()) {
+// reschedule only map tasks because their outputs maybe unusable
+LOG.info(mesg + ". AttemptId:" + id);
+eventHandler.handle(new TaskAttemptKillEvent(id, mesg));
+  }
 }
   }
 }
@@ -1344,6 +1351,10 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
 // 
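The new guard pairs with an allReducersComplete() helper. Below is a self-contained model of the combined check; the class, fields, and the helper's body are illustrative stand-ins, not the actual JobImpl internals:

public class NodeLossGuard {
  private final int numReduceTasks;
  private final int completedReduces;
  private final boolean running;

  NodeLossGuard(int numReduceTasks, int completedReduces, boolean running) {
    this.numReduceTasks = numReduceTasks;
    this.completedReduces = completedReduces;
    this.running = running;
  }

  boolean allReducersComplete() {
    // a map-only job trivially has "all reducers complete"
    return numReduceTasks == 0 || numReduceTasks == completedReduces;
  }

  /** Should successful map attempts on a lost node be re-run? */
  boolean shouldRerunMaps() {
    return running && !allReducersComplete();
  }

  public static void main(String[] args) {
    // 10 reducers, all done: losing a node must not resurrect its maps.
    System.out.println(new NodeLossGuard(10, 10, true).shouldRerunMaps()); // false
    // 4 reducers still fetching map output: maps on the lost node must re-run.
    System.out.println(new NodeLossGuard(10, 6, true).shouldRerunMaps());  // true
  }
}

Once every reducer has finished, nobody will fetch map output again, so re-running maps from an unusable node is pure waste; that is the rescheduling this patch suppresses.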

hadoop git commit: HDFS-10306. SafeModeMonitor should not leave safe mode if name system is starting active service. Contributed by Mingliang Liu.

2016-04-18 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a69b6b1e8 -> bf4403e03


HDFS-10306. SafeModeMonitor should not leave safe mode if name system is 
starting active service. Contributed by Mingliang Liu.

(cherry picked from commit be0bce1b7171c49e2dca22f56d4e750e606862fc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf4403e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf4403e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf4403e0

Branch: refs/heads/branch-2
Commit: bf4403e034429e41a5e039707ec70cc365f1ca2e
Parents: a69b6b1
Author: Jing Zhao 
Authored: Mon Apr 18 18:05:49 2016 -0700
Committer: Jing Zhao 
Committed: Mon Apr 18 18:06:10 2016 -0700

--
 .../hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf4403e0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
index 5f13dbf..1249274 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
@@ -618,7 +618,9 @@ class BlockManagerSafeMode {
  * the extension time has passed.
  */
 private boolean canLeave() {
-  if (timeToLeaveExtension() > 0) {
+  if (namesystem.inTransitionToActive()) {
+return false;
+  } else if (timeToLeaveExtension() > 0) {
 reportStatus("STATE* Safe mode ON, in safe mode extension.", false);
 return false;
   } else if (!areThresholdsMet()) {
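The ordering of the checks carries the fix: while the name system is still starting its active services, the block and datanode thresholds can transiently look satisfied, so the transition test must short-circuit everything else. A minimal compilable model of the reordered guard; the fields stand in for the namesystem calls shown in the diff:

public class SafeModeGate {
  boolean inTransitionToActive;  // namesystem.inTransitionToActive()
  long extensionMillisLeft;      // timeToLeaveExtension()
  boolean thresholdsMet;         // areThresholdsMet()

  boolean canLeave() {
    if (inTransitionToActive) {
      return false;              // never leave while becoming active
    } else if (extensionMillisLeft > 0) {
      return false;              // still inside the safe mode extension
    } else if (!thresholdsMet) {
      return false;              // replication thresholds not yet met
    }
    return true;
  }

  public static void main(String[] args) {
    SafeModeGate gate = new SafeModeGate();
    gate.thresholdsMet = true;
    gate.inTransitionToActive = true;
    System.out.println(gate.canLeave()); // false: the transition check wins
    gate.inTransitionToActive = false;
    System.out.println(gate.canLeave()); // true
  }
}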



hadoop git commit: HDFS-10306. SafeModeMonitor should not leave safe mode if name system is starting active service. Contributed by Mingliang Liu.

2016-04-18 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk cb3ca460e -> be0bce1b7


HDFS-10306. SafeModeMonitor should not leave safe mode if name system is 
starting active service. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be0bce1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be0bce1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be0bce1b

Branch: refs/heads/trunk
Commit: be0bce1b7171c49e2dca22f56d4e750e606862fc
Parents: cb3ca46
Author: Jing Zhao 
Authored: Mon Apr 18 18:05:49 2016 -0700
Committer: Jing Zhao 
Committed: Mon Apr 18 18:05:49 2016 -0700

--
 .../hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be0bce1b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
index e2d688e..ea70c86 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
@@ -623,7 +623,9 @@ class BlockManagerSafeMode {
  * the extension time has passed.
  */
 private boolean canLeave() {
-  if (timeToLeaveExtension() > 0) {
+  if (namesystem.inTransitionToActive()) {
+return false;
+  } else if (timeToLeaveExtension() > 0) {
 reportStatus("STATE* Safe mode ON, in safe mode extension.", false);
 return false;
   } else if (!areThresholdsMet()) {



[03/27] hadoop git commit: HDFS-10282. The VolumeScanner should warn about replica files which are misplaced. Contributed by Colin Patrick McCabe.

2016-04-18 Thread aengineer
HDFS-10282. The VolumeScanner should warn about replica files which are 
misplaced. Contributed by Colin Patrick McCabe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d1c1152
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d1c1152
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d1c1152

Branch: refs/heads/HDFS-7240
Commit: 0d1c1152f1ce2706f92109bfbdff0d62e98e6797
Parents: df18b6e9
Author: Kihwal Lee 
Authored: Thu Apr 14 07:58:24 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 07:58:24 2016 -0500

--
 .../hdfs/server/datanode/DirectoryScanner.java  | 14 ++---
 .../hdfs/server/datanode/VolumeScanner.java |  2 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   | 12 
 .../server/datanode/FsDatasetTestUtils.java |  7 +++
 .../hdfs/server/datanode/TestBlockScanner.java  | 63 
 .../fsdataset/impl/FsDatasetImplTestUtils.java  | 21 +++
 6 files changed, 111 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d1c1152/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 0e51cec..1db445e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -914,13 +914,13 @@ public class DirectoryScanner implements Runnable {
  */
 private void verifyFileLocation(File actualBlockFile,
 File bpFinalizedDir, long blockId) {
-  File blockDir = DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId);
-  if (actualBlockFile.getParentFile().compareTo(blockDir) != 0) {
-File expBlockFile = new File(blockDir, actualBlockFile.getName());
-LOG.warn("Block: " + blockId
-+ " has to be upgraded to block ID-based layout. "
-+ "Actual block file path: " + actualBlockFile
-+ ", expected block file path: " + expBlockFile);
+  File expectedBlockDir =
+  DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId);
+  File actualBlockDir = actualBlockFile.getParentFile();
+  if (actualBlockDir.compareTo(expectedBlockDir) != 0) {
+LOG.warn("Block: " + blockId +
+" found in invalid directory.  Expected directory: " +
+expectedBlockDir + ".  Actual directory: " + actualBlockDir);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d1c1152/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index d1f2d05..d0dc9ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -414,7 +414,7 @@ public class VolumeScanner extends Thread {
   Block b = volume.getDataset().getStoredBlock(
   cblock.getBlockPoolId(), cblock.getBlockId());
   if (b == null) {
-LOG.info("FileNotFound while finding block {} on volume {}",
+LOG.info("Replica {} was not found in the VolumeMap for volume {}",
 cblock, volume.getBasePath());
   } else {
 block = new ExtendedBlock(cblock.getBlockPoolId(), b);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d1c1152/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 0d060f9..73514b6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -697,6 +697,18 @@ public class 
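The "expected directory" in the warning comes from the block-ID-based layout, in which two directory levels are derived from bits of the block ID, so the correct location is computable without any lookup. A sketch of that mapping, modeled on DatanodeUtil.idToBlockDir(); treat the shifts and masks as an approximation rather than a quotation of the Hadoop source:

import java.io.File;

public class BlockDirCheck {
  static File idToBlockDir(File bpFinalizedDir, long blockId) {
    int d1 = (int) ((blockId >> 16) & 0x1F); // 32 first-level subdirs
    int d2 = (int) ((blockId >> 8) & 0x1F);  // 32 second-level subdirs
    return new File(bpFinalizedDir,
        "subdir" + d1 + File.separator + "subdir" + d2);
  }

  public static void main(String[] args) {
    File finalized = new File("/data/dn/current/BP-1/current/finalized");
    // A replica found in any other directory is "misplaced" and now warned about.
    System.out.println("blk_1073741825 belongs in "
        + idToBlockDir(finalized, 1073741825L));
  }
}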

[13/27] hadoop git commit: HADOOP-13026 Should not wrap IOExceptions into a AuthenticationException in KerberosAuthenticator. Xuan Gong via stevel

2016-04-18 Thread aengineer
HADOOP-13026 Should not wrap IOExceptions into a AuthenticationException in 
KerberosAuthenticator. Xuan Gong via stevel


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4feed9b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4feed9b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4feed9b2

Branch: refs/heads/HDFS-7240
Commit: 4feed9b2dbff7bc52871cde7e1ff31b862e4fe9a
Parents: 6e6b6dd
Author: Steve Loughran 
Authored: Fri Apr 15 17:43:38 2016 +0100
Committer: Steve Loughran 
Committed: Fri Apr 15 17:44:12 2016 +0100

--
 .../security/authentication/client/KerberosAuthenticator.java  | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4feed9b2/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index 0f046ae..a69ee46 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -327,7 +327,11 @@ public class KerberosAuthenticator implements 
Authenticator {
 }
   });
 } catch (PrivilegedActionException ex) {
-  throw new AuthenticationException(ex.getException());
+  if (ex.getException() instanceof IOException) {
+throw (IOException) ex.getException();
+  } else {
+throw new AuthenticationException(ex.getException());
+  }
 } catch (LoginException ex) {
   throw new AuthenticationException(ex);
 }
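The pattern deserves a closer look: Subject.doAs() wraps whatever the action throws in a PrivilegedActionException, and before this patch the cause was always re-wrapped as an AuthenticationException, disguising transient network failures as authentication failures. A self-contained model of the unwrap-before-wrap fix; AuthException stands in for Hadoop's AuthenticationException:

import java.io.IOException;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import javax.security.auth.Subject;

public class UnwrapExample {
  static class AuthException extends Exception {
    AuthException(Throwable cause) { super(cause); }
  }

  static void runAs(Subject subject, PrivilegedExceptionAction<Void> action)
      throws IOException, AuthException {
    try {
      Subject.doAs(subject, action);
    } catch (PrivilegedActionException ex) {
      if (ex.getException() instanceof IOException) {
        throw (IOException) ex.getException();   // surface I/O failures as-is
      }
      throw new AuthException(ex.getException()); // wrap genuine auth failures
    }
  }

  public static void main(String[] args) throws Exception {
    try {
      runAs(new Subject(), () -> { throw new IOException("connection reset"); });
    } catch (IOException expected) {
      // Callers can now retry or report the network error instead of
      // seeing it disguised as an authentication problem.
      System.out.println("got IOException: " + expected.getMessage());
    }
  }
}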



[26/27] hadoop git commit: HDFS-10265. OEV tool fails to read edit xml file if OP_UPDATE_BLOCKS has no BLOCK tag (Wan Chang via cmccabe)

2016-04-18 Thread aengineer
HDFS-10265. OEV tool fails to read edit xml file if OP_UPDATE_BLOCKS has no 
BLOCK tag (Wan Chang via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb3ca460
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb3ca460
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb3ca460

Branch: refs/heads/HDFS-7240
Commit: cb3ca460efb97be8c031bdb14bb7705cc25f2117
Parents: 4770037
Author: Colin Patrick Mccabe 
Authored: Mon Apr 18 11:45:18 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Mon Apr 18 13:47:56 2016 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSEditLogOp.java |  3 ++-
 .../test/java/org/apache/hadoop/hdfs/DFSTestUtil.java   | 12 
 2 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3ca460/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index c4e1a78..a3285a9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -1096,7 +1096,8 @@ public abstract class FSEditLogOp {
 
 @Override void fromXml(Stanza st) throws InvalidXmlException {
   this.path = st.getValue("PATH");
-  List<Stanza> blocks = st.getChildren("BLOCK");
+  List<Stanza> blocks = st.hasChildren("BLOCK") ?
+  st.getChildren("BLOCK") : new ArrayList<Stanza>();
   this.blocks = new Block[blocks.size()];
   for (int i = 0; i < blocks.size(); i++) {
 this.blocks[i] = FSEditLogOp.blockFromXml(blocks.get(i));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3ca460/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 8a52bbb..d159fc5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1272,6 +1272,18 @@ public class DFSTestUtil {
 // OP_APPEND 47
 FSDataOutputStream s2 = filesystem.append(pathFileCreate, 4096, null);
 s2.close();
+
+// OP_UPDATE_BLOCKS 25
+final String updateBlockFile = "/update_blocks";
+FSDataOutputStream fout = filesystem.create(new Path(updateBlockFile), true, 4096, (short)1, 4096L);
+fout.write(1);
+fout.hflush();
+long fileId = ((DFSOutputStream)fout.getWrappedStream()).getFileId();
+DFSClient dfsclient = DFSClientAdapter.getDFSClient(filesystem);
+LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(updateBlockFile, 0, Integer.MAX_VALUE);
+dfsclient.getNamenode().abandonBlock(blocks.get(0).getBlock(), fileId, updateBlockFile, dfsclient.clientName);
+fout.close();
+
 // OP_SET_STORAGE_POLICY 45
 filesystem.setStoragePolicy(pathFileCreate,
 HdfsConstants.HOT_STORAGE_POLICY_NAME);



[06/27] hadoop git commit: HDFS-10286. Fix TestDFSAdmin#testNameNodeGetReconfigurableProperties. Contributed by Xiaobing Zhou.

2016-04-18 Thread aengineer
HDFS-10286. Fix TestDFSAdmin#testNameNodeGetReconfigurableProperties. 
Contributed by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80922675
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80922675
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80922675

Branch: refs/heads/HDFS-7240
Commit: 809226752dd109e16956038017dece16ada6ee0f
Parents: c970f1d
Author: Xiaoyu Yao 
Authored: Thu Apr 14 10:56:33 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Apr 14 10:57:02 2016 -0700

--
 .../src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80922675/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 3ca7fec..63bdf74 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -234,7 +234,7 @@ public class TestDFSAdmin {
 final List<String> outs = Lists.newArrayList();
 final List<String> errs = Lists.newArrayList();
 getReconfigurableProperties("namenode", address, outs, errs);
-assertEquals(4, outs.size());
+assertEquals(5, outs.size());
 assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(1));
 assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(2));
 assertEquals(errs.size(), 0);



[20/27] hadoop git commit: YARN-4965. Distributed shell AM failed due to ClientHandlerException thrown by jersey. Contributed by Junping Du

2016-04-18 Thread aengineer
YARN-4965. Distributed shell AM failed due to ClientHandlerException thrown by 
jersey. Contributed by Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6c07420
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6c07420
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6c07420

Branch: refs/heads/HDFS-7240
Commit: e6c0742012ffeacad2bcaf712d86a7e5d1420b26
Parents: cc8b83a
Author: Xuan 
Authored: Sat Apr 16 19:39:18 2016 -0700
Committer: Xuan 
Committed: Sat Apr 16 19:39:18 2016 -0700

--
 .../jobhistory/JobHistoryEventHandler.java  |  9 ++--
 .../pom.xml |  6 +++
 .../distributedshell/ApplicationMaster.java | 10 ++--
 .../distributedshell/TestDistributedShell.java  | 50 
 .../client/api/impl/TestTimelineClient.java |  2 +-
 5 files changed, 67 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6c07420/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 5690743..47d2389 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -77,6 +77,8 @@ import org.codehaus.jackson.node.JsonNodeFactory;
 import org.codehaus.jackson.node.ObjectNode;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.sun.jersey.api.client.ClientHandlerException;
+
 /**
  * The job history events get routed to this class. This class writes the Job
  * history events to the DFS directly into a staging dir and then moved to a
@@ -1032,12 +1034,9 @@ public class JobHistoryEventHandler extends 
AbstractService
   + error.getErrorCode());
 }
   }
-} catch (IOException ex) {
-  LOG.error("Error putting entity " + tEntity.getEntityId() + " to Timeline"
-  + "Server", ex);
-} catch (YarnException ex) {
+} catch (YarnException | IOException | ClientHandlerException ex) {
   LOG.error("Error putting entity " + tEntity.getEntityId() + " to Timeline"
-  + "Server", ex);
+  + "Server", ex);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6c07420/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
index c118603..dba8fc0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
@@ -133,6 +133,12 @@
 
 <dependency>
   <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-yarn-common</artifactId>
+  <type>test-jar</type>
+  <scope>test</scope>
+</dependency>
+<dependency>
+  <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs</artifactId>
   <scope>test</scope>
 </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6c07420/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index cbe0348..2b85ba8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ 
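The JobHistoryEventHandler hunk above is the heart of the fix: Jersey's ClientHandlerException is unchecked, so without an explicit catch a timeline-server hiccup could propagate out of the event handler and fail the AM. A self-contained model of the multi-catch hardening; FakeClientHandlerException stands in for com.sun.jersey.api.client.ClientHandlerException:

import java.io.IOException;

public class MultiCatchExample {
  static class FakeClientHandlerException extends RuntimeException {
    FakeClientHandlerException(String m) { super(m); }
  }

  static void putEntity(int mode) throws IOException {
    if (mode == 0) throw new IOException("timeline server unreachable");
    if (mode == 1) throw new FakeClientHandlerException("connection refused");
  }

  public static void main(String[] args) {
    for (int mode = 0; mode <= 1; mode++) {
      try {
        putEntity(mode);
      } catch (IOException | FakeClientHandlerException ex) {
        // one log-and-continue path for checked and unchecked failures alike
        System.err.println("Error putting entity to Timeline Server: " + ex);
      }
    }
  }
}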

[15/27] hadoop git commit: HDFS-10293. StripedFileTestUtil#readAll flaky. Contributed by Mingliang Liu.

2016-04-18 Thread aengineer
HDFS-10293. StripedFileTestUtil#readAll flaky. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55e19b7f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55e19b7f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55e19b7f

Branch: refs/heads/HDFS-7240
Commit: 55e19b7f0c1243090dff2d08ed785cefd420b009
Parents: 89a8387
Author: Jing Zhao 
Authored: Fri Apr 15 10:53:40 2016 -0700
Committer: Jing Zhao 
Committed: Fri Apr 15 10:53:40 2016 -0700

--
 .../apache/hadoop/hdfs/StripedFileTestUtil.java | 21 ++--
 1 file changed, 6 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55e19b7f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 0f0221c..6d0dfa8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 import org.junit.Assert;
@@ -85,16 +86,6 @@ public class StripedFileTestUtil {
 return (byte) (pos % mod + 1);
   }
 
-  static int readAll(FSDataInputStream in, byte[] buf) throws IOException {
-int readLen = 0;
-int ret;
-while ((ret = in.read(buf, readLen, buf.length - readLen)) >= 0 &&
-readLen <= buf.length) {
-  readLen += ret;
-}
-return readLen;
-  }
-
   static void verifyLength(FileSystem fs, Path srcPath, int fileLength)
   throws IOException {
 FileStatus status = fs.getFileStatus(srcPath);
@@ -214,11 +205,11 @@ public class StripedFileTestUtil {
   static void assertSeekAndRead(FSDataInputStream fsdis, int pos,
   int writeBytes) throws IOException {
 fsdis.seek(pos);
-byte[] buf = new byte[writeBytes];
-int readLen = StripedFileTestUtil.readAll(fsdis, buf);
-assertEquals(readLen, writeBytes - pos);
-for (int i = 0; i < readLen; i++) {
-  assertEquals("Byte at " + i + " should be the same",
-  StripedFileTestUtil.getByte(pos + i), buf[i]);
+byte[] buf = new byte[writeBytes - pos];
+IOUtils.readFully(fsdis, buf, 0, buf.length);
+for (int i = 0; i < buf.length; i++) {
+  assertEquals("Byte at " + i + " should be the same",
+  StripedFileTestUtil.getByte(pos + i), buf[i]);
 }
   }
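The root cause of the flakiness: InputStream.read() may legally return fewer bytes than requested, so the removed hand-rolled loop could observe a different count from run to run. IOUtils.readFully() either fills the buffer completely or throws, which is the determinism the assertion needs. A standalone demonstration, with ByteArrayInputStream standing in for the striped input stream:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.io.IOUtils;

public class ReadFullyExample {
  public static void main(String[] args) throws IOException {
    byte[] data = {1, 2, 3, 4, 5, 6, 7, 8};
    int pos = 3;
    InputStream in = new ByteArrayInputStream(data);
    long skipped = in.skip(pos);               // stand-in for fsdis.seek(pos)
    byte[] buf = new byte[data.length - pos];  // exactly the bytes that remain
    IOUtils.readFully(in, buf, 0, buf.length); // short read => IOException
    System.out.println("read " + buf.length + " bytes after skipping " + skipped);
  }
}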
 



[18/27] hadoop git commit: YARN-4468. Document the general ReservationSystem functionality, and the REST API. (subru and carlo via asuresh)

2016-04-18 Thread aengineer
YARN-4468. Document the general ReservationSystem functionality, and the REST 
API. (subru and carlo via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cab9cbaa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cab9cbaa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cab9cbaa

Branch: refs/heads/HDFS-7240
Commit: cab9cbaa0a6d92dd6473545da0ea1e6a22fd09e1
Parents: 69f3d42
Author: Arun Suresh 
Authored: Fri Apr 15 16:58:49 2016 -0700
Committer: Arun Suresh 
Committed: Fri Apr 15 16:58:49 2016 -0700

--
 hadoop-project/src/site/site.xml|   1 +
 .../src/site/markdown/ReservationSystem.md  |  65 +++
 .../src/site/markdown/ResourceManagerRest.md| 447 ++-
 .../hadoop-yarn-site/src/site/markdown/YARN.md  |   2 +
 .../images/yarn_reservation_system.png  | Bin 0 -> 85449 bytes
 5 files changed, 513 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cab9cbaa/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 8f02073..f9f4726 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -133,6 +133,7 @@
   
   
   
+  
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cab9cbaa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ReservationSystem.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ReservationSystem.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ReservationSystem.md
new file mode 100644
index 0000000..eda8d4d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ReservationSystem.md
@@ -0,0 +1,65 @@
+
+
+Reservation System
+==
+
+
+* [Purpose](#Purpose)
+* [Overview](#Overview)
+* [Flow of a Reservation](#Flow_of_a_Reservation)
+* [Configuring the Reservation System](#Configuring_the_Reservation_System)
+
+Purpose
+---
+
+This document provides a brief overview of the `YARN ReservationSystem`.
+
+Overview
+
+
+The `ReservationSystem` of YARN provides the user the ability to reserve 
resources over (and ahead of) time, to ensure that important production jobs 
will be run very predictably. The ReservationSystem performs careful admission 
control and provides guarantees over absolute amounts of resources (instead of 
% of cluster size). Reservations can be malleable or have gang semantics,
and can have time-varying resource requirements. The ReservationSystem is a 
component of the YARN ResourceManager.
+
+
+Flow of a Reservation
+--
+
+![YARN Reservation System | width=600px](./images/yarn_reservation_system.png)
+
+With reference to the figure above, a typical reservation proceeds as follows:
+
 * **Step 1**  The user (or an automated tool on its behalf) submits a
reservation request specified by the Reservation Definition Language (RDL).
This describes the user's need for resources over time (e.g., a skyline of
resources) and temporal constraints (e.g., deadline). This can be done both 
programmatically through the usual Client-to-RM protocols or via the REST api 
of the RM.
+
+ * **Step 2**  The ReservationSystem leverages a ReservationAgent (GREE in the 
figure) to find a plausible allocation for the reservation in the Plan, a data 
structure tracking all reservations currently accepted and the available
resources in the system.
+
+ * **Step 3**  The SharingPolicy provides a way to enforce invariants on the 
reservation being accepted, potentially rejecting reservations. For example, 
the CapacityOvertimePolicy allows enforcement of both instantaneous 
max-capacity a user can request across all of his/her reservations and a limit 
on the integral of resources over a period of time, e.g., the user can reserve 
up to 50% of the cluster capacity instantaneously, but in any 24h period of
time he/she cannot exceed 10% average.
+
+ * **Step 4**  Upon a successful validation the ReservationSystem returns to 
the user a ReservationId (think of it as an airline ticket).
+
+ * **Step 5**  When the time comes, a new component called the PlanFollower 
publishes the state of the plan to the scheduler, by dynamically 
creating/tweaking/destroying queues.
+
+ * **Step 6**  The user can then submit one (or more) jobs to the reservable 
queue, by simply including the ReservationId as part of the 
ApplicationSubmissionContext.
+
+ * **Step 7**  The Scheduler will then provide containers from a special queue 
created to ensure resources 
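As a complement to Steps 1, 4, and 6 above, here is a sketch of the client side, assuming the 2.7-era reservation API; the queue name, container sizing, and time window are placeholders:

import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse;
import org.apache.hadoop.yarn.api.records.*;
import org.apache.hadoop.yarn.client.api.YarnClient;

public class ReservationExample {
  public static void main(String[] args) throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new Configuration());
    client.start();

    long now = System.currentTimeMillis();
    // Step 1: 10 containers of <4 GB, 2 vcores> for 30 minutes, to be placed
    // anywhere inside a 2-hour window; R_ALL requests gang semantics.
    ReservationRequest rr = ReservationRequest.newInstance(
        Resource.newInstance(4096, 2), 10, 10, 30 * 60 * 1000L);
    ReservationRequests rrs = ReservationRequests.newInstance(
        Collections.singletonList(rr), ReservationRequestInterpreter.R_ALL);
    ReservationDefinition definition = ReservationDefinition.newInstance(
        now, now + 2 * 60 * 60 * 1000L, rrs, "nightly-report");

    // "reservable" must be a queue with reservations enabled in the scheduler.
    ReservationSubmissionRequest request =
        ReservationSubmissionRequest.newInstance(definition, "reservable");
    // Step 4: the returned ReservationId is the "airline ticket"; Step 6 is
    // submitting an application that carries the id in its
    // ApplicationSubmissionContext.
    ReservationSubmissionResponse response = client.submitReservation(request);
    System.out.println("Got ticket: " + response.getReservationId());
    client.stop();
  }
}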

[02/27] hadoop git commit: HADOOP-12963 Allow using path style addressing for accessing the s3 endpoint. (Stephen Montgomery via stevel)

2016-04-18 Thread aengineer
HADOOP-12963 Allow using path style addressing for accessing the s3 endpoint. 
(Stephen Montgomery via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df18b6e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df18b6e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df18b6e9

Branch: refs/heads/HDFS-7240
Commit: df18b6e9849c53c51a3d317f1254298edd8b17d1
Parents: 40211d1
Author: Steve Loughran 
Authored: Thu Apr 14 12:44:55 2016 +0100
Committer: Steve Loughran 
Committed: Thu Apr 14 12:44:55 2016 +0100

--
 .../src/main/resources/core-default.xml |  7 +++
 .../org/apache/hadoop/fs/s3a/Constants.java | 10 +++--
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 10 +
 .../src/site/markdown/tools/hadoop-aws/index.md |  7 +++
 .../hadoop/fs/s3a/TestS3AConfiguration.java | 47 ++--
 5 files changed, 75 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df18b6e9/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index b3436da..96b108f 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -766,6 +766,13 @@
 </property>
 
 <property>
+  <name>fs.s3a.path.style.access</name>
+  <description>Enable S3 path style access ie disabling the default virtual
+    hosting behaviour. Useful for S3A-compliant storage providers as it
+    removes the need to set up DNS for virtual hosting.</description>
+</property>
+
+<property>
   <name>fs.s3a.proxy.host</name>
   <description>Hostname of the (optional) proxy server for S3
     connections.</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df18b6e9/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index f10f3db..9d79623 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -28,13 +28,17 @@ public class Constants {
   // number of simultaneous connections to s3
   public static final String MAXIMUM_CONNECTIONS = "fs.s3a.connection.maximum";
   public static final int DEFAULT_MAXIMUM_CONNECTIONS = 15;
-  
+
   // connect to s3 over ssl?
   public static final String SECURE_CONNECTIONS = 
"fs.s3a.connection.ssl.enabled";
   public static final boolean DEFAULT_SECURE_CONNECTIONS = true;
 
   //use a custom endpoint?
   public static final String ENDPOINT = "fs.s3a.endpoint";
+
+  //Enable path style access? Overrides default virtual hosting
+  public static final String PATH_STYLE_ACCESS = "fs.s3a.path.style.access";
+
   //connect to s3 through a proxy server?
   public static final String PROXY_HOST = "fs.s3a.proxy.host";
   public static final String PROXY_PORT = "fs.s3a.proxy.port";
@@ -50,7 +54,7 @@ public class Constants {
   // seconds until we give up trying to establish a connection to s3
   public static final String ESTABLISH_TIMEOUT = 
"fs.s3a.connection.establish.timeout";
   public static final int DEFAULT_ESTABLISH_TIMEOUT = 5;
-  
+
   // seconds until we give up on a connection to s3
   public static final String SOCKET_TIMEOUT = "fs.s3a.connection.timeout";
   public static final int DEFAULT_SOCKET_TIMEOUT = 20;
@@ -74,7 +78,7 @@ public class Constants {
   // size of each of or multipart pieces in bytes
   public static final String MULTIPART_SIZE = "fs.s3a.multipart.size";
   public static final long DEFAULT_MULTIPART_SIZE = 104857600; // 100 MB
-  
+
   // minimum size in bytes before we start a multipart uploads or copy
   public static final String MIN_MULTIPART_THRESHOLD = 
"fs.s3a.multipart.threshold";
   public static final long DEFAULT_MIN_MULTIPART_THRESHOLD = Integer.MAX_VALUE;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df18b6e9/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index fe705ce..97092ac 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
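Usage boils down to one new boolean: with path-style access the client addresses https://endpoint/bucket/key instead of https://bucket.endpoint/key, so no wildcard DNS entry is required. A minimal sketch; the endpoint host and bucket name are placeholders:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PathStyleS3A {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.s3a.endpoint", "s3.storage.example.com"); // custom endpoint
    conf.setBoolean("fs.s3a.path.style.access", true);     // the new switch
    FileSystem fs = FileSystem.get(URI.create("s3a://my-bucket/"), conf);
    System.out.println(fs.exists(new Path("/data")));
  }
}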

[23/27] hadoop git commit: HDFS-10275. TestDataNodeMetrics failing intermittently due to TotalWriteTime counted incorrectly. Contributed by Lin Yiqun.

2016-04-18 Thread aengineer
HDFS-10275. TestDataNodeMetrics failing intermittently due to TotalWriteTime 
counted incorrectly. Contributed by Lin Yiqun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab903029
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab903029
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab903029

Branch: refs/heads/HDFS-7240
Commit: ab903029a9d353677184ff5602966b11ffb408b9
Parents: 67523ff
Author: Walter Su 
Authored: Mon Apr 18 20:29:29 2016 +0800
Committer: Walter Su 
Committed: Mon Apr 18 20:29:29 2016 +0800

--
 .../hadoop/hdfs/server/datanode/TestDataNodeMetrics.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab903029/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 5f9b602..355f7a1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -258,10 +258,9 @@ public class TestDataNodeMetrics {
* and reading causes totalReadTime to move.
* @throws Exception
*/
-  @Test(timeout=60000)
+  @Test(timeout=120000)
   public void testDataNodeTimeSpend() throws Exception {
 Configuration conf = new HdfsConfiguration();
-SimulatedFSDataset.setFactory(conf);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 try {
   final FileSystem fs = cluster.getFileSystem();
@@ -284,6 +283,7 @@ public class TestDataNodeMetrics {
 DFSTestUtil.createFile(fs, new Path("/time.txt." + x.get()),
 LONG_FILE_LEN, (short) 1, Time.monotonicNow());
 DFSTestUtil.readFile(fs, new Path("/time.txt." + x.get()));
+fs.delete(new Path("/time.txt." + x.get()), true);
   } catch (IOException ioe) {
 LOG.error("Caught IOException while ingesting DN metrics", ioe);
 return false;
@@ -294,7 +294,7 @@ public class TestDataNodeMetrics {
   return endWriteValue > startWriteValue
   && endReadValue > startReadValue;
 }
-  }, 30, 30000);
+  }, 30, 60000);
 } finally {
   if (cluster != null) {
 cluster.shutdown();



[22/27] hadoop git commit: HDFS-9412. getBlocks occupies FSLock and takes too long to complete. Contributed by He Tianyi.

2016-04-18 Thread aengineer
HDFS-9412. getBlocks occupies FSLock and takes too long to complete. 
Contributed by He Tianyi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67523ffc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67523ffc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67523ffc

Branch: refs/heads/HDFS-7240
Commit: 67523ffcf491f4f2db5335899c00a174d0caaa9b
Parents: fdc46bf
Author: Walter Su 
Authored: Mon Apr 18 09:28:02 2016 +0800
Committer: Walter Su 
Committed: Mon Apr 18 09:28:02 2016 +0800

--
 .../hdfs/server/blockmanagement/BlockManager.java  | 17 +
 .../java/org/apache/hadoop/hdfs/TestGetBlocks.java |  8 ++--
 2 files changed, 23 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/67523ffc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 104d723..8b50ef8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -305,6 +305,14 @@ public class BlockManager implements BlockStatsMXBean {
* processed again after aquiring lock again.
*/
   private int numBlocksPerIteration;
+
+  /**
+   * Minimum size that a block can be sent to Balancer through getBlocks.
+   * And after HDFS-8824, the small blocks are unused anyway, so there's no
+   * point to send them to balancer.
+   */
+  private long getBlocksMinBlockSize = -1;
+
   /**
* Progress of the Reconstruction queues initialisation.
*/
@@ -414,6 +422,9 @@ public class BlockManager implements BlockStatsMXBean {
 this.numBlocksPerIteration = conf.getInt(
 DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT,
 DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT);
+this.getBlocksMinBlockSize = conf.getLongBytes(
+DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY,
+DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_DEFAULT);
 this.blockReportLeaseManager = new BlockReportLeaseManager(conf);
 
 bmSafeMode = new BlockManagerSafeMode(this, namesystem, haEnabled, conf);
@@ -1179,6 +1190,9 @@ public class BlockManager implements BlockStatsMXBean {
 while(totalSize<size && iter.hasNext()) {
   curBlock = iter.next();
   if(!curBlock.isComplete())  continue;
+  if (curBlock.getNumBytes() < getBlocksMinBlockSize) {
+continue;
+  }
   totalSize += addBlock(curBlock, results);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67523ffc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
index 741e641..6e4b0f9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
@@ -179,11 +179,15 @@ public class TestGetBlocks {
 final int DEFAULT_BLOCK_SIZE = 1024;
 
 CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
+CONF.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY,
+  DEFAULT_BLOCK_SIZE);
+
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
 REPLICATION_FACTOR).build();
 try {
   cluster.waitActive();
-  long fileLen = 2 * DEFAULT_BLOCK_SIZE;
+  // the third block will not be visible to getBlocks
+  long fileLen = 2 * DEFAULT_BLOCK_SIZE + 1;
   DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/tmp.txt"),
   fileLen, REPLICATION_FACTOR, 0L);
 
@@ -196,7 +200,7 @@ public class TestGetBlocks {
 DFSUtilClient.getNNAddress(CONF), CONF);
 locatedBlocks = dfsclient.getNamenode()
 .getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
-assertEquals(2, locatedBlocks.size());
+assertEquals(3, locatedBlocks.size());
 notWritten = false;
 for (int i = 0; i < 2; i++) {
   dataNodes = locatedBlocks.get(i).getLocations();
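The tunable the patch introduces is DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, as the test above exercises. A short sketch of raising it; the 32 MB figure is only an example (after HDFS-8824 the Balancer ignores small blocks anyway, so filtering them server-side shortens the time getBlocks() holds the namesystem lock):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class BalancerMinBlockSize {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Blocks below this size are never handed to the Balancer.
    conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY,
        32L * 1024 * 1024);
    System.out.println(conf.getLong(
        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, -1));
  }
}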



[16/27] hadoop git commit: YARN-4909. Fix intermittent failures of TestRMWebServices And TestRMWithCSRFFilter. Contributed by Bibin A Chundatt

2016-04-18 Thread aengineer
YARN-4909. Fix intermittent failures of TestRMWebServices And 
TestRMWithCSRFFilter. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdbafbc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdbafbc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdbafbc9

Branch: refs/heads/HDFS-7240
Commit: fdbafbc9e59314d9f9f75e615de9d2dfdced017b
Parents: 55e19b7
Author: Naganarasimha 
Authored: Fri Apr 15 23:37:05 2016 +0530
Committer: Naganarasimha 
Committed: Fri Apr 15 23:37:05 2016 +0530

--
 .../apache/hadoop/yarn/webapp/JerseyTestBase.java | 18 +-
 1 file changed, 13 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdbafbc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java
index 7a225a3..d537fa7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java
@@ -19,9 +19,10 @@
 package org.apache.hadoop.yarn.webapp;
 
 import java.io.IOException;
+import java.util.Random;
 
 import org.apache.hadoop.net.ServerSocketUtil;
-import org.junit.Before;
+
 import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
@@ -30,9 +31,16 @@ public abstract class JerseyTestBase extends JerseyTest {
 super(appDescriptor);
   }
 
-  @Before
-  public void initializeJerseyPort() throws IOException {
-int jerseyPort = ServerSocketUtil.getPort(9998, 10);
-System.setProperty("jersey.test.port", Integer.toString(jerseyPort));
+  @Override
+  protected int getPort(int port) {
+Random rand = new Random();
+int jerseyPort = port + rand.nextInt(1000);
+try {
+  jerseyPort = ServerSocketUtil.getPort(jerseyPort, 10);
+} catch (IOException e) {
+  // Ignore exception even after 10 times free port is
+  // not received.
+}
+return super.getPort(jerseyPort);
   }
 }



[19/27] hadoop git commit: MAPREDUCE-6649. getFailureInfo not returning any failure info. Contributed by Eric Badger

2016-04-18 Thread aengineer
MAPREDUCE-6649. getFailureInfo not returning any failure info. Contributed by 
Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc8b83a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc8b83a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc8b83a8

Branch: refs/heads/HDFS-7240
Commit: cc8b83a8e85bfc65974cf5e86337855cd4724c1d
Parents: cab9cba
Author: Eric Payne 
Authored: Sat Apr 16 22:07:27 2016 +
Committer: Eric Payne 
Committed: Sat Apr 16 22:07:27 2016 +

--
 .../hadoop/mapreduce/v2/hs/CompletedJob.java|  9 ++-
 .../mapreduce/v2/hs/TestJobHistoryEntities.java | 27 
 2 files changed, 35 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc8b83a8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
index ad4e6bc..4deb9ae 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
@@ -142,6 +142,7 @@ public class CompletedJob implements 
org.apache.hadoop.mapreduce.v2.app.job.Job
 report.setFinishTime(jobInfo.getFinishTime());
 report.setJobName(jobInfo.getJobname());
 report.setUser(jobInfo.getUsername());
+report.setDiagnostics(jobInfo.getErrorInfo());
 
 if ( getTotalMaps() == 0 ) {
   report.setMapProgress(1.0f);
@@ -335,6 +336,12 @@ public class CompletedJob implements 
org.apache.hadoop.mapreduce.v2.app.job.Job
 }
   }
 
+  protected JobHistoryParser createJobHistoryParser(Path historyFileAbsolute)
+  throws IOException {
+return new JobHistoryParser(historyFileAbsolute.getFileSystem(conf),
+historyFileAbsolute);
+  }
+
   //History data is leisurely loaded when task level data is requested
   protected synchronized void loadFullHistoryData(boolean loadTasks,
   Path historyFileAbsolute) throws IOException {
@@ -347,7 +354,7 @@ public class CompletedJob implements 
org.apache.hadoop.mapreduce.v2.app.job.Job
   JobHistoryParser parser = null;
   try {
 final FileSystem fs = historyFileAbsolute.getFileSystem(conf);
-parser = new JobHistoryParser(fs, historyFileAbsolute);
+parser = createJobHistoryParser(historyFileAbsolute);
 final Path jobConfPath = new Path(historyFileAbsolute.getParent(),
 JobHistoryUtils.getIntermediateConfFileName(jobId));
 final Configuration conf = new Configuration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc8b83a8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java
index 9608fc8..c6ddae5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java
@@ -19,14 +19,18 @@ package org.apache.hadoop.mapreduce.v2.hs;
 
 import static org.junit.Assert.assertEquals;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.JobACLsManager;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ 
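The createJobHistoryParser() extraction is a classic protected-factory-method seam: the test can subclass CompletedJob and return a stubbed parser with no filesystem access. A self-contained illustration of the seam; Parser, HistoryLoader, and FactorySeamExample are hypothetical names, not Hadoop classes:

import java.io.IOException;

class Parser {
  String parse() throws IOException { return "parsed-from-hdfs"; }
}

class HistoryLoader {
  protected Parser createParser() throws IOException { // the seam
    return new Parser();
  }
  String load() throws IOException {
    return createParser().parse();
  }
}

public class FactorySeamExample {
  public static void main(String[] args) throws IOException {
    HistoryLoader testLoader = new HistoryLoader() {
      @Override
      protected Parser createParser() {                 // test override: no I/O
        return new Parser() {
          @Override String parse() { return "stubbed"; }
        };
      }
    };
    System.out.println(testLoader.load());              // prints "stubbed"
  }
}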

[07/27] hadoop git commit: MAPREDUCE-6513. MR job got hanged forever when one NM unstable for some time. (Varun Saxena via wangda)

2016-04-18 Thread aengineer
MAPREDUCE-6513. MR job got hanged forever when one NM unstable for some time. 
(Varun Saxena via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b2880c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b2880c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b2880c0

Branch: refs/heads/HDFS-7240
Commit: 8b2880c0b62102fc5c8b6962752f72cb2c416a01
Parents: 8092267
Author: Wangda Tan 
Authored: Thu Apr 14 11:00:53 2016 -0700
Committer: Wangda Tan 
Committed: Thu Apr 14 11:00:53 2016 -0700

--
 .../v2/app/job/event/TaskAttemptKillEvent.java  | 15 +++-
 .../app/job/event/TaskTAttemptKilledEvent.java  | 40 +
 .../mapreduce/v2/app/job/impl/JobImpl.java  |  4 +-
 .../v2/app/job/impl/TaskAttemptImpl.java| 48 ---
 .../mapreduce/v2/app/job/impl/TaskImpl.java | 25 +-
 .../v2/app/rm/RMContainerAllocator.java |  4 +-
 .../hadoop/mapreduce/v2/app/TestMRApp.java  | 51 +++-
 .../v2/app/job/impl/TestTaskAttempt.java| 87 ++--
 .../mapreduce/v2/app/job/impl/TestTaskImpl.java | 75 ++---
 9 files changed, 311 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b2880c0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptKillEvent.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptKillEvent.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptKillEvent.java
index 9bcc838..767ef0d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptKillEvent.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptKillEvent.java
@@ -24,14 +24,27 @@ import 
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 public class TaskAttemptKillEvent extends TaskAttemptEvent {
 
   private final String message;
+  // Next map attempt will be rescheduled(i.e. updated in ask with higher
+  // priority equivalent to that of a fast fail map)
+  private final boolean rescheduleAttempt;
 
   public TaskAttemptKillEvent(TaskAttemptId attemptID,
-  String message) {
+  String message, boolean rescheduleAttempt) {
 super(attemptID, TaskAttemptEventType.TA_KILL);
 this.message = message;
+this.rescheduleAttempt = rescheduleAttempt;
+  }
+
+  public TaskAttemptKillEvent(TaskAttemptId attemptID,
+  String message) {
+this(attemptID, message, false);
   }
 
   public String getMessage() {
 return message;
   }
+
+  public boolean getRescheduleAttempt() {
+return rescheduleAttempt;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b2880c0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskTAttemptKilledEvent.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskTAttemptKilledEvent.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskTAttemptKilledEvent.java
new file mode 100644
index 0000000..897444d
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskTAttemptKilledEvent.java
@@ -0,0 +1,40 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the 

[21/27] hadoop git commit: YARN-4934. Reserved Resource for QueueMetrics needs to be handled correctly in few cases. (Sunil G via wangda)

2016-04-18 Thread aengineer
YARN-4934. Reserved Resource for QueueMetrics needs to be handled correctly in 
few cases. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdc46bfb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdc46bfb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdc46bfb

Branch: refs/heads/HDFS-7240
Commit: fdc46bfb37776d8c41b68f6c33a2379d0f329994
Parents: e6c0742
Author: Wangda Tan 
Authored: Sat Apr 16 22:47:41 2016 -0700
Committer: Wangda Tan 
Committed: Sat Apr 16 22:47:41 2016 -0700

--
 .../scheduler/capacity/LeafQueue.java   |   7 -
 .../scheduler/common/fica/FiCaSchedulerApp.java |   2 +
 .../capacity/TestContainerAllocation.java   | 188 ++-
 3 files changed, 189 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdc46bfb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index aabdf9c..fbcb91c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -1348,13 +1348,6 @@ public class LeafQueue extends AbstractCSQueue {
 // Book-keeping
 if (removed) {
 
-  // track reserved resource for metrics, for normal container
-  // getReservedResource will be null.
-  Resource reservedRes = rmContainer.getReservedResource();
-  if (reservedRes != null && !reservedRes.equals(Resources.none())) {
-decReservedResource(node.getPartition(), reservedRes);
-  }
-
   // Inform the ordering policy
   orderingPolicy.containerReleased(application, rmContainer);
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdc46bfb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index f474aad..35329d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -246,6 +246,8 @@ public class FiCaSchedulerApp extends 
SchedulerApplicationAttempt {
   // Update reserved metrics
   queue.getMetrics().unreserveResource(getUser(),
   rmContainer.getReservedResource());
+  queue.decReservedResource(node.getPartition(),
+  rmContainer.getReservedResource());
   return true;
 }
 return false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdc46bfb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
index 84eba10..f94c963 100644
--- 

[04/27] hadoop git commit: HDFS-10216. Distcp -diff throws exception when handling relative path. Contributed by Takashi Ohnishi.

2016-04-18 Thread aengineer
HDFS-10216. Distcp -diff throws exception when handling relative path. 
Contributed by Takashi Ohnishi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/404f57f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/404f57f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/404f57f3

Branch: refs/heads/HDFS-7240
Commit: 404f57f328b00a42ec8b952ad08cd7a80207c7f2
Parents: 0d1c115
Author: Jing Zhao 
Authored: Thu Apr 14 10:35:22 2016 -0700
Committer: Jing Zhao 
Committed: Thu Apr 14 10:35:22 2016 -0700

--
 .../apache/hadoop/tools/SimpleCopyListing.java  |  2 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java | 38 
 2 files changed, 39 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/404f57f3/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
index d2598a4..cabb7e3 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
@@ -191,7 +191,7 @@ public class SimpleCopyListing extends CopyListing {
   authority = fs.getUri().getAuthority();
 }
 
-return new Path(scheme, authority, path.toUri().getPath());
+return new Path(scheme, authority, makeQualified(path).toUri().getPath());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/404f57f3/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
index 04de8e4..90e6840 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
@@ -674,4 +674,42 @@ public class TestDistCpSync {
 
 testAndVerify(numCreatedModified);
   }
+
+  private void initData9(Path dir) throws Exception {
+final Path foo = new Path(dir, "foo");
+final Path foo_f1 = new Path(foo, "f1");
+
+DFSTestUtil.createFile(dfs, foo_f1, BLOCK_SIZE, DATA_NUM, 0L);
+  }
+
+  private void changeData9(Path dir) throws Exception {
+final Path foo = new Path(dir, "foo");
+final Path foo_f2 = new Path(foo, "f2");
+
+DFSTestUtil.createFile(dfs, foo_f2, BLOCK_SIZE, DATA_NUM, 0L);
+  }
+
+  /**
+   * Test a case where the source path is relative.
+   */
+  @Test
+  public void testSync9() throws Exception {
+
+// use /user/$USER/source for source directory
+Path sourcePath = new Path(dfs.getWorkingDirectory(), "source");
+initData9(sourcePath);
+initData9(target);
+dfs.allowSnapshot(sourcePath);
+dfs.allowSnapshot(target);
+dfs.createSnapshot(sourcePath, "s1");
+dfs.createSnapshot(target, "s1");
+changeData9(sourcePath);
+dfs.createSnapshot(sourcePath, "s2");
+
+String[] args = new String[]{"-update","-diff", "s1", "s2",
+   "source", target.toString()};
+new DistCp(conf, OptionsParser.parse(args)).execute();
+verifyCopy(dfs.getFileStatus(sourcePath),
+ dfs.getFileStatus(target), false);
+  }
 }
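
The one-line SimpleCopyListing fix above hinges on Path qualification. A
hedged sketch of what makeQualified adds to a relative path, using the local
filesystem (the printed scheme and prefix depend on the environment):

// Sketch only: demonstrates FileSystem.makeQualified semantics.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class QualifySketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path relative = new Path("source");
    // A bare relative path has no scheme or authority; makeQualified resolves
    // it against the filesystem's working directory, e.g. file:/home/user/source.
    System.out.println(fs.makeQualified(relative));
  }
}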



[12/27] hadoop git commit: HADOOP-12989. Some tests in org.apache.hadoop.fs.shell.find occasionally time out. Contributed by Takashi Ohnishi.

2016-04-18 Thread aengineer
HADOOP-12989. Some tests in org.apache.hadoop.fs.shell.find occasionally time 
out. Contributed by Takashi Ohnishi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e6b6dd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e6b6dd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e6b6dd5

Branch: refs/heads/HDFS-7240
Commit: 6e6b6dd5aaf93cfb373833cd175ee722d2cb708f
Parents: b9c9d03
Author: Akira Ajisaka 
Authored: Fri Apr 15 14:14:36 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Apr 15 14:14:36 2016 +0900

--
 .../apache/hadoop/fs/shell/find/TestAnd.java| 25 +++-
 .../fs/shell/find/TestFilterExpression.java | 27 +++--
 .../apache/hadoop/fs/shell/find/TestFind.java   |  3 +-
 .../apache/hadoop/fs/shell/find/TestIname.java  | 17 +---
 .../apache/hadoop/fs/shell/find/TestName.java   | 17 +---
 .../apache/hadoop/fs/shell/find/TestPrint.java  |  9 -
 .../apache/hadoop/fs/shell/find/TestPrint0.java |  9 -
 .../apache/hadoop/fs/shell/find/TestResult.java | 41 +++-
 8 files changed, 92 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e6b6dd5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestAnd.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestAnd.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestAnd.java
index d82a25e..bb5ca4c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestAnd.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestAnd.java
@@ -26,12 +26,17 @@ import java.util.Deque;
 import java.util.LinkedList;
 
 import org.apache.hadoop.fs.shell.PathData;
+import org.junit.Rule;
+import org.junit.rules.Timeout;
 import org.junit.Test;
 
 public class TestAnd {
 
+  @Rule
+  public Timeout globalTimeout = new Timeout(1);
+
   // test all expressions passing
-  @Test(timeout = 1000)
+  @Test
   public void testPass() throws IOException {
 And and = new And();
 
@@ -56,7 +61,7 @@ public class TestAnd {
   }
 
   // test the first expression failing
-  @Test(timeout = 1000)
+  @Test
   public void testFailFirst() throws IOException {
 And and = new And();
 
@@ -80,7 +85,7 @@ public class TestAnd {
   }
 
   // test the second expression failing
-  @Test(timeout = 1000)
+  @Test
   public void testFailSecond() throws IOException {
 And and = new And();
 
@@ -105,7 +110,7 @@ public class TestAnd {
   }
 
   // test both expressions failing
-  @Test(timeout = 1000)
+  @Test
   public void testFailBoth() throws IOException {
 And and = new And();
 
@@ -129,7 +134,7 @@ public class TestAnd {
   }
 
   // test the first expression stopping
-  @Test(timeout = 1000)
+  @Test
   public void testStopFirst() throws IOException {
 And and = new And();
 
@@ -154,7 +159,7 @@ public class TestAnd {
   }
 
   // test the second expression stopping
-  @Test(timeout = 1000)
+  @Test
   public void testStopSecond() throws IOException {
 And and = new And();
 
@@ -179,7 +184,7 @@ public class TestAnd {
   }
 
   // test first expression stopping and second failing
-  @Test(timeout = 1000)
+  @Test
   public void testStopFail() throws IOException {
 And and = new And();
 
@@ -204,7 +209,7 @@ public class TestAnd {
   }
 
   // test setOptions is called on child
-  @Test(timeout = 1000)
+  @Test
   public void testSetOptions() throws IOException {
 And and = new And();
 Expression first = mock(Expression.class);
@@ -224,7 +229,7 @@ public class TestAnd {
   }
 
   // test prepare is called on child
-  @Test(timeout = 1000)
+  @Test
   public void testPrepare() throws IOException {
 And and = new And();
 Expression first = mock(Expression.class);
@@ -243,7 +248,7 @@ public class TestAnd {
   }
 
   // test finish is called on child
-  @Test(timeout = 1000)
+  @Test
   public void testFinish() throws IOException {
 And and = new And();
 Expression first = mock(Expression.class);
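
For reference, the class-level rule pattern these hunks apply looks like this
in isolation (JUnit 4; the 10-second bound is only an example value, and on
JUnit versions before 4.12 the equivalent is new Timeout(10000)):

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;

public class TimeoutRuleSketch {
  // One rule covers every @Test in the class, replacing per-method
  // @Test(timeout = ...) attributes.
  @Rule
  public Timeout globalTimeout = Timeout.millis(10000);

  @Test
  public void finishesWellUnderTheBound() throws InterruptedException {
    Thread.sleep(10);
  }
}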

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e6b6dd5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFilterExpression.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFilterExpression.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFilterExpression.java
index 5986a06..7ad0574 100644
--- 

[14/27] hadoop git commit: HDFS-10283. o.a.h.hdfs.server.namenode.TestFSImageWithSnapshot#testSaveLoadImageWithAppending fails intermittently. Contributed by Mingliang Liu.

2016-04-18 Thread aengineer
HDFS-10283. 
o.a.h.hdfs.server.namenode.TestFSImageWithSnapshot#testSaveLoadImageWithAppending
 fails intermittently. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89a83876
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89a83876
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89a83876

Branch: refs/heads/HDFS-7240
Commit: 89a838769ff5b6c64565e6949b14d7fed05daf54
Parents: 4feed9b
Author: Jing Zhao 
Authored: Fri Apr 15 10:49:21 2016 -0700
Committer: Jing Zhao 
Committed: Fri Apr 15 10:49:21 2016 -0700

--
 .../namenode/TestFSImageWithSnapshot.java   | 43 ++--
 1 file changed, 22 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89a83876/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
index 1904bbc..6be3950 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
@@ -47,6 +47,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
+
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -62,7 +63,7 @@ public class TestFSImageWithSnapshot {
   }
 
   static final long seed = 0;
-  static final short REPLICATION = 3;
+  static final short NUM_DATANODES = 3;
   static final int BLOCKSIZE = 1024;
   static final long txid = 1;
 
@@ -78,7 +79,7 @@ public class TestFSImageWithSnapshot {
   @Before
   public void setUp() throws Exception {
 conf = new Configuration();
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES)
 .build();
 cluster.waitActive();
 fsn = cluster.getNamesystem();
@@ -177,7 +178,7 @@ public class TestFSImageWithSnapshot {
 
 cluster.shutdown();
 cluster = new MiniDFSCluster.Builder(conf).format(false)
-.numDataNodes(REPLICATION).build();
+.numDataNodes(NUM_DATANODES).build();
 cluster.waitActive();
 fsn = cluster.getNamesystem();
 hdfs = cluster.getFileSystem();
@@ -188,7 +189,7 @@ public class TestFSImageWithSnapshot {
 hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
 cluster.shutdown();
 cluster = new MiniDFSCluster.Builder(conf).format(false)
-.numDataNodes(REPLICATION).build();
+.numDataNodes(NUM_DATANODES).build();
 cluster.waitActive();
 fsn = cluster.getNamesystem();
 hdfs = cluster.getFileSystem();
@@ -215,7 +216,7 @@ public class TestFSImageWithSnapshot {
 hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
 cluster.shutdown();
 cluster = new MiniDFSCluster.Builder(conf).format(false)
-.numDataNodes(REPLICATION).build();
+.numDataNodes(NUM_DATANODES).build();
 cluster.waitActive();
 fsn = cluster.getNamesystem();
 hdfs = cluster.getFileSystem();
@@ -248,20 +249,20 @@ public class TestFSImageWithSnapshot {
 hdfs.createSnapshot(dir, "s" + ++s);
 Path sub1file1 = new Path(sub1, "sub1file1");
 Path sub1file2 = new Path(sub1, "sub1file2");
-DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
-DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
+DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, (short) 1, seed);
+DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, (short) 1, seed);
 checkImage(s);
 
 hdfs.createSnapshot(dir, "s" + ++s);
 Path sub2 = new Path(dir, "sub2");
 Path sub2file1 = new Path(sub2, "sub2file1");
 Path sub2file2 = new Path(sub2, "sub2file2");
-DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, REPLICATION, seed);
-DFSTestUtil.createFile(hdfs, sub2file2, BLOCKSIZE, REPLICATION, seed);
+DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, (short) 1, seed);
+DFSTestUtil.createFile(hdfs, sub2file2, BLOCKSIZE, (short) 1, seed);
 checkImage(s);
 
 hdfs.createSnapshot(dir, "s" + ++s);
-hdfs.setReplication(sub1file1, (short) (REPLICATION - 1));
+hdfs.setReplication(sub1file1, (short) 1);
 hdfs.delete(sub1file2, true);
 

[17/27] hadoop git commit: YARN-4940. yarn node -list -all failed if RM start with decommissioned node. Contributed by sandflee

2016-04-18 Thread aengineer
YARN-4940. yarn node -list -all failed if RM start with decommissioned node. 
Contributed by sandflee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69f3d428
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69f3d428
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69f3d428

Branch: refs/heads/HDFS-7240
Commit: 69f3d428d5c3ab0c79cacffc22b1f59408622ae7
Parents: fdbafbc
Author: Jason Lowe 
Authored: Fri Apr 15 20:36:45 2016 +
Committer: Jason Lowe 
Committed: Fri Apr 15 20:36:45 2016 +

--
 .../resourcemanager/NodesListManager.java   | 36 ++
 .../resourcemanager/rmnode/RMNodeImpl.java  |  4 +-
 .../resourcemanager/TestClientRMService.java| 49 +++-
 .../resourcemanager/TestRMNodeTransitions.java  |  4 +-
 4 files changed, 55 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69f3d428/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
index ec2708e..121c418 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
@@ -163,7 +163,7 @@ public class NodesListManager extends CompositeService 
implements
   private void setDecomissionedNMs() {
Set<String> excludeList = hostsReader.getExcludedHosts();
 for (final String host : excludeList) {
-  UnknownNodeId nodeId = new UnknownNodeId(host);
+  NodeId nodeId = createUnknownNodeId(host);
   RMNodeImpl rmNode = new RMNodeImpl(nodeId,
   rmContext, host, -1, -1, new UnknownNode(host), null, null);
   rmContext.getInactiveRMNodes().put(nodeId, rmNode);
@@ -430,38 +430,8 @@ public class NodesListManager extends CompositeService 
implements
* A NodeId instance needed upon startup for populating inactive nodes Map.
* It only knows the hostname/ip and marks the port to -1 or invalid.
*/
-  public static class UnknownNodeId extends NodeId {
-
-private String host;
-
-public UnknownNodeId(String host) {
-  this.host = host;
-}
-
-@Override
-public String getHost() {
-  return this.host;
-}
-
-@Override
-protected void setHost(String hst) {
-
-}
-
-@Override
-public int getPort() {
-  return -1;
-}
-
-@Override
-protected void setPort(int port) {
-
-}
-
-@Override
-protected void build() {
-
-}
+  public static NodeId createUnknownNodeId(String host) {
+return NodeId.newInstance(host, -1);
   }
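
The refactoring above replaces a hand-rolled NodeId subclass with a factory
that returns the canonical type and a sentinel port. A standalone sketch of
why that matters for map lookups, using simplified stand-in types rather than
the real YARN classes:

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

public class UnknownNodeIdSketch {

  static final class SimpleNodeId {
    final String host;
    final int port;

    SimpleNodeId(String host, int port) {
      this.host = host;
      this.port = port;
    }

    @Override
    public boolean equals(Object o) {
      if (!(o instanceof SimpleNodeId)) {
        return false;
      }
      SimpleNodeId other = (SimpleNodeId) o;
      return port == other.port && host.equals(other.host);
    }

    @Override
    public int hashCode() {
      return Objects.hash(host, port);
    }
  }

  // Port -1 marks "port unknown"; two calls for the same host yield equal keys.
  static SimpleNodeId createUnknownNodeId(String host) {
    return new SimpleNodeId(host, -1);
  }

  public static void main(String[] args) {
    Map<SimpleNodeId, String> inactiveNodes = new HashMap<>();
    inactiveNodes.put(createUnknownNodeId("host1"), "DECOMMISSIONED");
    // An independently created id for the same host still finds the entry:
    System.out.println(inactiveNodes.get(createUnknownNodeId("host1")));
  }
}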
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69f3d428/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 5f8317e..9b80716 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -786,8 +786,8 @@ public class RMNodeImpl implements RMNode, 
EventHandler<RMNodeEvent> {
   if (previousRMNode != null) {
 rmNode.updateMetricsForRejoinedNode(previousRMNode.getState());
   } else {
-NodesListManager.UnknownNodeId unknownNodeId =
-new NodesListManager.UnknownNodeId(nodeId.getHost());
+NodeId unknownNodeId =
+NodesListManager.createUnknownNodeId(nodeId.getHost());
 previousRMNode =
 

[05/27] hadoop git commit: HDFS-10280. Document new dfsadmin command -evictWriters. Contributed by Wei-Chiu Chuang.

2016-04-18 Thread aengineer
HDFS-10280. Document new dfsadmin command -evictWriters. Contributed by 
Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c970f1d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c970f1d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c970f1d0

Branch: refs/heads/HDFS-7240
Commit: c970f1d00525e4273075cff7406dcbd71305abd5
Parents: 404f57f3
Author: Kihwal Lee 
Authored: Thu Apr 14 12:45:47 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 12:45:47 2016 -0500

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java | 4 
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md| 2 ++
 2 files changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c970f1d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index a35246f..08d3da5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -427,6 +427,7 @@ public class DFSAdmin extends FsShell {
 "\t[-allowSnapshot ]\n" +
 "\t[-disallowSnapshot ]\n" +
 "\t[-shutdownDatanode  [upgrade]]\n" +
+"\t[-evictWriters ]\n" +
 "\t[-getDatanodeInfo ]\n" +
 "\t[-metasave filename]\n" +
 "\t[-triggerBlockReport [-incremental] ]\n" +
@@ -1829,6 +1830,9 @@ public class DFSAdmin extends FsShell {
 } else if ("-shutdownDatanode".equals(cmd)) {
   System.err.println("Usage: hdfs dfsadmin"
   + " [-shutdownDatanode  [upgrade]]");
+} else if ("-evictWriters".equals(cmd)) {
+  System.err.println("Usage: hdfs dfsadmin"
+  + " [-evictWriters ]");
 } else if ("-getDatanodeInfo".equals(cmd)) {
   System.err.println("Usage: hdfs dfsadmin"
   + " [-getDatanodeInfo ]");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c970f1d0/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 035abd6..a6c8b4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -385,6 +385,7 @@ Usage:
hdfs dfsadmin [-allowSnapshot <snapshotDir>]
hdfs dfsadmin [-disallowSnapshot <snapshotDir>]
hdfs dfsadmin [-shutdownDatanode <datanode_host:ipc_port> [upgrade]]
+hdfs dfsadmin [-evictWriters <datanode_host:ipc_port>]
hdfs dfsadmin [-getDatanodeInfo <datanode_host:ipc_port>]
hdfs dfsadmin [-metasave filename]
hdfs dfsadmin [-triggerBlockReport [-incremental] <datanode_host:ipc_port>]
@@ -419,6 +420,7 @@ Usage:
| `-allowSnapshot` \<snapshotDir\> | Allowing snapshots of a directory to be created. If the operation completes successfully, the directory becomes snapshottable. See the [HDFS Snapshot Documentation](./HdfsSnapshots.html) for more information. |
| `-disallowSnapshot` \<snapshotDir\> | Disallowing snapshots of a directory to be created. All snapshots of the directory must be deleted before disallowing snapshots. See the [HDFS Snapshot Documentation](./HdfsSnapshots.html) for more information. |
| `-shutdownDatanode` \<datanode_host:ipc_port\> [upgrade] | Submit a shutdown request for the given datanode. See [Rolling Upgrade document](./HdfsRollingUpgrade.html#dfsadmin_-shutdownDatanode) for the detail. |
+| `-evictWriters` \<datanode_host:ipc_port\> | Make the datanode evict all clients that are writing a block. This is useful if decommissioning is hung due to slow writers. |
| `-getDatanodeInfo` \<datanode_host:ipc_port\> | Get the information about the given datanode. See [Rolling Upgrade document](./HdfsRollingUpgrade.html#dfsadmin_-getDatanodeInfo) for the detail. |
| `-metasave` filename | Save Namenode's primary data structures to *filename* in the directory specified by hadoop.log.dir property. *filename* is overwritten if it exists. *filename* will contain one line for each of the following: 1. Datanodes heart beating with Namenode; 2. Blocks waiting to be replicated; 3. Blocks currently being replicated; 4. Blocks waiting to be deleted. |
| `-triggerBlockReport` `[-incremental]` \<datanode_host:ipc_port\> | Trigger a block report for the given datanode. If 'incremental' is specified, it will be an incremental block report; otherwise, it will be a full block report. |



[11/27] hadoop git commit: HDFS-10281. TestPendingCorruptDnMessages fails intermittently. Contributed by Mingliang Liu.

2016-04-18 Thread aengineer
HDFS-10281. TestPendingCorruptDnMessages fails intermittently. Contributed by 
Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9c9d035
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9c9d035
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9c9d035

Branch: refs/heads/HDFS-7240
Commit: b9c9d03591a49be31f3fbc738d01a31700bfdbc4
Parents: 2c155af
Author: Kihwal Lee 
Authored: Thu Apr 14 15:24:39 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 15:24:39 2016 -0500

--
 .../ha/TestPendingCorruptDnMessages.java| 51 +++-
 1 file changed, 28 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9c9d035/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
index 5f116d9..5063acd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.URISyntaxException;
 import java.util.List;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -37,19 +39,22 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.util.ThreadUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import com.google.common.base.Supplier;
+
 import org.junit.Test;
 
 public class TestPendingCorruptDnMessages {
   
   private static final Path filePath = new Path("/foo.txt");
   
-  @Test
+  @Test (timeout = 6)
   public void testChangedStorageId() throws IOException, URISyntaxException,
-  InterruptedException {
+  InterruptedException, TimeoutException {
 HdfsConfiguration conf = new HdfsConfiguration();
 conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
-MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(1)
 .nnTopology(MiniDFSNNTopology.simpleHATopology())
 .build();
@@ -83,27 +88,27 @@ public class TestPendingCorruptDnMessages {
   
   // Wait until the standby NN queues up the corrupt block in the pending 
DN
   // message queue.
-  while (cluster.getNamesystem(1).getBlockManager()
-  .getPendingDataNodeMessageCount() < 1) {
-ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
-  }
-  
-  assertEquals(1, cluster.getNamesystem(1).getBlockManager()
-  .getPendingDataNodeMessageCount());
-  String oldStorageId = getRegisteredDatanodeUid(cluster, 1);
+  GenericTestUtils.waitFor(new Supplier<Boolean>() {
+@Override
+public Boolean get() {
+  return cluster.getNamesystem(1).getBlockManager()
+  .getPendingDataNodeMessageCount() == 1;
+}
+  }, 1000, 3);
+
+  final String oldStorageId = getRegisteredDatanodeUid(cluster, 1);
+  assertNotNull(oldStorageId);
   
   // Reformat/restart the DN.
   assertTrue(wipeAndRestartDn(cluster, 0));
   
-  // Give the DN time to start up and register, which will cause the
-  // DatanodeManager to dissociate the old storage ID from the DN xfer 
addr.
-  String newStorageId = "";
-  do {
-ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
-newStorageId = getRegisteredDatanodeUid(cluster, 1);
-System.out.println("> oldStorageId: " + oldStorageId +
-" newStorageId: " + newStorageId);
-  } while (newStorageId.equals(oldStorageId));
+  GenericTestUtils.waitFor(new Supplier<Boolean>() {
+@Override
+public Boolean get() {
+  final String newStorageId = getRegisteredDatanodeUid(cluster, 1);
+  return newStorageId != null && 
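
Both hunks above swap open-ended sleep loops for bounded condition polling. A
self-contained stand-in for the GenericTestUtils.waitFor pattern follows; the
interval and timeout values are illustrative only:

import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

public final class WaitForSketch {

  // Poll check every intervalMs until it returns true or timeoutMs elapses.
  static void waitFor(Supplier<Boolean> check, long intervalMs, long timeoutMs)
      throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (Boolean.TRUE.equals(check.get())) {
        return;
      }
      Thread.sleep(intervalMs);
    }
    throw new TimeoutException("condition not met within " + timeoutMs + " ms");
  }

  public static void main(String[] args) throws Exception {
    final long start = System.currentTimeMillis();
    // Becomes true after ~2 s; poll every 100 ms, fail the wait after 30 s.
    waitFor(() -> System.currentTimeMillis() - start > 2000, 100, 30000);
    System.out.println("condition met");
  }
}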

[10/27] hadoop git commit: HDFS-10292. Add block id when client got Unable to close file exception. Contributed by Brahma Reddy Battula.

2016-04-18 Thread aengineer
HDFS-10292. Add block id when client got Unable to close file exception. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c155afe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c155afe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c155afe

Branch: refs/heads/HDFS-7240
Commit: 2c155afe2736a5571bbb3bdfb2fe6f9709227229
Parents: 3150ae8
Author: Kihwal Lee 
Authored: Thu Apr 14 14:25:11 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 14:25:11 2016 -0500

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c155afe/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index dc88e08..0f82799 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -811,7 +811,7 @@ public class DFSOutputStream extends FSOutputSummer
 try {
   if (retries == 0) {
 throw new IOException("Unable to close file because the last block"
-+ " does not have enough number of replicas.");
++ last + " does not have enough number of replicas.");
   }
   retries--;
   Thread.sleep(sleeptime);



[01/27] hadoop git commit: HADOOP-12969 Mark IPC.Client and IPC.Server as @Public, @Evolving (Xiaobing Zhou via stevel)

2016-04-18 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 ce524c584 -> c884170c9


HADOOP-12969 Mark IPC.Client and IPC.Server as @Public, @Evolving (Xiaobing 
Zhou via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40211d1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40211d1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40211d1f

Branch: refs/heads/HDFS-7240
Commit: 40211d1f0a3e4546eab076e10be8937853490e5e
Parents: 27b131e
Author: Steve Loughran 
Authored: Thu Apr 14 10:35:04 2016 +0100
Committer: Steve Loughran 
Committed: Thu Apr 14 10:35:31 2016 +0100

--
 .../src/main/java/org/apache/hadoop/ipc/Client.java | 3 ++-
 .../src/main/java/org/apache/hadoop/ipc/Server.java | 5 +++--
 2 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40211d1f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 489c354..f206861 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -61,6 +61,7 @@ import javax.security.sasl.Sasl;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -107,7 +108,7 @@ import com.google.protobuf.CodedOutputStream;
  * 
  * @see Server
  */
-@InterfaceAudience.LimitedPrivate(value = { "Common", "HDFS", "MapReduce", 
"Yarn" })
+@Public
 @InterfaceStability.Evolving
 public class Client implements AutoCloseable {
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40211d1f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index eb28ad5..1cc9f1d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -74,6 +74,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
@@ -133,7 +134,7 @@ import com.google.protobuf.Message.Builder;
  * 
  * @see Client
  */
-@InterfaceAudience.LimitedPrivate(value = { "Common", "HDFS", "MapReduce", 
"Yarn" })
+@Public
 @InterfaceStability.Evolving
 public abstract class Server {
   private final boolean authorize;
@@ -439,7 +440,7 @@ public abstract class Server {
 
   /**
* Checks if LogSlowRPC is set true.
-   * @return
+   * @return true, if LogSlowRPC is set true, false, otherwise.
*/
   protected boolean isLogSlowRPC() {
 return logSlowRPC;



[25/27] hadoop git commit: Fixed TimelineClient to retry SocketTimeoutException too. Contributed by Xuan Gong.

2016-04-18 Thread aengineer
Fixed TimelineClient to retry SocketTimeoutException too. Contributed by Xuan 
Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47700373
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47700373
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47700373

Branch: refs/heads/HDFS-7240
Commit: 477003730e6a7c7eff11892f5cedf74073ca867b
Parents: d8b729e
Author: Vinod Kumar Vavilapalli 
Authored: Mon Apr 18 11:47:06 2016 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Mon Apr 18 11:47:06 2016 -0700

--
 .../client/api/impl/TimelineClientImpl.java | 74 ++--
 .../client/api/impl/TestTimelineClient.java | 41 +++
 2 files changed, 93 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47700373/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index ef46229..8c60041 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -24,6 +24,7 @@ import java.lang.reflect.UndeclaredThrowableException;
 import java.net.ConnectException;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
+import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.URL;
 import java.net.URLConnection;
@@ -116,7 +117,9 @@ public class TimelineClientImpl extends TimelineClient {
   TimelineClientConnectionRetry connectionRetry;
 
   // Abstract class for an operation that should be retried by timeline client
-  private static abstract class TimelineClientRetryOp {
+  @Private
+  @VisibleForTesting
+  public static abstract class TimelineClientRetryOp {
 // The operation that should be retried
 public abstract Object run() throws IOException;
 // The method to indicate if we should retry given the incoming exception
@@ -449,27 +452,8 @@ public class TimelineClientImpl extends TimelineClient {
  final PrivilegedExceptionAction<?> action)
   throws IOException, YarnException {
 // Set up the retry operation
-TimelineClientRetryOp tokenRetryOp = new TimelineClientRetryOp() {
-
-  @Override
-  public Object run() throws IOException {
-// Try pass the request, if fail, keep retrying
-authUgi.checkTGTAndReloginFromKeytab();
-try {
-  return authUgi.doAs(action);
-} catch (UndeclaredThrowableException e) {
-  throw new IOException(e.getCause());
-} catch (InterruptedException e) {
-  throw new IOException(e);
-}
-  }
-
-  @Override
-  public boolean shouldRetryOn(Exception e) {
-// Only retry on connection exceptions
-return (e instanceof ConnectException);
-  }
-};
+TimelineClientRetryOp tokenRetryOp =
+createTimelineClientRetryOpForOperateDelegationToken(action);
 
 return connectionRetry.retryOn(tokenRetryOp);
   }
@@ -680,4 +664,50 @@ public class TimelineClientImpl extends TimelineClient {
   public void setTimelineWriter(TimelineWriter writer) {
 this.timelineWriter = writer;
   }
+
+  @Private
+  @VisibleForTesting
+  public TimelineClientRetryOp
+  createTimelineClientRetryOpForOperateDelegationToken(
+  final PrivilegedExceptionAction<?> action) throws IOException {
+return new TimelineClientRetryOpForOperateDelegationToken(
+this.authUgi, action);
+  }
+
+  @Private
+  @VisibleForTesting
+  public class TimelineClientRetryOpForOperateDelegationToken
+  extends TimelineClientRetryOp {
+
+private final UserGroupInformation authUgi;
+private final PrivilegedExceptionAction<?> action;
+
+public TimelineClientRetryOpForOperateDelegationToken(
+UserGroupInformation authUgi, PrivilegedExceptionAction<?> action) {
+  this.authUgi = authUgi;
+  this.action = action;
+}
+
+@Override
+public Object run() throws IOException {
+  // Try pass the request, if fail, keep retrying
+  authUgi.checkTGTAndReloginFromKeytab();
+  try {
+return authUgi.doAs(action);
+  } catch (UndeclaredThrowableException e) {
+throw new IOException(e.getCause());
+  } catch (InterruptedException e) {
+throw new 
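
The diff promotes an anonymous retry operation to a named, test-visible class
behind a factory method. A condensed stand-in for that shape (illustrative
names; the real class additionally wraps UGI re-login and doAs):

import java.net.ConnectException;
import java.net.SocketTimeoutException;

public class RetryOpSketch {

  abstract static class RetryOp {
    abstract Object run() throws Exception;
    abstract boolean shouldRetryOn(Exception e);
  }

  // Mirrors the factory idea: tests can obtain the op and probe its retry
  // policy directly instead of driving a real connection.
  static RetryOp createTokenRetryOp() {
    return new RetryOp() {
      @Override
      Object run() {
        return "ok";
      }

      @Override
      boolean shouldRetryOn(Exception e) {
        // After this change: retry on read timeouts, not just connect failures.
        return e instanceof ConnectException
            || e instanceof SocketTimeoutException;
      }
    };
  }

  public static void main(String[] args) {
    RetryOp op = createTokenRetryOp();
    System.out.println(op.shouldRetryOn(new SocketTimeoutException())); // true
    System.out.println(op.shouldRetryOn(new IllegalStateException())); // false
  }
}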

[08/27] hadoop git commit: HADOOP-12811. Change kms server port number which conflicts with HMaster port number. Contributed by Xiao Chen.

2016-04-18 Thread aengineer
HADOOP-12811. Change kms server port number which conflicts with HMaster port 
number. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a74580a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a74580a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a74580a4

Branch: refs/heads/HDFS-7240
Commit: a74580a4d3039ff95e7744f1d7a386b2bc7a7484
Parents: 8b2880c
Author: Andrew Wang 
Authored: Thu Apr 14 11:36:12 2016 -0700
Committer: Andrew Wang 
Committed: Thu Apr 14 11:36:12 2016 -0700

--
 .../crypto/key/kms/TestLoadBalancingKMSClientProvider.java   | 8 
 hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh| 2 +-
 .../hadoop-kms/src/main/libexec/kms-config.sh| 2 +-
 .../hadoop-kms/src/site/markdown/index.md.vm | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a74580a4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
index 08a3d93..4e421da 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
@@ -60,14 +60,14 @@ public class TestLoadBalancingKMSClientProvider {
 providers[2].getKMSUrl()));
 
 kp = new KMSClientProvider.Factory().createProvider(new URI(
-"kms://http@host1;host2;host3:16000/kms/foo"), conf);
+"kms://http@host1;host2;host3:9600/kms/foo"), conf);
 assertTrue(kp instanceof LoadBalancingKMSClientProvider);
 providers =
 ((LoadBalancingKMSClientProvider) kp).getProviders();
 assertEquals(3, providers.length);
-assertEquals(Sets.newHashSet("http://host1:16000/kms/foo/v1/;,
-"http://host2:16000/kms/foo/v1/;,
-"http://host3:16000/kms/foo/v1/;),
+assertEquals(Sets.newHashSet("http://host1:9600/kms/foo/v1/;,
+"http://host2:9600/kms/foo/v1/;,
+"http://host3:9600/kms/foo/v1/;),
 Sets.newHashSet(providers[0].getKMSUrl(),
 providers[1].getKMSUrl(),
 providers[2].getKMSUrl()));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a74580a4/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh 
b/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
index 7044fa8..729e63a 100644
--- a/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
+++ b/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
@@ -24,7 +24,7 @@
 
 # The HTTP port used by KMS
 #
-# export KMS_HTTP_PORT=16000
+# export KMS_HTTP_PORT=9600
 
 # The Admin port used by KMS
 #

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a74580a4/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh 
b/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh
index 5e1ffa4..927b4af 100644
--- a/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh
+++ b/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh
@@ -37,7 +37,7 @@ function hadoop_subproject_init
   export HADOOP_CATALINA_CONFIG="${HADOOP_CONF_DIR}"
   export HADOOP_CATALINA_LOG="${HADOOP_LOG_DIR}"
 
-  export HADOOP_CATALINA_HTTP_PORT="${KMS_HTTP_PORT:-16000}"
+  export HADOOP_CATALINA_HTTP_PORT="${KMS_HTTP_PORT:-9600}"
   export 
HADOOP_CATALINA_ADMIN_PORT="${KMS_ADMIN_PORT:-$((HADOOP_CATALINA_HTTP_PORT+1))}"
   export HADOOP_CATALINA_MAX_THREADS="${KMS_MAX_THREADS:-1000}"
   export 
HADOOP_CATALINA_MAX_HTTP_HEADER_SIZE="${KMS_MAX_HTTP_HEADER_SIZE:-65536}"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a74580a4/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
--
diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm 
b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
index 65854cf..6866367 100644
--- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
+++ 

[09/27] hadoop git commit: YARN-4924. NM recovery race can lead to container not cleaned up. Contributed by sandflee

2016-04-18 Thread aengineer
YARN-4924. NM recovery race can lead to container not cleaned up. Contributed 
by sandflee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3150ae81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3150ae81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3150ae81

Branch: refs/heads/HDFS-7240
Commit: 3150ae8108a1fc40a67926be6254824c1e37cb38
Parents: a74580a
Author: Jason Lowe 
Authored: Thu Apr 14 19:17:14 2016 +
Committer: Jason Lowe 
Committed: Thu Apr 14 19:17:14 2016 +

--
 .../containermanager/ContainerManagerImpl.java  | 17 -
 .../recovery/NMLeveldbStateStoreService.java| 80 
 .../recovery/NMNullStateStoreService.java   |  4 -
 .../recovery/NMStateStoreService.java   | 12 ---
 .../TestContainerManagerRecovery.java   |  4 +
 .../recovery/NMMemoryStateStoreService.java | 10 ---
 .../TestNMLeveldbStateStoreService.java | 10 +--
 7 files changed, 54 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3150ae81/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 8d09aa7..b8cca28 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -296,20 +296,8 @@ public class ContainerManagerImpl extends CompositeService 
implements
 if (LOG.isDebugEnabled()) {
   LOG.debug("Recovering container with state: " + rcs);
 }
-
 recoverContainer(rcs);
   }
-
-  String diagnostic = "Application marked finished during recovery";
-  for (ApplicationId appId : appsState.getFinishedApplications()) {
-
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Application marked finished during recovery: " + appId);
-}
-
-dispatcher.getEventHandler().handle(
-new ApplicationFinishEvent(appId, diagnostic));
-  }
 } else {
   LOG.info("Not a recoverable state store. Nothing to recover.");
 }
@@ -1332,11 +1320,6 @@ public class ContainerManagerImpl extends 
CompositeService implements
 } else if (appsFinishedEvent.getReason() == 
CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER) {
   diagnostic = "Application killed by ResourceManager";
 }
-try {
-  this.context.getNMStateStore().storeFinishedApplication(appID);
-} catch (IOException e) {
-  LOG.error("Unable to update application state in store", e);
-}
 this.dispatcher.getEventHandler().handle(
 new ApplicationFinishEvent(appID,
 diagnostic));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3150ae81/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index 81d6c57..26dea2d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -84,6 +84,7 @@ public class NMLeveldbStateStoreService extends 
NMStateStoreService {
 
   private static final String APPLICATIONS_KEY_PREFIX =
   "ContainerManager/applications/";
+  @Deprecated
   private static final 

[27/27] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2016-04-18 Thread aengineer
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c884170c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c884170c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c884170c

Branch: refs/heads/HDFS-7240
Commit: c884170c9338534229a0872087ae64fc9db70b05
Parents: ce524c5 cb3ca46
Author: Anu Engineer 
Authored: Mon Apr 18 16:58:45 2016 -0700
Committer: Anu Engineer 
Committed: Mon Apr 18 16:58:45 2016 -0700

--
 .../client/KerberosAuthenticator.java   |   6 +-
 .../main/java/org/apache/hadoop/ipc/Client.java |   3 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |   5 +-
 .../src/main/resources/core-default.xml |   7 +
 .../kms/TestLoadBalancingKMSClientProvider.java |   8 +-
 .../apache/hadoop/fs/shell/find/TestAnd.java|  25 +-
 .../fs/shell/find/TestFilterExpression.java |  27 +-
 .../apache/hadoop/fs/shell/find/TestFind.java   |   3 +-
 .../apache/hadoop/fs/shell/find/TestIname.java  |  17 +-
 .../apache/hadoop/fs/shell/find/TestName.java   |  17 +-
 .../apache/hadoop/fs/shell/find/TestPrint.java  |   9 +-
 .../apache/hadoop/fs/shell/find/TestPrint0.java |   9 +-
 .../apache/hadoop/fs/shell/find/TestResult.java |  41 +-
 .../hadoop-kms/src/main/conf/kms-env.sh |   2 +-
 .../hadoop-kms/src/main/libexec/kms-config.sh   |   2 +-
 .../hadoop-kms/src/site/markdown/index.md.vm|   4 +-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |   2 +-
 .../server/blockmanagement/BlockManager.java|  17 +
 .../BlockPlacementPolicyDefault.java|   3 +-
 .../hdfs/server/datanode/DirectoryScanner.java  |  14 +-
 .../hdfs/server/datanode/VolumeScanner.java |   2 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  12 +
 .../hdfs/server/namenode/FSEditLogOp.java   |   3 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   4 +
 .../src/site/markdown/HDFSCommands.md   |   2 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  12 +
 .../apache/hadoop/hdfs/StripedFileTestUtil.java |  21 +-
 .../org/apache/hadoop/hdfs/TestGetBlocks.java   |   8 +-
 .../server/datanode/FsDatasetTestUtils.java |   7 +
 .../hdfs/server/datanode/TestBlockScanner.java  |  63 +++
 .../server/datanode/TestDataNodeMetrics.java|   6 +-
 .../fsdataset/impl/FsDatasetImplTestUtils.java  |  21 +
 .../namenode/TestFSImageWithSnapshot.java   |  43 +-
 .../ha/TestPendingCorruptDnMessages.java|  51 ++-
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  |   2 +-
 .../jobhistory/JobHistoryEventHandler.java  |   9 +-
 .../v2/app/job/event/TaskAttemptKillEvent.java  |  15 +-
 .../app/job/event/TaskTAttemptKilledEvent.java  |  40 ++
 .../mapreduce/v2/app/job/impl/JobImpl.java  |   4 +-
 .../v2/app/job/impl/TaskAttemptImpl.java|  48 +-
 .../mapreduce/v2/app/job/impl/TaskImpl.java |  25 +-
 .../v2/app/rm/RMContainerAllocator.java |   4 +-
 .../hadoop/mapreduce/v2/app/TestMRApp.java  |  51 ++-
 .../v2/app/job/impl/TestTaskAttempt.java|  87 +++-
 .../mapreduce/v2/app/job/impl/TestTaskImpl.java |  75 +++-
 .../hadoop/mapreduce/v2/hs/CompletedJob.java|   9 +-
 .../mapreduce/v2/hs/TestJobHistoryEntities.java |  27 ++
 hadoop-project/src/site/site.xml|   1 +
 .../org/apache/hadoop/fs/s3a/Constants.java |  10 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  10 +
 .../src/site/markdown/tools/hadoop-aws/index.md |   7 +
 .../hadoop/fs/s3a/TestS3AConfiguration.java |  47 +-
 .../apache/hadoop/tools/SimpleCopyListing.java  |   2 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java |  38 ++
 .../pom.xml |   6 +
 .../distributedshell/ApplicationMaster.java |  10 +-
 .../distributedshell/TestDistributedShell.java  |  50 +++
 .../client/api/impl/TimelineClientImpl.java |  74 ++-
 .../client/api/impl/TestTimelineClient.java |  43 +-
 .../hadoop/yarn/webapp/JerseyTestBase.java  |  18 +-
 .../containermanager/ContainerManagerImpl.java  |  17 -
 .../recovery/NMLeveldbStateStoreService.java|  80 ++--
 .../recovery/NMNullStateStoreService.java   |   4 -
 .../recovery/NMStateStoreService.java   |  12 -
 .../TestContainerManagerRecovery.java   |   4 +
 .../recovery/NMMemoryStateStoreService.java |  10 -
 .../TestNMLeveldbStateStoreService.java |  10 +-
 .../resourcemanager/NodesListManager.java   |  36 +-
 .../resourcemanager/rmnode/RMNodeImpl.java  |   4 +-
 .../scheduler/capacity/LeafQueue.java   |   7 -
 .../scheduler/common/fica/FiCaSchedulerApp.java |   2 +
 .../resourcemanager/TestClientRMService.java|  49 +-
 .../resourcemanager/TestRMNodeTransitions.java  |   4 +-
 .../capacity/TestContainerAllocation.java   | 188 +++-
 .../src/site/markdown/ReservationSystem.md  |  65 +++

[24/27] hadoop git commit: HDFS-10302. BlockPlacementPolicyDefault should use default replication considerload value. Contributed by Lin Yiqun.

2016-04-18 Thread aengineer
HDFS-10302. BlockPlacementPolicyDefault should use default replication 
considerload value. Contributed by Lin Yiqun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8b729e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8b729e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8b729e1

Branch: refs/heads/HDFS-7240
Commit: d8b729e16fb253e6c84f414d419b5663d9219a43
Parents: ab90302
Author: Kihwal Lee 
Authored: Mon Apr 18 07:58:55 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 18 07:58:55 2016 -0500

--
 .../hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8b729e1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index f20f5fb..474a5e7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -79,7 +79,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
  NetworkTopology clusterMap, 
  Host2NodesMap host2datanodeMap) {
 this.considerLoad = conf.getBoolean(
-DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT);
 this.considerLoadFactor = conf.getDouble(
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);
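The substance of the change: the compiled-in fallback for considerLoad now comes from the shared DFSConfigKeys constant instead of a hard-coded true, so the code and the documented default cannot drift apart. A minimal stand-alone sketch of the pattern, with illustrative constants in place of the real DFSConfigKeys entries:

    import org.apache.hadoop.conf.Configuration;

    public class ConsiderLoadSketch {
      // Illustrative stand-ins for DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY
      // and DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT: the key and its
      // default live side by side, and every reader passes the constant.
      static final String CONSIDERLOAD_KEY =
          "dfs.namenode.replication.considerLoad";
      static final boolean CONSIDERLOAD_DEFAULT = true;

      static boolean considerLoad(Configuration conf) {
        return conf.getBoolean(CONSIDERLOAD_KEY, CONSIDERLOAD_DEFAULT);
      }
    }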



hadoop git commit: HDFS-10232. Ozone: Make config key naming consistent. Contributed by Anu Engineer.

2016-04-18 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 ae6c0e839 -> ce524c584


HDFS-10232. Ozone: Make config key naming consistent. Contributed by Anu 
Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce524c58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce524c58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce524c58

Branch: refs/heads/HDFS-7240
Commit: ce524c584bf3429975d71005d01700f3a56afa12
Parents: ae6c0e8
Author: Anu Engineer 
Authored: Mon Apr 18 16:52:40 2016 -0700
Committer: Anu Engineer 
Committed: Mon Apr 18 16:52:40 2016 -0700

--
 .../hadoop/hdfs/server/datanode/DataNode.java   | 18 +++---
 .../server/datanode/ObjectStoreHandler.java | 34 ++--
 .../apache/hadoop/ozone/OzoneConfigKeys.java| 58 ++--
 .../container/common/impl/KeyManagerImpl.java   |  4 +-
 .../common/transport/client/XceiverClient.java  |  4 +-
 .../common/transport/server/XceiverServer.java  |  4 +-
 .../container/ozoneimpl/OzoneContainer.java |  2 +-
 .../ozone/storage/StorageContainerManager.java  | 24 
 .../web/localstorage/OzoneMetadataManager.java  |  4 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  4 +-
 .../common/impl/TestContainerPersistence.java   | 10 ++--
 .../container/ozoneimpl/TestOzoneContainer.java | 30 --
 .../transport/server/TestContainerServer.java   |  4 +-
 .../storage/TestStorageContainerManager.java|  6 +-
 .../ozone/web/TestOzoneRestWithMiniCluster.java |  6 +-
 .../hadoop/ozone/web/TestOzoneVolumes.java  | 16 +++---
 .../hadoop/ozone/web/TestOzoneWebAccess.java| 10 ++--
 .../hadoop/ozone/web/client/TestBuckets.java| 14 ++---
 .../hadoop/ozone/web/client/TestKeys.java   | 16 +++---
 .../hadoop/ozone/web/client/TestVolume.java | 16 +++---
 20 files changed, 139 insertions(+), 145 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce524c58/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 6bcbd71..5ba3b45 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -48,8 +48,8 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEF
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY;
-import static 
org.apache.hadoop.ozone.OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_DEFAULT;
-import static 
org.apache.hadoop.ozone.OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -188,7 +188,6 @@ import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SaslPropertiesResolver;
@@ -456,9 +455,8 @@ public class DataNode extends ReconfigurableBase
 this.pipelineSupportECN = conf.getBoolean(
 DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED,
 DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED_DEFAULT);
-this.ozoneEnabled = conf.getBoolean(OzoneConfigKeys
-.DFS_OBJECTSTORE_ENABLED_KEY, OzoneConfigKeys
-.DFS_OBJECTSTORE_ENABLED_DEFAULT);
+this.ozoneEnabled = conf.getBoolean(OZONE_ENABLED,
+OZONE_ENABLED_DEFAULT);
 
 confVersion = "core-" +
 conf.get("hadoop.common.configuration.version", "UNSPECIFIED") +
@@ -1294,7 +1292,7 @@ public class DataNode extends ReconfigurableBase
 // global DN settings
 registerMXBean();
 initDataXceiver(conf);
-initObjectStoreHandler(conf);
+initObjectStoreHandler();
 startInfoServer(conf);
 pauseMonitor = new JvmPauseMonitor();
 pauseMonitor.init(conf);
@@ -1331,12 +1329,10 @@ public class DataNode extends 

hadoop git commit: HDFS-10265. OEV tool fails to read edit xml file if OP_UPDATE_BLOCKS has no BLOCK tag (Wan Chang via cmccabe)

2016-04-18 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 92e45b832 -> e1d9e5ab8


HDFS-10265. OEV tool fails to read edit xml file if OP_UPDATE_BLOCKS has no 
BLOCK tag (Wan Chang via cmccabe)

(cherry picked from commit cb3ca460efb97be8c031bdb14bb7705cc25f2117)
(cherry picked from commit a69b6b1e8b1e734ba9a106061cc6d22583d1d2d1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1d9e5ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1d9e5ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1d9e5ab

Branch: refs/heads/branch-2.8
Commit: e1d9e5ab828d7de265e7cbd280d1cf4db968f1e9
Parents: 92e45b8
Author: Colin Patrick Mccabe 
Authored: Mon Apr 18 11:45:18 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Mon Apr 18 13:49:05 2016 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSEditLogOp.java |  3 ++-
 .../test/java/org/apache/hadoop/hdfs/DFSTestUtil.java   | 12 
 2 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1d9e5ab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 2922031..41be440 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -1113,7 +1113,8 @@ public abstract class FSEditLogOp {
 
 @Override void fromXml(Stanza st) throws InvalidXmlException {
   this.path = st.getValue("PATH");
-  List<Stanza> blocks = st.getChildren("BLOCK");
+  List<Stanza> blocks = st.hasChildren("BLOCK") ?
+  st.getChildren("BLOCK") : new ArrayList<Stanza>();
   this.blocks = new Block[blocks.size()];
   for (int i = 0; i < blocks.size(); i++) {
 this.blocks[i] = FSEditLogOp.blockFromXml(blocks.get(i));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1d9e5ab/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index da247f7..d6dcf0c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1267,6 +1267,18 @@ public class DFSTestUtil {
 // OP_APPEND 47
 FSDataOutputStream s2 = filesystem.append(pathFileCreate, 4096, null);
 s2.close();
+
+// OP_UPDATE_BLOCKS 25
+final String updateBlockFile = "/update_blocks";
+FSDataOutputStream fout = filesystem.create(new Path(updateBlockFile), 
true, 4096, (short)1, 4096L);
+fout.write(1);
+fout.hflush();
+long fileId = ((DFSOutputStream)fout.getWrappedStream()).getFileId();
+DFSClient dfsclient = DFSClientAdapter.getDFSClient(filesystem);
+LocatedBlocks blocks = 
dfsclient.getNamenode().getBlockLocations(updateBlockFile, 0, 
Integer.MAX_VALUE);
+dfsclient.getNamenode().abandonBlock(blocks.get(0).getBlock(), fileId, 
updateBlockFile, dfsclient.clientName);
+fout.close();
+
 // OP_SET_STORAGE_POLICY 45
 filesystem.setStoragePolicy(pathFileCreate,
 HdfsConstants.HOT_STORAGE_POLICY_NAME);
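The parser previously assumed every OP_UPDATE_BLOCKS stanza carried at least one BLOCK child, so an update that abandons the only block (which the new DFSTestUtil code now exercises) broke the OEV reader. A self-contained toy of the guard, with MiniStanza as a stand-in for the real Stanza type:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    public class FromXmlGuardDemo {
      // Toy stand-in for the XML Stanza used by FSEditLogOp.
      static class MiniStanza {
        private final Map<String, List<String>> children;
        MiniStanza(Map<String, List<String>> children) { this.children = children; }
        boolean hasChildren(String name) { return children.containsKey(name); }
        List<String> getChildren(String name) { return children.get(name); }
      }

      public static void main(String[] args) {
        MiniStanza st = new MiniStanza(Collections.emptyMap()); // no BLOCK tag
        // The fix: fall back to an empty list when the tag is absent.
        List<String> blocks = st.hasChildren("BLOCK")
            ? st.getChildren("BLOCK") : new ArrayList<String>();
        System.out.println("parsed " + blocks.size() + " blocks"); // 0, no failure
      }
    }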



hadoop git commit: HDFS-10265. OEV tool fails to read edit xml file if OP_UPDATE_BLOCKS has no BLOCK tag (Wan Chang via cmccabe)

2016-04-18 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 342bb438f -> a69b6b1e8


HDFS-10265. OEV tool fails to read edit xml file if OP_UPDATE_BLOCKS has no 
BLOCK tag (Wan Chang via cmccabe)

(cherry picked from commit cb3ca460efb97be8c031bdb14bb7705cc25f2117)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a69b6b1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a69b6b1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a69b6b1e

Branch: refs/heads/branch-2
Commit: a69b6b1e8b1e734ba9a106061cc6d22583d1d2d1
Parents: 342bb43
Author: Colin Patrick Mccabe 
Authored: Mon Apr 18 11:45:18 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Mon Apr 18 13:48:24 2016 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSEditLogOp.java |  3 ++-
 .../test/java/org/apache/hadoop/hdfs/DFSTestUtil.java   | 12 
 2 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a69b6b1e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 6714741..ae9bfe1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -1096,7 +1096,8 @@ public abstract class FSEditLogOp {
 
 @Override void fromXml(Stanza st) throws InvalidXmlException {
   this.path = st.getValue("PATH");
-  List<Stanza> blocks = st.getChildren("BLOCK");
+  List<Stanza> blocks = st.hasChildren("BLOCK") ?
+  st.getChildren("BLOCK") : new ArrayList<Stanza>();
   this.blocks = new Block[blocks.size()];
   for (int i = 0; i < blocks.size(); i++) {
 this.blocks[i] = FSEditLogOp.blockFromXml(blocks.get(i));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a69b6b1e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 95665d7..0d97369 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1270,6 +1270,18 @@ public class DFSTestUtil {
 // OP_APPEND 47
 FSDataOutputStream s2 = filesystem.append(pathFileCreate, 4096, null);
 s2.close();
+
+// OP_UPDATE_BLOCKS 25
+final String updateBlockFile = "/update_blocks";
+FSDataOutputStream fout = filesystem.create(new Path(updateBlockFile), 
true, 4096, (short)1, 4096L);
+fout.write(1);
+fout.hflush();
+long fileId = ((DFSOutputStream)fout.getWrappedStream()).getFileId();
+DFSClient dfsclient = DFSClientAdapter.getDFSClient(filesystem);
+LocatedBlocks blocks = 
dfsclient.getNamenode().getBlockLocations(updateBlockFile, 0, 
Integer.MAX_VALUE);
+dfsclient.getNamenode().abandonBlock(blocks.get(0).getBlock(), fileId, 
updateBlockFile, dfsclient.clientName);
+fout.close();
+
 // OP_SET_STORAGE_POLICY 45
 filesystem.setStoragePolicy(pathFileCreate,
 HdfsConstants.HOT_STORAGE_POLICY_NAME);



hadoop git commit: HDFS-10265. OEV tool fails to read edit xml file if OP_UPDATE_BLOCKS has no BLOCK tag (Wan Chang via cmccabe)

2016-04-18 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 477003730 -> cb3ca460e


HDFS-10265. OEV tool fails to read edit xml file if OP_UPDATE_BLOCKS has no 
BLOCK tag (Wan Chang via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb3ca460
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb3ca460
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb3ca460

Branch: refs/heads/trunk
Commit: cb3ca460efb97be8c031bdb14bb7705cc25f2117
Parents: 4770037
Author: Colin Patrick Mccabe 
Authored: Mon Apr 18 11:45:18 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Mon Apr 18 13:47:56 2016 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSEditLogOp.java |  3 ++-
 .../test/java/org/apache/hadoop/hdfs/DFSTestUtil.java   | 12 
 2 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3ca460/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index c4e1a78..a3285a9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -1096,7 +1096,8 @@ public abstract class FSEditLogOp {
 
 @Override void fromXml(Stanza st) throws InvalidXmlException {
   this.path = st.getValue("PATH");
-  List<Stanza> blocks = st.getChildren("BLOCK");
+  List<Stanza> blocks = st.hasChildren("BLOCK") ?
+  st.getChildren("BLOCK") : new ArrayList<Stanza>();
   this.blocks = new Block[blocks.size()];
   for (int i = 0; i < blocks.size(); i++) {
 this.blocks[i] = FSEditLogOp.blockFromXml(blocks.get(i));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3ca460/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 8a52bbb..d159fc5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1272,6 +1272,18 @@ public class DFSTestUtil {
 // OP_APPEND 47
 FSDataOutputStream s2 = filesystem.append(pathFileCreate, 4096, null);
 s2.close();
+
+// OP_UPDATE_BLOCKS 25
+final String updateBlockFile = "/update_blocks";
+FSDataOutputStream fout = filesystem.create(new Path(updateBlockFile), 
true, 4096, (short)1, 4096L);
+fout.write(1);
+fout.hflush();
+long fileId = ((DFSOutputStream)fout.getWrappedStream()).getFileId();
+DFSClient dfsclient = DFSClientAdapter.getDFSClient(filesystem);
+LocatedBlocks blocks = 
dfsclient.getNamenode().getBlockLocations(updateBlockFile, 0, 
Integer.MAX_VALUE);
+dfsclient.getNamenode().abandonBlock(blocks.get(0).getBlock(), fileId, 
updateBlockFile, dfsclient.clientName);
+fout.close();
+
 // OP_SET_STORAGE_POLICY 45
 filesystem.setStoragePolicy(pathFileCreate,
 HdfsConstants.HOT_STORAGE_POLICY_NAME);



hadoop git commit: Fixed TimelineClient to retry SocketTimeoutException too. Contributed by Xuan Gong.

2016-04-18 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7ac2e5ec7 -> 92e45b832


Fixed TimelineClient to retry SocketTimeoutException too. Contributed by Xuan 
Gong.

(cherry picked from commit 477003730e6a7c7eff11892f5cedf74073ca867b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92e45b83
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92e45b83
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92e45b83

Branch: refs/heads/branch-2.8
Commit: 92e45b83219a230e1ef7071c9aeb598f67c58896
Parents: 7ac2e5e
Author: Vinod Kumar Vavilapalli 
Authored: Mon Apr 18 11:47:06 2016 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Mon Apr 18 11:51:21 2016 -0700

--
 .../client/api/impl/TimelineClientImpl.java | 74 ++--
 .../client/api/impl/TestTimelineClient.java | 41 +++
 2 files changed, 93 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e45b83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index ef46229..8c60041 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -24,6 +24,7 @@ import java.lang.reflect.UndeclaredThrowableException;
 import java.net.ConnectException;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
+import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.URL;
 import java.net.URLConnection;
@@ -116,7 +117,9 @@ public class TimelineClientImpl extends TimelineClient {
   TimelineClientConnectionRetry connectionRetry;
 
   // Abstract class for an operation that should be retried by timeline client
-  private static abstract class TimelineClientRetryOp {
+  @Private
+  @VisibleForTesting
+  public static abstract class TimelineClientRetryOp {
 // The operation that should be retried
 public abstract Object run() throws IOException;
 // The method to indicate if we should retry given the incoming exception
@@ -449,27 +452,8 @@ public class TimelineClientImpl extends TimelineClient {
  final PrivilegedExceptionAction<?> action)
   throws IOException, YarnException {
 // Set up the retry operation
-TimelineClientRetryOp tokenRetryOp = new TimelineClientRetryOp() {
-
-  @Override
-  public Object run() throws IOException {
-// Try pass the request, if fail, keep retrying
-authUgi.checkTGTAndReloginFromKeytab();
-try {
-  return authUgi.doAs(action);
-} catch (UndeclaredThrowableException e) {
-  throw new IOException(e.getCause());
-} catch (InterruptedException e) {
-  throw new IOException(e);
-}
-  }
-
-  @Override
-  public boolean shouldRetryOn(Exception e) {
-// Only retry on connection exceptions
-return (e instanceof ConnectException);
-  }
-};
+TimelineClientRetryOp tokenRetryOp =
+createTimelineClientRetryOpForOperateDelegationToken(action);
 
 return connectionRetry.retryOn(tokenRetryOp);
   }
@@ -680,4 +664,50 @@ public class TimelineClientImpl extends TimelineClient {
   public void setTimelineWriter(TimelineWriter writer) {
 this.timelineWriter = writer;
   }
+
+  @Private
+  @VisibleForTesting
+  public TimelineClientRetryOp
+  createTimelineClientRetryOpForOperateDelegationToken(
+  final PrivilegedExceptionAction<?> action) throws IOException {
+return new TimelineClientRetryOpForOperateDelegationToken(
+this.authUgi, action);
+  }
+
+  @Private
+  @VisibleForTesting
+  public class TimelineClientRetryOpForOperateDelegationToken
+  extends TimelineClientRetryOp {
+
+private final UserGroupInformation authUgi;
+private final PrivilegedExceptionAction<?> action;
+
+public TimelineClientRetryOpForOperateDelegationToken(
+UserGroupInformation authUgi, PrivilegedExceptionAction<?> action) {
+  this.authUgi = authUgi;
+  this.action = action;
+}
+
+@Override
+public Object run() throws IOException {
+  // Try pass the request, if fail, keep retrying
+  authUgi.checkTGTAndReloginFromKeytab();
+  try {
+return authUgi.doAs(action);
+   

hadoop git commit: Fixed TimelineClient to retry SocketTimeoutException too. Contributed by Xuan Gong.

2016-04-18 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 aa846bd49 -> 342bb438f


Fixed TimelineClient to retry SocketTimeoutException too. Contributed by Xuan 
Gong.

(cherry picked from commit 477003730e6a7c7eff11892f5cedf74073ca867b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/342bb438
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/342bb438
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/342bb438

Branch: refs/heads/branch-2
Commit: 342bb438f281b822ebfcae231e46136280904c20
Parents: aa846bd
Author: Vinod Kumar Vavilapalli 
Authored: Mon Apr 18 11:47:06 2016 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Mon Apr 18 11:49:00 2016 -0700

--
 .../client/api/impl/TimelineClientImpl.java | 74 ++--
 .../client/api/impl/TestTimelineClient.java | 41 +++
 2 files changed, 93 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/342bb438/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index ef46229..8c60041 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -24,6 +24,7 @@ import java.lang.reflect.UndeclaredThrowableException;
 import java.net.ConnectException;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
+import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.URL;
 import java.net.URLConnection;
@@ -116,7 +117,9 @@ public class TimelineClientImpl extends TimelineClient {
   TimelineClientConnectionRetry connectionRetry;
 
   // Abstract class for an operation that should be retried by timeline client
-  private static abstract class TimelineClientRetryOp {
+  @Private
+  @VisibleForTesting
+  public static abstract class TimelineClientRetryOp {
 // The operation that should be retried
 public abstract Object run() throws IOException;
 // The method to indicate if we should retry given the incoming exception
@@ -449,27 +452,8 @@ public class TimelineClientImpl extends TimelineClient {
  final PrivilegedExceptionAction<?> action)
   throws IOException, YarnException {
 // Set up the retry operation
-TimelineClientRetryOp tokenRetryOp = new TimelineClientRetryOp() {
-
-  @Override
-  public Object run() throws IOException {
-// Try pass the request, if fail, keep retrying
-authUgi.checkTGTAndReloginFromKeytab();
-try {
-  return authUgi.doAs(action);
-} catch (UndeclaredThrowableException e) {
-  throw new IOException(e.getCause());
-} catch (InterruptedException e) {
-  throw new IOException(e);
-}
-  }
-
-  @Override
-  public boolean shouldRetryOn(Exception e) {
-// Only retry on connection exceptions
-return (e instanceof ConnectException);
-  }
-};
+TimelineClientRetryOp tokenRetryOp =
+createTimelineClientRetryOpForOperateDelegationToken(action);
 
 return connectionRetry.retryOn(tokenRetryOp);
   }
@@ -680,4 +664,50 @@ public class TimelineClientImpl extends TimelineClient {
   public void setTimelineWriter(TimelineWriter writer) {
 this.timelineWriter = writer;
   }
+
+  @Private
+  @VisibleForTesting
+  public TimelineClientRetryOp
+  createTimelineClientRetryOpForOperateDelegationToken(
+  final PrivilegedExceptionAction<?> action) throws IOException {
+return new TimelineClientRetryOpForOperateDelegationToken(
+this.authUgi, action);
+  }
+
+  @Private
+  @VisibleForTesting
+  public class TimelineClientRetryOpForOperateDelegationToken
+  extends TimelineClientRetryOp {
+
+private final UserGroupInformation authUgi;
+private final PrivilegedExceptionAction<?> action;
+
+public TimelineClientRetryOpForOperateDelegationToken(
+UserGroupInformation authUgi, PrivilegedExceptionAction<?> action) {
+  this.authUgi = authUgi;
+  this.action = action;
+}
+
+@Override
+public Object run() throws IOException {
+  // Try pass the request, if fail, keep retrying
+  authUgi.checkTGTAndReloginFromKeytab();
+  try {
+return authUgi.doAs(action);
+  

hadoop git commit: Fixed TimelineClient to retry SocketTimeoutException too. Contributed by Xuan Gong.

2016-04-18 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/trunk d8b729e16 -> 477003730


Fixed TimelineClient to retry SocketTimeoutException too. Contributed by Xuan 
Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47700373
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47700373
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47700373

Branch: refs/heads/trunk
Commit: 477003730e6a7c7eff11892f5cedf74073ca867b
Parents: d8b729e
Author: Vinod Kumar Vavilapalli 
Authored: Mon Apr 18 11:47:06 2016 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Mon Apr 18 11:47:06 2016 -0700

--
 .../client/api/impl/TimelineClientImpl.java | 74 ++--
 .../client/api/impl/TestTimelineClient.java | 41 +++
 2 files changed, 93 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47700373/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index ef46229..8c60041 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -24,6 +24,7 @@ import java.lang.reflect.UndeclaredThrowableException;
 import java.net.ConnectException;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
+import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.URL;
 import java.net.URLConnection;
@@ -116,7 +117,9 @@ public class TimelineClientImpl extends TimelineClient {
   TimelineClientConnectionRetry connectionRetry;
 
   // Abstract class for an operation that should be retried by timeline client
-  private static abstract class TimelineClientRetryOp {
+  @Private
+  @VisibleForTesting
+  public static abstract class TimelineClientRetryOp {
 // The operation that should be retried
 public abstract Object run() throws IOException;
 // The method to indicate if we should retry given the incoming exception
@@ -449,27 +452,8 @@ public class TimelineClientImpl extends TimelineClient {
  final PrivilegedExceptionAction<?> action)
   throws IOException, YarnException {
 // Set up the retry operation
-TimelineClientRetryOp tokenRetryOp = new TimelineClientRetryOp() {
-
-  @Override
-  public Object run() throws IOException {
-// Try pass the request, if fail, keep retrying
-authUgi.checkTGTAndReloginFromKeytab();
-try {
-  return authUgi.doAs(action);
-} catch (UndeclaredThrowableException e) {
-  throw new IOException(e.getCause());
-} catch (InterruptedException e) {
-  throw new IOException(e);
-}
-  }
-
-  @Override
-  public boolean shouldRetryOn(Exception e) {
-// Only retry on connection exceptions
-return (e instanceof ConnectException);
-  }
-};
+TimelineClientRetryOp tokenRetryOp =
+createTimelineClientRetryOpForOperateDelegationToken(action);
 
 return connectionRetry.retryOn(tokenRetryOp);
   }
@@ -680,4 +664,50 @@ public class TimelineClientImpl extends TimelineClient {
   public void setTimelineWriter(TimelineWriter writer) {
 this.timelineWriter = writer;
   }
+
+  @Private
+  @VisibleForTesting
+  public TimelineClientRetryOp
+  createTimelineClientRetryOpForOperateDelegationToken(
+  final PrivilegedExceptionAction<?> action) throws IOException {
+return new TimelineClientRetryOpForOperateDelegationToken(
+this.authUgi, action);
+  }
+
+  @Private
+  @VisibleForTesting
+  public class TimelineClientRetryOpForOperateDelegationToken
+  extends TimelineClientRetryOp {
+
+private final UserGroupInformation authUgi;
+private final PrivilegedExceptionAction<?> action;
+
+public TimelineClientRetryOpForOperateDelegationToken(
+UserGroupInformation authUgi, PrivilegedExceptionAction<?> action) {
+  this.authUgi = authUgi;
+  this.action = action;
+}
+
+@Override
+public Object run() throws IOException {
+  // Try pass the request, if fail, keep retrying
+  authUgi.checkTGTAndReloginFromKeytab();
+  try {
+return authUgi.doAs(action);
+  } catch (UndeclaredThrowableException e) {
+throw new IOException(e.getCause());
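The heart of the change is the retry predicate: delegation-token operations now retry on read timeouts as well as failures to connect. A stand-alone sketch of that predicate (not the exact client class):

    import java.io.IOException;
    import java.net.ConnectException;
    import java.net.SocketTimeoutException;

    public class RetryPredicateSketch {
      // Broadened test: a timed-out read is as retryable as a refused connection.
      static boolean shouldRetryOn(Exception e) {
        return e instanceof ConnectException
            || e instanceof SocketTimeoutException;
      }

      public static void main(String[] args) {
        System.out.println(shouldRetryOn(new SocketTimeoutException("Read timed out"))); // true
        System.out.println(shouldRetryOn(new IOException("unrelated"))); // false
      }
    }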

hadoop git commit: MAPREDUCE-6633. AM should retry map attempts if the reduce task encounters compression related errors. Contributed by Rushabh Shah (cherry picked from commit 1fec06e037d2b22dafc64f

2016-04-18 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 91ca90fdd -> 2ec5bb0fe


MAPREDUCE-6633. AM should retry map attempts if the reduce task encounters 
compression related errors. Contributed by Rushabh Shah
(cherry picked from commit 1fec06e037d2b22dafc64f33d4f1231bef4ceba8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ec5bb0f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ec5bb0f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ec5bb0f

Branch: refs/heads/branch-2.7
Commit: 2ec5bb0fe88d5e91bdeef5b2b4f19bf0e866f939
Parents: 91ca90f
Author: Eric Payne 
Authored: Sat Apr 9 16:51:57 2016 +
Committer: Eric Payne 
Committed: Mon Apr 18 16:12:53 2016 +

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../hadoop/mapreduce/task/reduce/Fetcher.java   |  2 +-
 .../mapreduce/task/reduce/TestFetcher.java  | 37 
 3 files changed, 41 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ec5bb0f/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 31ec4a1..69e5d5c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -20,6 +20,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6633. AM should retry map attempts if the reduce task encounters
+compression related errors (Rushabh Shah via epayne)
+
 MAPREDUCE-4785. TestMRApp occasionally fails (haibochen via rkanter)
 
 MAPREDUCE-6540. TestMRTimelineEventHandling fails (sjlee)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ec5bb0f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
index 4b80dc9..2e255f8 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
@@ -536,7 +536,7 @@ class Fetcher extends Thread {
 + " len: " + compressedLength + " to " + 
mapOutput.getDescription());
 mapOutput.shuffle(host, is, compressedLength, decompressedLength,
 metrics, reporter);
-  } catch (java.lang.InternalError e) {
+  } catch (java.lang.InternalError | Exception e) {
 LOG.warn("Failed to shuffle for fetcher#"+id, e);
 throw new IOException(e);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ec5bb0f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java
index a9cd33e..f31e160 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java
@@ -348,6 +348,43 @@ public class TestFetcher {
   
   @SuppressWarnings("unchecked")
  @Test(timeout=10000) 
+  public void testCopyFromHostOnAnyException() throws Exception {
+InMemoryMapOutput<Text, Text> immo = mock(InMemoryMapOutput.class);
+
+Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
+r, metrics, except, key, connection);
+
+String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), 
key);
+
+when(connection.getResponseCode()).thenReturn(200);
+when(connection.getHeaderField(
+SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
+ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
+ByteArrayOutputStream bout = new ByteArrayOutputStream();
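The one-line production change above widens what the fetcher treats as a shuffle failure: any exception, not only java.lang.InternalError from a native decompressor, is wrapped in an IOException so the failed fetch is reported and the map attempt can be retried. A self-contained sketch of that control flow (the Runnable stands in for the real shuffle call):

    import java.io.IOException;

    public class ShuffleGuardSketch {
      // Wrap both Errors from native code and ordinary Exceptions, so every
      // failure surfaces through the same IOException-based reporting path.
      static void shuffleOrReport(Runnable shuffle) throws IOException {
        try {
          shuffle.run();
        } catch (InternalError | Exception e) {
          throw new IOException("Failed to shuffle", e);
        }
      }

      public static void main(String[] args) {
        try {
          shuffleOrReport(() -> { throw new IllegalStateException("bad codec state"); });
        } catch (IOException expected) {
          System.out.println("reported: " + expected.getCause());
        }
      }
    }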

hadoop git commit: libhdfs++: File length doesn't always count the last block if it's being written to. Contributed by Xiaowei Zhu.

2016-04-18 Thread jhc
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-8707 082860052 -> d8653c8dc


libhdfs++: File length doesn't always count the last block if it's being 
written to. Contributed by Xiaowei Zhu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8653c8d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8653c8d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8653c8d

Branch: refs/heads/HDFS-8707
Commit: d8653c8dc2ab9847d531cf48d1cd4dd6b005c3ff
Parents: 0828600
Author: James 
Authored: Mon Apr 18 09:25:53 2016 -0400
Committer: James 
Committed: Mon Apr 18 09:25:53 2016 -0400

--
 .../src/main/native/libhdfspp/lib/fs/filesystem.cc  | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8653c8d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc
index 569b479..8530ffa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc
@@ -104,6 +104,7 @@ void NameNodeOperations::GetBlockLocations(const 
std::string & path,
 
   if (locations.has_lastblock() && locations.lastblock().b().numbytes()) {
 file_info->blocks_.push_back(locations.lastblock());
+file_info->file_length_ += locations.lastblock().b().numbytes();
   }
 
   handler(stat, file_info);
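The accounting rule the one-line fix restores: a file's length is the bytes in its completed blocks plus whatever has already been written into an under-construction last block, when the NameNode reports one. A tiny Java rendering of that sum (illustrative names, not the libhdfs++ API):

    public class FileLengthSketch {
      static long fileLength(long completedBytes, long lastBlockBytes,
          boolean hasLastBlock) {
        // Without the second term, readers see a short length while the
        // file is still open for write.
        return completedBytes + (hasLastBlock ? lastBlockBytes : 0L);
      }

      public static void main(String[] args) {
        // Two full 128 MB blocks plus 5 bytes in the open last block.
        System.out.println(fileLength(2L * 128 * 1024 * 1024, 5L, true));
      }
    }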



hadoop git commit: HDFS-10302. BlockPlacementPolicyDefault should use default replication considerload value. Contributed by Lin Yiqun.

2016-04-18 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 09ffc65d9 -> 7ac2e5ec7


HDFS-10302. BlockPlacementPolicyDefault should use default replication 
considerload value. Contributed by Lin Yiqun.

(cherry picked from commit d8b729e16fb253e6c84f414d419b5663d9219a43)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ac2e5ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ac2e5ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ac2e5ec

Branch: refs/heads/branch-2.8
Commit: 7ac2e5ec7be31e19e80862510acdd927ce3f87fd
Parents: 09ffc65
Author: Kihwal Lee 
Authored: Mon Apr 18 08:01:17 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 18 08:01:17 2016 -0500

--
 .../hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ac2e5ec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index ee891a5..63e96c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -79,7 +79,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
  NetworkTopology clusterMap, 
  Host2NodesMap host2datanodeMap) {
 this.considerLoad = conf.getBoolean(
-DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT);
 this.considerLoadFactor = conf.getDouble(
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);



hadoop git commit: HDFS-10302. BlockPlacementPolicyDefault should use default replication considerload value. Contributed by Lin Yiqun.

2016-04-18 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a06ee5a71 -> aa846bd49


HDFS-10302. BlockPlacementPolicyDefault should use default replication 
considerload value. Contributed by Lin Yiqun.

(cherry picked from commit d8b729e16fb253e6c84f414d419b5663d9219a43)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa846bd4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa846bd4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa846bd4

Branch: refs/heads/branch-2
Commit: aa846bd49271d1911385fcaa0b74526d80304fd0
Parents: a06ee5a
Author: Kihwal Lee 
Authored: Mon Apr 18 08:00:44 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 18 08:00:44 2016 -0500

--
 .../hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa846bd4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index ee891a5..63e96c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -79,7 +79,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
  NetworkTopology clusterMap, 
  Host2NodesMap host2datanodeMap) {
 this.considerLoad = conf.getBoolean(
-DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT);
 this.considerLoadFactor = conf.getDouble(
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);



hadoop git commit: HDFS-10302. BlockPlacementPolicyDefault should use default replication considerload value. Contributed by Lin Yiqun.

2016-04-18 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk ab903029a -> d8b729e16


HDFS-10302. BlockPlacementPolicyDefault should use default replication 
considerload value. Contributed by Lin Yiqun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8b729e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8b729e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8b729e1

Branch: refs/heads/trunk
Commit: d8b729e16fb253e6c84f414d419b5663d9219a43
Parents: ab90302
Author: Kihwal Lee 
Authored: Mon Apr 18 07:58:55 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 18 07:58:55 2016 -0500

--
 .../hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8b729e1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index f20f5fb..474a5e7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -79,7 +79,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
  NetworkTopology clusterMap, 
  Host2NodesMap host2datanodeMap) {
 this.considerLoad = conf.getBoolean(
-DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT);
 this.considerLoadFactor = conf.getDouble(
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);



hadoop git commit: HDFS-10275. TestDataNodeMetrics failing intermittently due to TotalWriteTime counted incorrectly. Contributed by Lin Yiqun.

2016-04-18 Thread waltersu4549
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 0ad463458 -> 91ca90fdd


HDFS-10275. TestDataNodeMetrics failing intermittently due to TotalWriteTime 
counted incorrectly. Contributed by Lin Yiqun.

(cherry picked from commit ab903029a9d353677184ff5602966b11ffb408b9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91ca90fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91ca90fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91ca90fd

Branch: refs/heads/branch-2.7
Commit: 91ca90fdd11129f37b32db85c10e9f36cbea08bb
Parents: 0ad4634
Author: Walter Su 
Authored: Mon Apr 18 20:29:29 2016 +0800
Committer: Walter Su 
Committed: Mon Apr 18 20:30:48 2016 +0800

--
 .../hadoop/hdfs/server/datanode/TestDataNodeMetrics.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91ca90fd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 3423f02..46d85e8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -257,10 +257,9 @@ public class TestDataNodeMetrics {
* and reading causes totalReadTime to move.
* @throws Exception
*/
-  @Test(timeout=60000)
+  @Test(timeout=120000)
   public void testDataNodeTimeSpend() throws Exception {
 Configuration conf = new HdfsConfiguration();
-SimulatedFSDataset.setFactory(conf);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 try {
   final FileSystem fs = cluster.getFileSystem();
@@ -283,6 +282,7 @@ public class TestDataNodeMetrics {
 DFSTestUtil.createFile(fs, new Path("/time.txt." + x.get()),
 LONG_FILE_LEN, (short) 1, Time.monotonicNow());
 DFSTestUtil.readFile(fs, new Path("/time.txt." + x.get()));
+fs.delete(new Path("/time.txt." + x.get()), true);
   } catch (IOException ioe) {
 LOG.error("Caught IOException while ingesting DN metrics", ioe);
 return false;
@@ -293,7 +293,7 @@ public class TestDataNodeMetrics {
   return endWriteValue > startWriteValue
   && endReadValue > startReadValue;
 }
-  }, 30, 30000);
+  }, 30, 60000);
 } finally {
   if (cluster != null) {
 cluster.shutdown();



hadoop git commit: HDFS-10275. TestDataNodeMetrics failing intermittently due to TotalWriteTime counted incorrectly. Contributed by Lin Yiqun.

2016-04-18 Thread waltersu4549
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 e9699aa44 -> 09ffc65d9


HDFS-10275. TestDataNodeMetrics failing intermittently due to TotalWriteTime 
counted incorrectly. Contributed by Lin Yiqun.

(cherry picked from commit ab903029a9d353677184ff5602966b11ffb408b9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09ffc65d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09ffc65d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09ffc65d

Branch: refs/heads/branch-2.8
Commit: 09ffc65d94f0ea18ac797ba5a4def0009db82d15
Parents: e9699aa
Author: Walter Su 
Authored: Mon Apr 18 20:29:29 2016 +0800
Committer: Walter Su 
Committed: Mon Apr 18 20:32:36 2016 +0800

--
 .../hadoop/hdfs/server/datanode/TestDataNodeMetrics.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ffc65d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 5f9b602..355f7a1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -258,10 +258,9 @@ public class TestDataNodeMetrics {
* and reading causes totalReadTime to move.
* @throws Exception
*/
-  @Test(timeout=60000)
+  @Test(timeout=120000)
   public void testDataNodeTimeSpend() throws Exception {
 Configuration conf = new HdfsConfiguration();
-SimulatedFSDataset.setFactory(conf);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 try {
   final FileSystem fs = cluster.getFileSystem();
@@ -284,6 +283,7 @@ public class TestDataNodeMetrics {
 DFSTestUtil.createFile(fs, new Path("/time.txt." + x.get()),
 LONG_FILE_LEN, (short) 1, Time.monotonicNow());
 DFSTestUtil.readFile(fs, new Path("/time.txt." + x.get()));
+fs.delete(new Path("/time.txt." + x.get()), true);
   } catch (IOException ioe) {
 LOG.error("Caught IOException while ingesting DN metrics", ioe);
 return false;
@@ -294,7 +294,7 @@ public class TestDataNodeMetrics {
   return endWriteValue > startWriteValue
   && endReadValue > startReadValue;
 }
-  }, 30, 30000);
+  }, 30, 60000);
 } finally {
   if (cluster != null) {
 cluster.shutdown();



hadoop git commit: HDFS-10275. TestDataNodeMetrics failing intermittently due to TotalWriteTime counted incorrectly. Contributed by Lin Yiqun.

2016-04-18 Thread waltersu4549
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 dd6476969 -> a06ee5a71


HDFS-10275. TestDataNodeMetrics failing intermittently due to TotalWriteTime 
counted incorrectly. Contributed by Lin Yiqun.

(cherry picked from commit ab903029a9d353677184ff5602966b11ffb408b9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a06ee5a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a06ee5a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a06ee5a7

Branch: refs/heads/branch-2
Commit: a06ee5a71d38e5d84a9c60f6d827b4d13739f52b
Parents: dd64769
Author: Walter Su 
Authored: Mon Apr 18 20:29:29 2016 +0800
Committer: Walter Su 
Committed: Mon Apr 18 20:31:55 2016 +0800

--
 .../hadoop/hdfs/server/datanode/TestDataNodeMetrics.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a06ee5a7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 5f9b602..355f7a1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -258,10 +258,9 @@ public class TestDataNodeMetrics {
* and reading causes totalReadTime to move.
* @throws Exception
*/
-  @Test(timeout=60000)
+  @Test(timeout=120000)
   public void testDataNodeTimeSpend() throws Exception {
 Configuration conf = new HdfsConfiguration();
-SimulatedFSDataset.setFactory(conf);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 try {
   final FileSystem fs = cluster.getFileSystem();
@@ -284,6 +283,7 @@ public class TestDataNodeMetrics {
 DFSTestUtil.createFile(fs, new Path("/time.txt." + x.get()),
 LONG_FILE_LEN, (short) 1, Time.monotonicNow());
 DFSTestUtil.readFile(fs, new Path("/time.txt." + x.get()));
+fs.delete(new Path("/time.txt." + x.get()), true);
   } catch (IOException ioe) {
 LOG.error("Caught IOException while ingesting DN metrics", ioe);
 return false;
@@ -294,7 +294,7 @@ public class TestDataNodeMetrics {
   return endWriteValue > startWriteValue
   && endReadValue > startReadValue;
 }
-  }, 30, 30000);
+  }, 30, 60000);
 } finally {
   if (cluster != null) {
 cluster.shutdown();



hadoop git commit: HDFS-10275. TestDataNodeMetrics failing intermittently due to TotalWriteTime counted incorrectly. Contributed by Lin Yiqun.

2016-04-18 Thread waltersu4549
Repository: hadoop
Updated Branches:
  refs/heads/trunk 67523ffcf -> ab903029a


HDFS-10275. TestDataNodeMetrics failing intermittently due to TotalWriteTime 
counted incorrectly. Contributed by Lin Yiqun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab903029
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab903029
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab903029

Branch: refs/heads/trunk
Commit: ab903029a9d353677184ff5602966b11ffb408b9
Parents: 67523ff
Author: Walter Su 
Authored: Mon Apr 18 20:29:29 2016 +0800
Committer: Walter Su 
Committed: Mon Apr 18 20:29:29 2016 +0800

--
 .../hadoop/hdfs/server/datanode/TestDataNodeMetrics.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab903029/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 5f9b602..355f7a1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -258,10 +258,9 @@ public class TestDataNodeMetrics {
* and reading causes totalReadTime to move.
* @throws Exception
*/
-  @Test(timeout=60000)
+  @Test(timeout=120000)
   public void testDataNodeTimeSpend() throws Exception {
 Configuration conf = new HdfsConfiguration();
-SimulatedFSDataset.setFactory(conf);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 try {
   final FileSystem fs = cluster.getFileSystem();
@@ -284,6 +283,7 @@ public class TestDataNodeMetrics {
 DFSTestUtil.createFile(fs, new Path("/time.txt." + x.get()),
 LONG_FILE_LEN, (short) 1, Time.monotonicNow());
 DFSTestUtil.readFile(fs, new Path("/time.txt." + x.get()));
+fs.delete(new Path("/time.txt." + x.get()), true);
   } catch (IOException ioe) {
 LOG.error("Caught IOException while ingesting DN metrics", ioe);
 return false;
@@ -294,7 +294,7 @@ public class TestDataNodeMetrics {
   return endWriteValue > startWriteValue
   && endReadValue > startReadValue;
 }
-  }, 30, 30000);
+  }, 30, 60000);
 } finally {
   if (cluster != null) {
 cluster.shutdown();
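The test leans on a poll-until-true helper, and the patch simply gives it more headroom (and deletes each file so repeated attempts do not fill the disk). A stand-alone re-implementation of the polling idiom, in the spirit of Hadoop's GenericTestUtils.waitFor but not that class itself:

    import java.util.concurrent.TimeoutException;
    import java.util.function.Supplier;

    public class WaitForSketch {
      static void waitFor(Supplier<Boolean> check, long checkEveryMillis,
          long waitForMillis) throws InterruptedException, TimeoutException {
        long deadline = System.currentTimeMillis() + waitForMillis;
        while (System.currentTimeMillis() < deadline) {
          if (Boolean.TRUE.equals(check.get())) {
            return; // condition met
          }
          Thread.sleep(checkEveryMillis);
        }
        throw new TimeoutException("condition not met in " + waitForMillis + " ms");
      }

      public static void main(String[] args) throws Exception {
        long start = System.currentTimeMillis();
        waitFor(() -> System.currentTimeMillis() - start > 100, 30, 60000);
        System.out.println("condition met");
      }
    }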



[1/2] hadoop git commit: HDFS-10291 TestShortCircuitLocalRead failing (stevel)

2016-04-18 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a6d0e9e56 -> dd6476969
  refs/heads/branch-2.8 7f47d984f -> e9699aa44


HDFS-10291 TestShortCircuitLocalRead failing (stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9699aa4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9699aa4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9699aa4

Branch: refs/heads/branch-2.8
Commit: e9699aa443f20309b5575bd7dfe1f8b7b7639ed6
Parents: 7f47d98
Author: Steve Loughran 
Authored: Mon Apr 18 10:26:35 2016 +0100
Committer: Steve Loughran 
Committed: Mon Apr 18 10:26:35 2016 +0100

--
 .../hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java   | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9699aa4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index f4fbebc..116dc88 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -153,8 +153,11 @@ public class TestShortCircuitLocalRead {
 //Read a small number of bytes first.
 int nread = stm.read(actual, 0, 3);
 nread += stm.read(actual, nread, 2);
-//Read across chunk boundary
-nread += stm.read(actual, nread, 517);
+int len = 517;
+if (actual.length - nread >= len) {
+  //Read across chunk boundary
+  nread += stm.read(actual, nread, len);
+}
 checkData(actual, readOffset, expected, nread, "A few bytes");
 //Now read rest of it
 while (nread < actual.length) {



[2/2] hadoop git commit: HDFS-10291 TestShortCircuitLocalRead failing (stevel)

2016-04-18 Thread stevel
HDFS-10291 TestShortCircuitLocalRead failing (stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd647696
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd647696
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd647696

Branch: refs/heads/branch-2
Commit: dd64769699ea4c85abdf9c590b9db680837e8b27
Parents: a6d0e9e
Author: Steve Loughran 
Authored: Mon Apr 18 10:26:35 2016 +0100
Committer: Steve Loughran 
Committed: Mon Apr 18 10:26:58 2016 +0100

--
 .../hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java   | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd647696/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index f4fbebc..116dc88 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -153,8 +153,11 @@ public class TestShortCircuitLocalRead {
 //Read a small number of bytes first.
 int nread = stm.read(actual, 0, 3);
 nread += stm.read(actual, nread, 2);
-//Read across chunk boundary
-nread += stm.read(actual, nread, 517);
+int len = 517;
+if (actual.length - nread >= len) {
+  //Read across chunk boundary
+  nread += stm.read(actual, nread, len);
+}
 checkData(actual, readOffset, expected, nread, "A few bytes");
 //Now read rest of it
 while (nread < actual.length) {
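The failure mode: the test always issued a 517-byte read, which overflows the destination array when the file (and hence the buffer) is smaller than the read offset plus 517. The patch guards the read with a room check. A self-contained sketch of that guard:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class BoundedReadSketch {
      // Attempt the cross-chunk read only when the buffer can hold it.
      static int readAcrossChunk(InputStream stm, byte[] actual, int nread)
          throws IOException {
        final int len = 517;
        if (actual.length - nread >= len) {
          int n = stm.read(actual, nread, len);
          if (n > 0) {
            nread += n;
          }
        }
        return nread;
      }

      public static void main(String[] args) throws IOException {
        byte[] buf = new byte[10]; // too small: the guarded read is skipped
        int n = readAcrossChunk(new ByteArrayInputStream(new byte[600]), buf, 5);
        System.out.println("nread=" + n); // still 5, no IndexOutOfBoundsException
      }
    }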



svn commit: r1739695 - /hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

2016-04-18 Thread devaraj
Author: devaraj
Date: Mon Apr 18 06:21:54 2016
New Revision: 1739695

URL: http://svn.apache.org/viewvc?rev=1739695&view=rev
Log:
Updated my name in PMC list

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1739695&r1=1739694&r2=1739695&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Mon 
Apr 18 06:21:54 2016
@@ -160,6 +160,14 @@

 

+   <tr>
+     <td>devaraj</td>
+     <td>Devaraj K</td>
+     <td>Intel</td>
+     <td></td>
+     <td>+5.5</td>
+   </tr>
+
    <tr>
      <td>dhruba</td>
      <td><a href="http://people.apache.org/~dhruba">Dhruba Borthakur</a></td>
      <td>Facebook</td>