hadoop git commit: YARN-3824. Fix two minor nits in member variable properties of YarnConfiguration. Contributed by Ray Chiang.

2015-06-18 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/trunk 295d678be -> 2ad668748


YARN-3824. Fix two minor nits in member variable properties of
YarnConfiguration. Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ad66874
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ad66874
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ad66874

Branch: refs/heads/trunk
Commit: 2ad668748e2535e25e3b04656c29034d58074027
Parents: 295d678
Author: Devaraj K deva...@apache.org
Authored: Thu Jun 18 16:44:08 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Thu Jun 18 16:44:08 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ad66874/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 243edb3..f00170e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -536,6 +536,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3617. Fix WindowsResourceCalculatorPlugin.getCpuFrequency()
 returning always -1. (J.Andreina via devaraj)
 
+YARN-3824. Fix two minor nits in member variable properties
+of YarnConfiguration. (Ray Chiang via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ad66874/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 3ea1558..5d75a21 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -278,7 +278,7 @@ public class YarnConfiguration extends Configuration {
      + "intermediate-data-encryption.enable";
 
   @Private
-  public static final Boolean DEFAULT_YARN_INTERMEDIATE_DATA_ENCRYPTION = false;
+  public static final boolean DEFAULT_YARN_INTERMEDIATE_DATA_ENCRYPTION = false;
 
   /** The address of the RM admin interface.*/
   public static final String RM_ADMIN_ADDRESS = 
@@ -729,7 +729,7 @@ public class YarnConfiguration extends Configuration {
 
   public static final String RM_PROXY_USER_PRIVILEGES_ENABLED = RM_PREFIX
      + "proxy-user-privileges.enabled";
-  public static boolean DEFAULT_RM_PROXY_USER_PRIVILEGES_ENABLED = false;
+  public static final boolean DEFAULT_RM_PROXY_USER_PRIVILEGES_ENABLED = false;
 
   /**
* How many diagnostics/failure messages can be saved in RM for


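For context on the two nits fixed above: Boolean is the boxed wrapper type (nullable, autoboxed at every use), while the primitive boolean carries no such cost; and a public static field without final is mutable global state rather than a constant. A minimal before/after sketch, with hypothetical names:

    public class ExampleDefaults {
      // Nit 1: the boxed wrapper can be null and forces autoboxing.
      public static final Boolean BOXED_FLAG = false;       // before
      public static final boolean PRIMITIVE_FLAG = false;   // after

      // Nit 2: without 'final' the "constant" can be reassigned at runtime.
      public static boolean MUTABLE_DEFAULT = false;        // before
      public static final boolean CONSTANT_DEFAULT = false; // after
    }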

hadoop git commit: YARN-3824. Fix two minor nits in member variable properties of YarnConfiguration. Contributed by Ray Chiang.

2015-06-18 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 80697e4f3 -> 3b9698eca


YARN-3824. Fix two minor nits in member variable properties of
YarnConfiguration. Contributed by Ray Chiang.

(cherry picked from commit 2ad668748e2535e25e3b04656c29034d58074027)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b9698ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b9698ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b9698ec

Branch: refs/heads/branch-2
Commit: 3b9698ecac75326d0cd88929189eaed782b9b8b2
Parents: 80697e4
Author: Devaraj K deva...@apache.org
Authored: Thu Jun 18 16:44:08 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Thu Jun 18 16:46:33 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b9698ec/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b45e0c3..7a43bef 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -488,6 +488,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3617. Fix WindowsResourceCalculatorPlugin.getCpuFrequency()
 returning always -1. (J.Andreina via devaraj)
 
+YARN-3824. Fix two minor nits in member variable properties
+of YarnConfiguration. (Ray Chiang via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b9698ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index e06ec1c..a777d7e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -278,7 +278,7 @@ public class YarnConfiguration extends Configuration {
      + "intermediate-data-encryption.enable";
 
   @Private
-  public static final Boolean DEFAULT_YARN_INTERMEDIATE_DATA_ENCRYPTION = false;
+  public static final boolean DEFAULT_YARN_INTERMEDIATE_DATA_ENCRYPTION = false;
 
   /** The address of the RM admin interface.*/
   public static final String RM_ADMIN_ADDRESS = 
@@ -729,7 +729,7 @@ public class YarnConfiguration extends Configuration {
 
   public static final String RM_PROXY_USER_PRIVILEGES_ENABLED = RM_PREFIX
      + "proxy-user-privileges.enabled";
-  public static boolean DEFAULT_RM_PROXY_USER_PRIVILEGES_ENABLED = false;
+  public static final boolean DEFAULT_RM_PROXY_USER_PRIVILEGES_ENABLED = false;
 
   /**
* How many diagnostics/failure messages can be saved in RM for



hadoop git commit: HDFS-8605. Merge Refactor of DFSOutputStream from HDFS-7285 branch. (vinayakumarb)

2015-06-18 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3b9698eca -> 7f62e41b2


HDFS-8605. Merge Refactor of DFSOutputStream from HDFS-7285 branch. 
(vinayakumarb)

(cherry picked from commit 1c13519e1e7588c3e2974138d37bf3449ca8b3df)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f62e41b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f62e41b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f62e41b

Branch: refs/heads/branch-2
Commit: 7f62e41b2eaae3edc0a01fc5d8cdf32ff7ded708
Parents: 3b9698e
Author: Andrew Wang w...@apache.org
Authored: Thu Jun 18 08:48:09 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Thu Jun 18 08:48:23 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 59 ++--
 .../org/apache/hadoop/hdfs/DataStreamer.java|  7 ++-
 3 files changed, 40 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f62e41b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 469272a..001c9b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -309,6 +309,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-6249. Output AclEntry in PBImageXmlWriter.
 (surendra singh lilhore via aajisaka)
 
+HDFS-8605. Merge Refactor of DFSOutputStream from HDFS-7285 branch.
+(vinayakumarb via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f62e41b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 30d6b6b..d160b2b 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -64,6 +64,8 @@ import org.apache.hadoop.util.Time;
 import org.apache.htrace.Sampler;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -86,6 +88,7 @@ import com.google.common.base.Preconditions;
 @InterfaceAudience.Private
 public class DFSOutputStream extends FSOutputSummer
 implements Syncable, CanSetDropBehind {
+  static final Logger LOG = LoggerFactory.getLogger(DFSOutputStream.class);
   /**
* Number of times to retry creating a file when there are transient 
* errors (typically related to encryption zones and KeyProvider operations).
@@ -413,21 +416,30 @@ public class DFSOutputStream extends FSOutputSummer
 //
 if (currentPacket.getNumChunks() == currentPacket.getMaxChunks() ||
 getStreamer().getBytesCurBlock() == blockSize) {
-      if (DFSClient.LOG.isDebugEnabled()) {
-        DFSClient.LOG.debug("DFSClient writeChunk packet full seqno=" +
-            currentPacket.getSeqno() +
-            ", src=" + src +
-            ", bytesCurBlock=" + getStreamer().getBytesCurBlock() +
-            ", blockSize=" + blockSize +
-            ", appendChunk=" + getStreamer().getAppendChunk());
-      }
-  getStreamer().waitAndQueuePacket(currentPacket);
-  currentPacket = null;
+  enqueueCurrentPacketFull();
+}
+  }
 
-  adjustChunkBoundary();
+  void enqueueCurrentPacket() throws IOException {
+getStreamer().waitAndQueuePacket(currentPacket);
+currentPacket = null;
+  }
 
-  endBlock();
-}
+  void enqueueCurrentPacketFull() throws IOException {
+    LOG.debug("enqueue full {}, src={}, bytesCurBlock={}, blockSize={},"
+        + " appendChunk={}, {}", currentPacket, src, getStreamer()
+        .getBytesCurBlock(), blockSize, getStreamer().getAppendChunk(),
+        getStreamer());
+enqueueCurrentPacket();
+adjustChunkBoundary();
+endBlock();
+  }
+
+  /** create an empty packet to mark the end of the block. */
+  void setCurrentPacketToEmpty() throws InterruptedIOException {
+currentPacket = createPacket(0, 0, getStreamer().getBytesCurBlock(),
+getStreamer().getAndIncCurrentSeqno(), true);
+currentPacket.setSyncBlock(shouldSyncBlock);
   }
 
   /**
@@ -457,11 +469,8 @@ public class DFSOutputStream extends FSOutputSummer
*/
   protected void endBlock() throws 

hadoop git commit: HDFS-8605. Merge Refactor of DFSOutputStream from HDFS-7285 branch. (vinayakumarb)

2015-06-18 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2ad668748 -> 1c13519e1


HDFS-8605. Merge Refactor of DFSOutputStream from HDFS-7285 branch. 
(vinayakumarb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c13519e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c13519e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c13519e

Branch: refs/heads/trunk
Commit: 1c13519e1e7588c3e2974138d37bf3449ca8b3df
Parents: 2ad6687
Author: Andrew Wang w...@apache.org
Authored: Thu Jun 18 08:48:09 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Thu Jun 18 08:48:09 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 59 ++--
 .../org/apache/hadoop/hdfs/DataStreamer.java|  7 ++-
 3 files changed, 40 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c13519e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2545bcf..a61cf78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -656,6 +656,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-6249. Output AclEntry in PBImageXmlWriter.
 (surendra singh lilhore via aajisaka)
 
+HDFS-8605. Merge Refactor of DFSOutputStream from HDFS-7285 branch.
+(vinayakumarb via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c13519e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 4622be6..c16aef2 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -64,6 +64,8 @@ import org.apache.hadoop.util.Time;
 import org.apache.htrace.Sampler;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -86,6 +88,7 @@ import com.google.common.base.Preconditions;
 @InterfaceAudience.Private
 public class DFSOutputStream extends FSOutputSummer
 implements Syncable, CanSetDropBehind {
+  static final Logger LOG = LoggerFactory.getLogger(DFSOutputStream.class);
   /**
* Number of times to retry creating a file when there are transient 
* errors (typically related to encryption zones and KeyProvider operations).
@@ -413,21 +416,30 @@ public class DFSOutputStream extends FSOutputSummer
 //
 if (currentPacket.getNumChunks() == currentPacket.getMaxChunks() ||
 getStreamer().getBytesCurBlock() == blockSize) {
-      if (DFSClient.LOG.isDebugEnabled()) {
-        DFSClient.LOG.debug("DFSClient writeChunk packet full seqno=" +
-            currentPacket.getSeqno() +
-            ", src=" + src +
-            ", bytesCurBlock=" + getStreamer().getBytesCurBlock() +
-            ", blockSize=" + blockSize +
-            ", appendChunk=" + getStreamer().getAppendChunk());
-      }
-  getStreamer().waitAndQueuePacket(currentPacket);
-  currentPacket = null;
+  enqueueCurrentPacketFull();
+}
+  }
 
-  adjustChunkBoundary();
+  void enqueueCurrentPacket() throws IOException {
+getStreamer().waitAndQueuePacket(currentPacket);
+currentPacket = null;
+  }
 
-  endBlock();
-}
+  void enqueueCurrentPacketFull() throws IOException {
+    LOG.debug("enqueue full {}, src={}, bytesCurBlock={}, blockSize={},"
+        + " appendChunk={}, {}", currentPacket, src, getStreamer()
+        .getBytesCurBlock(), blockSize, getStreamer().getAppendChunk(),
+        getStreamer());
+enqueueCurrentPacket();
+adjustChunkBoundary();
+endBlock();
+  }
+
+  /** create an empty packet to mark the end of the block. */
+  void setCurrentPacketToEmpty() throws InterruptedIOException {
+currentPacket = createPacket(0, 0, getStreamer().getBytesCurBlock(),
+getStreamer().getAndIncCurrentSeqno(), true);
+currentPacket.setSyncBlock(shouldSyncBlock);
   }
 
   /**
@@ -457,11 +469,8 @@ public class DFSOutputStream extends FSOutputSummer
*/
   protected void endBlock() throws IOException {
 if (getStreamer().getBytesCurBlock() == blockSize) {
-

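Beyond moving the logic into helper methods, the refactor above replaces the guarded DFSClient.LOG.isDebugEnabled() string concatenation with SLF4J parameterized logging, which defers message formatting until the level check passes. A minimal sketch of the idiom (class name hypothetical):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingIdiom {
      static final Logger LOG = LoggerFactory.getLogger(LoggingIdiom.class);

      void before(long seqno, String src) {
        // Old style: an explicit guard avoids paying for concatenation
        // when DEBUG is disabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug("packet full seqno=" + seqno + ", src=" + src);
        }
      }

      void after(long seqno, String src) {
        // SLF4J style: {} placeholders defer formatting, so no guard is
        // needed. Note the arguments themselves are still evaluated eagerly.
        LOG.debug("packet full seqno={}, src={}", seqno, src);
      }
    }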
[02/50] [abbrv] hadoop git commit: HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. Contributed by Zhe Zhang.

2015-06-18 Thread zjshen
HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. 
Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc9d48ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc9d48ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc9d48ac

Branch: refs/heads/YARN-2928
Commit: bc9d48accbf911662dc40c870a29c51e9ba5b23b
Parents: 98591bd
Author: Andrew Wang w...@apache.org
Authored: Fri Jun 12 11:35:39 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:04 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../server/blockmanagement/BlockCollection.java |   2 +-
 .../hdfs/server/blockmanagement/BlockInfo.java  |  24 +-
 .../blockmanagement/BlockInfoContiguous.java|  77 +---
 .../BlockInfoContiguousUnderConstruction.java   | 403 --
 .../BlockInfoUnderConstruction.java | 405 +++
 .../BlockInfoUnderConstructionContiguous.java   | 110 +
 .../server/blockmanagement/BlockManager.java|  40 +-
 .../ContiguousBlockStorageOp.java   | 106 +
 .../blockmanagement/DatanodeDescriptor.java |  13 +-
 .../server/blockmanagement/DatanodeManager.java |   4 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  15 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  15 +-
 .../hdfs/server/namenode/FSImageFormat.java |   7 +-
 .../server/namenode/FSImageFormatPBINode.java   |   6 +-
 .../server/namenode/FSImageSerialization.java   |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  24 +-
 .../namenode/FileUnderConstructionFeature.java  |  10 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  14 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java |   4 +-
 .../server/namenode/snapshot/FileDiffList.java  |   4 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   8 +-
 .../TestBlockInfoUnderConstruction.java |   6 +-
 .../blockmanagement/TestBlockManager.java   |   6 +-
 .../blockmanagement/TestHeartbeatHandling.java  |   8 +-
 .../blockmanagement/TestReplicationPolicy.java  |   5 +-
 .../namenode/TestBlockUnderConstruction.java|   4 +-
 .../TestCommitBlockSynchronization.java |   9 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |   6 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |   6 +-
 .../namenode/snapshot/SnapshotTestHelper.java   |   4 +-
 31 files changed, 769 insertions(+), 583 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc9d48ac/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e315db6..033451e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -626,6 +626,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7923. The DataNodes should rate-limit their full block reports by
 asking the NN on heartbeat messages (cmccabe)
 
+HDFS-8499. Refactor BlockInfo class hierarchy with static helper class.
+(Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc9d48ac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 02a1d05..f11a825 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -79,7 +79,7 @@ public interface BlockCollection {
* Convert the last block of the collection to an under-construction block
* and set the locations.
*/
-  public BlockInfoContiguousUnderConstruction setLastBlock(BlockInfo lastBlock,
+  public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
   DatanodeStorageInfo[] targets) throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc9d48ac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 

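The diffstat above shows the gist of the refactor: the BlockInfoContiguousUnderConstruction hierarchy is reworked into BlockInfoUnderConstruction plus a new ContiguousBlockStorageOp helper, moving shared storage logic out of the inheritance chain into a static helper that operates on the block it is handed. A simplified, self-contained sketch of that pattern (all names below are stand-ins, not Hadoop's actual API):

    abstract class Block {
      abstract boolean addStorage(String storage);
    }

    // Shared logic lives in a static helper instead of a common superclass,
    // so otherwise-unrelated block classes can all delegate to it.
    final class ContiguousStorageOp {
      private ContiguousStorageOp() {}
      static boolean addStorage(ContiguousBlock b, String storage) {
        return b.storages.add(storage);
      }
    }

    class ContiguousBlock extends Block {
      final java.util.List<String> storages = new java.util.ArrayList<>();
      @Override
      boolean addStorage(String storage) {
        return ContiguousStorageOp.addStorage(this, storage);
      }
    }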
[25/50] [abbrv] hadoop git commit: HDFS-8592. SafeModeException never get unwrapped. Contributed by Haohui Mai.

2015-06-18 Thread zjshen
HDFS-8592. SafeModeException never get unwrapped. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/388f1ff9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/388f1ff9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/388f1ff9

Branch: refs/heads/YARN-2928
Commit: 388f1ff9b79e3e15fd2f29c817c29fc78517e72a
Parents: 5032eb9
Author: Haohui Mai whe...@apache.org
Authored: Mon Jun 15 22:07:24 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:07 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  2 ++
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 11 ---
 .../hadoop/hdfs/server/namenode/SafeModeException.java   |  5 ++---
 .../test/java/org/apache/hadoop/hdfs/TestSafeMode.java   |  4 ++--
 4 files changed, 14 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/388f1ff9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 79e7820..0ae2882 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -895,6 +895,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8607. TestFileCorruption doesn't work as expected. (Walter Su via
 Arpit Agarwal)
 
+HDFS-8592. SafeModeException never get unwrapped. (wheat9)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/388f1ff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 518adb4..40c71ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1323,7 +1323,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   void checkNameNodeSafeMode(String errorMsg)
   throws RetriableException, SafeModeException {
 if (isInSafeMode()) {
-  SafeModeException se = new SafeModeException(errorMsg, safeMode);
+  SafeModeException se = newSafemodeException(errorMsg);
      if (haEnabled && haContext != null
          && haContext.getState().getServiceState() == HAServiceState.ACTIVE
          && shouldRetrySafeMode(this.safeMode)) {
@@ -1334,6 +1334,11 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 }
   }
 
+  private SafeModeException newSafemodeException(String errorMsg) {
+    return new SafeModeException(errorMsg + ". Name node is in safe " +
+        "mode.\n" + safeMode.getTurnOffTip());
+  }
+
   boolean isPermissionEnabled() {
 return isPermissionEnabled;
   }
@@ -1803,8 +1808,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   for (LocatedBlock b : ret.blocks.getLocatedBlocks()) {
        // if safemode && no block locations yet then throw safemodeException
        if ((b.getLocations() == null) || (b.getLocations().length == 0)) {
-          SafeModeException se = new SafeModeException(
-              "Zero blocklocations for " + src, safeMode);
+          SafeModeException se = newSafemodeException(
+              "Zero blocklocations for " + src);
          if (haEnabled && haContext != null &&
              haContext.getState().getServiceState() == HAServiceState.ACTIVE) {
 throw new RetriableException(se);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/388f1ff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java
index 458f052..8080bcf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java
@@ -32,8 +32,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceStability.Evolving
 public class SafeModeException extends IOException {
   private static final long serialVersionUID = 1L;
-

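For context on "unwrapped": HDFS clients receive server-side exceptions as org.apache.hadoop.ipc.RemoteException and call unwrapRemoteException(...) to reconstruct the original type, which only works when the exception class can be instantiated from its message alone; the extra server-side constructor argument removed above is what defeated that. A hedged sketch of the client-side pattern (the wrapper method is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
    import org.apache.hadoop.ipc.RemoteException;

    class UnwrapSketch {
      // Illustrative only: recover the typed exception from the RPC-layer
      // RemoteException wrapper.
      static void rethrowUnwrapped(RemoteException re) throws IOException {
        // unwrapRemoteException re-instantiates the remote class from its
        // message when it matches a lookup type; a constructor requiring
        // server-side state cannot be invoked this way, which is the bug
        // HDFS-8592 fixes.
        throw re.unwrapRemoteException(SafeModeException.class);
      }
    }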
[11/50] [abbrv] hadoop git commit: HDFS-8593. Calculation of effective layout version mishandles comparison to current layout version in storage. Contributed by Chris Nauroth.

2015-06-18 Thread zjshen
HDFS-8593. Calculation of effective layout version mishandles comparison to 
current layout version in storage. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb7f8ec1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb7f8ec1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb7f8ec1

Branch: refs/heads/YARN-2928
Commit: fb7f8ec15eab00fe1548d6d6cd758124fc0ea83c
Parents: d6ff0e8
Author: cnauroth cnaur...@apache.org
Authored: Sat Jun 13 10:37:52 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:05 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../hdfs/server/namenode/FSNamesystem.java  | 17 -
 .../hdfs/server/namenode/TestFSNamesystem.java  | 20 
 3 files changed, 35 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb7f8ec1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 51a0897..9aabd34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -886,6 +886,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8554. TestDatanodeLayoutUpgrade fails on Windows. (cnauroth)
 
+HDFS-8593. Calculation of effective layout version mishandles comparison to
+current layout version in storage. (cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb7f8ec1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ef53692..f962373 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7004,10 +7004,17 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
* @return layout version in effect
*/
   public int getEffectiveLayoutVersion() {
-if (isRollingUpgrade()) {
-  int storageLV = fsImage.getStorage().getLayoutVersion();
-      if (storageLV >=
-          NameNodeLayoutVersion.MINIMUM_COMPATIBLE_LAYOUT_VERSION) {
+return getEffectiveLayoutVersion(isRollingUpgrade(),
+fsImage.getStorage().getLayoutVersion(),
+NameNodeLayoutVersion.MINIMUM_COMPATIBLE_LAYOUT_VERSION,
+NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
+  }
+
+  @VisibleForTesting
+  static int getEffectiveLayoutVersion(boolean isRollingUpgrade, int storageLV,
+  int minCompatLV, int currentLV) {
+if (isRollingUpgrade) {
+      if (storageLV >= minCompatLV) {
 // The prior layout version satisfies the minimum compatible layout
 // version of the current software.  Keep reporting the prior layout
 // as the effective one.  Downgrade is possible.
@@ -7016,7 +7023,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 }
 // The current software cannot satisfy the layout version of the prior
 // software.  Proceed with using the current layout version.
-return NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
+return currentLV;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb7f8ec1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index 8b0662c..26bb4f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -213,4 +213,24 @@ public class TestFSNamesystem {
 fsn.imageLoadComplete();
 assertTrue(fsn.isImageLoaded());
   }
+
+  @Test
+  public void testGetEffectiveLayoutVersion() {
+assertEquals(-63,
+FSNamesystem.getEffectiveLayoutVersion(true, -60, -61, -63));
+assertEquals(-61,
+FSNamesystem.getEffectiveLayoutVersion(true, -61, -61, -63));
+assertEquals(-62,
+

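The shape of the fix above is a common testability pattern: the public method gathers live state and delegates to a static, side-effect-free overload that a unit test can drive directly, as TestFSNamesystem now does. A generic sketch of the pattern (names hypothetical):

    import com.google.common.annotations.VisibleForTesting;

    class VersionPolicy {
      private boolean rollingUpgrade;
      private int storageVersion;

      // Thin wrapper: collects instance state, delegates to the pure function.
      public int effectiveVersion() {
        return effectiveVersion(rollingUpgrade, storageVersion, -61, -63);
      }

      // Pure and static: unit-testable without a running namesystem.
      @VisibleForTesting
      static int effectiveVersion(boolean rollingUpgrade, int storageLV,
          int minCompatLV, int currentLV) {
        // Layout versions are negative and "newer" means more negative, so
        // the minimum-compatible check is >= (the comparison fixed above).
        return (rollingUpgrade && storageLV >= minCompatLV)
            ? storageLV : currentLV;
      }
    }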
[49/50] [abbrv] hadoop git commit: MAPREDUCE-6373. The logger reports total input paths but it is referring to input files. Contributed by Bibin A Chundatt.

2015-06-18 Thread zjshen
MAPREDUCE-6373. The logger reports total input paths but it is referring
to input files. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05d63e67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05d63e67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05d63e67

Branch: refs/heads/YARN-2928
Commit: 05d63e6727508baa0a747838f5d41fd45f5b4e06
Parents: 8119664
Author: Devaraj K deva...@apache.org
Authored: Thu Jun 18 11:42:22 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:02 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/mapred/FileInputFormat.java   | 2 +-
 .../org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java| 3 +--
 3 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05d63e67/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 5b66604..f885542 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -501,6 +501,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6363. [NNBench] Lease mismatch error when running with multiple
 mappers. (Brahma Reddy Battula via aajisaka)
 
+MAPREDUCE-6373. The logger reports total input paths but it is referring
+to input files. (Bibin A Chundatt via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05d63e67/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index c6cbd50..2c58ebe 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -253,7 +253,7 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
      LOG.debug("Time taken to get FileStatuses: "
          + sw.now(TimeUnit.MILLISECONDS));
    }
-    LOG.info("Total input paths to process : " + result.length);
+    LOG.info("Total input files to process : " + result.length);
 return result;
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05d63e67/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
index f5cd5d1..0c5ede9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.mapred.LocatedFileStatusFetcher;
-import org.apache.hadoop.mapred.SplitLocationInfo;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.Job;
@@ -287,7 +286,7 @@ public abstract class FileInputFormat<K, V> extends InputFormat<K, V> {
      LOG.debug("Time taken to get FileStatuses: "
          + sw.now(TimeUnit.MILLISECONDS));
    }
-    LOG.info("Total input paths to process : " + result.size());
+    LOG.info("Total input files to process : " + result.size());
 return result;
   }
 



[17/50] [abbrv] hadoop git commit: Move HDFS-8540 to 2.8 in CHANGES.txt.

2015-06-18 Thread zjshen
Move HDFS-8540 to 2.8 in CHANGES.txt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b181b870
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b181b870
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b181b870

Branch: refs/heads/YARN-2928
Commit: b181b870409cb93d6b7c12c2be31184f2e8c
Parents: fe8250d
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Mon Jun 15 17:01:55 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:06 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b181b870/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 584d94d..9822575 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -629,6 +629,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8499. Refactor BlockInfo class hierarchy with static helper class.
 (Zhe Zhang via wang)
 
+HDFS-8540.  Mover should exit with NO_MOVE_BLOCK if no block can be moved.
+(surendra singh lilhore via szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -917,9 +920,6 @@ Release 2.7.1 - UNRELEASED
 HDFS-8521. Add VisibleForTesting annotation to
 BlockPoolSlice#selectReplicaToDelete. (cmccabe)
 
-HDFS-8540.  Mover should exit with NO_MOVE_BLOCK if no block can be moved.
-(surendra singh lilhore via szetszwo)
-
   OPTIMIZATIONS
 
   BUG FIXES



[36/50] [abbrv] hadoop git commit: YARN-3148. Allow CORS related headers to passthrough in WebAppProxyServlet. Contributed by Varun Saxena.

2015-06-18 Thread zjshen
YARN-3148. Allow CORS related headers to passthrough in
WebAppProxyServlet. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7ed62f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7ed62f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7ed62f3

Branch: refs/heads/YARN-2928
Commit: c7ed62f38601517003e88619b55ef9d29b49c727
Parents: 7028124
Author: Devaraj K deva...@apache.org
Authored: Wed Jun 17 15:56:18 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:00 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../server/webproxy/WebAppProxyServlet.java |  5 +-
 .../server/webproxy/TestWebAppProxyServlet.java | 57 
 3 files changed, 64 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ed62f3/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b1659a4..ae9716c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -409,6 +409,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3789. Improve logs for LeafQueue#activateApplications().
 (Bibin A Chundatt via devaraj)
 
+YARN-3148. Allow CORS related headers to passthrough in WebAppProxyServlet.
+(Varun Saxena via devaraj)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ed62f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
index b1e355d..33f36f0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
@@ -81,7 +81,10 @@ public class WebAppProxyServlet extends HttpServlet {
        "Accept-Encoding",
        "Accept-Language",
        "Accept-Charset",
-        "Content-Type"));
+        "Content-Type",
+        "Origin",
+        "Access-Control-Request-Method",
+        "Access-Control-Request-Headers"));
 
   public static final String PROXY_USER_COOKIE_NAME = "proxy-user";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ed62f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
index 2a2ca2c..8e68c38 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
@@ -31,6 +31,7 @@ import java.net.HttpCookie;
 import java.net.HttpURLConnection;
 import java.net.URI;
 import java.net.URL;
+import java.util.Enumeration;
 import java.util.List;
 import java.util.Map;
 
@@ -72,6 +73,10 @@ public class TestWebAppProxyServlet {
 
   private static Server server;
   private static int originalPort = 0;
+  private static int numberOfHeaders = 0;
+  private static final String UNKNOWN_HEADER = "Unknown-Header";
+  private static boolean hasUnknownHeader = false;
+
 
   /**
* Simple http server. Server should send answer with status 200
@@ -88,6 +93,9 @@ public class TestWebAppProxyServlet {
 originalPort = server.getConnectors()[0].getLocalPort();
    LOG.info("Running embedded servlet container at: http://localhost:"
        + originalPort);
+// This property needs to be set otherwise CORS Headers will be dropped
+// by HttpUrlConnection
+

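The test comment cut off above refers to a JDK quirk: java.net.HttpURLConnection silently drops "restricted" request headers such as Origin unless a system property is set first. The exact property the Hadoop test sets is truncated above; sun.net.http.allowRestrictedHeaders is the standard JDK switch for this behavior, so the workaround would plausibly look like:

    import java.net.HttpURLConnection;
    import java.net.URL;

    class CorsHeaderSketch {
      static HttpURLConnection openWithOrigin(URL url) throws Exception {
        // Assumption: must be set before HttpURLConnection is first used,
        // otherwise restricted headers like Origin are stripped from the
        // outgoing request.
        System.setProperty("sun.net.http.allowRestrictedHeaders", "true");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Origin", "http://example.test");
        return conn;
      }
    }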
[01/50] [abbrv] hadoop git commit: HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. Contributed by Zhe Zhang.

2015-06-18 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 9137aeae0 -> 8c036a14e


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc9d48ac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index 6b8388e..d081a6b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -22,7 +22,7 @@ import java.util.List;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
@@ -133,7 +133,7 @@ public class FileDiffList extends
 Block dontRemoveBlock = null;
    if (lastBlock != null && lastBlock.getBlockUCState().equals(
 HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
-  dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock)
+  dontRemoveBlock = ((BlockInfoUnderConstruction) lastBlock)
   .getTruncateBlock();
 }
 // Collect the remaining blocks of the file, ignoring truncate block

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc9d48ac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 50b85c0..d06b024 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -109,7 +109,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -1612,9 +1612,9 @@ public class DFSTestUtil {
 BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
    assertTrue("Block " + blk + " should be under construction, " +
        "got: " + storedBlock,
-storedBlock instanceof BlockInfoContiguousUnderConstruction);
-BlockInfoContiguousUnderConstruction ucBlock =
-  (BlockInfoContiguousUnderConstruction)storedBlock;
+storedBlock instanceof BlockInfoUnderConstruction);
+BlockInfoUnderConstruction ucBlock =
+  (BlockInfoUnderConstruction)storedBlock;
 // We expect that the replica with the most recent heart beat will be
 // the one to be in charge of the synchronization / recovery protocol.
 final DatanodeStorageInfo[] storages = 
ucBlock.getExpectedStorageLocations();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc9d48ac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
index a7ba293..630cd1c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import 

[06/50] [abbrv] hadoop git commit: HADOOP-11971. Move test utilities for tracing from hadoop-hdfs to hadoop-common. Contributed by Masatake Iwasaki.

2015-06-18 Thread zjshen
HADOOP-11971. Move test utilities for tracing from hadoop-hdfs to 
hadoop-common. Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9eb54e45
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9eb54e45
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9eb54e45

Branch: refs/heads/YARN-2928
Commit: 9eb54e45951d0fe35a4087a129c3c69d27c6c4c7
Parents: 1bddd3f
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri Jun 12 07:25:15 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:04 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../apache/hadoop/tracing/SetSpanReceiver.java  | 109 +++
 .../org/apache/hadoop/tracing/TestTracing.java  |  94 ++--
 .../TestTracingShortCircuitLocalRead.java   |   4 +-
 4 files changed, 124 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9eb54e45/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4df6112..268b1db 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -645,6 +645,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-12055. Deprecate usage of NativeIO#link. (Andrew Wang via cnauroth)
 
+HADOOP-11971. Move test utilities for tracing from hadoop-hdfs to
+hadoop-common. (Masatake Iwasaki via aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9eb54e45/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
new file mode 100644
index 000..e242b74
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tracing;
+
+import com.google.common.base.Supplier;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.htrace.Span;
+import org.apache.htrace.SpanReceiver;
+import org.apache.htrace.HTraceConfiguration;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeoutException;
+import org.junit.Assert;
+
+/**
+ * Span receiver that puts all spans into a single set.
+ * This is useful for testing.
+ * <p/>
+ * We're not using HTrace's POJOReceiver here so as that doesn't
+ * push all the metrics to a static place, and would make testing
+ * SpanReceiverHost harder.
+ */
+public class SetSpanReceiver implements SpanReceiver {
+
+  public SetSpanReceiver(HTraceConfiguration conf) {
+  }
+
+  public void receiveSpan(Span span) {
+SetHolder.spans.put(span.getSpanId(), span);
+  }
+
+  public void close() {
+  }
+
+  public static void clear() {
+SetHolder.spans.clear();
+  }
+
+  public static int size() {
+return SetHolder.spans.size();
+  }
+
+  public static Collection<Span> getSpans() {
+    return SetHolder.spans.values();
+  }
+
+  public static Map<String, List<Span>> getMap() {
+    return SetHolder.getMap();
+  }
+
+  public static class SetHolder {
+    public static ConcurrentHashMap<Long, Span> spans =
+        new ConcurrentHashMap<Long, Span>();
+
+    public static Map<String, List<Span>> getMap() {
+      Map<String, List<Span>> map = new HashMap<String, List<Span>>();
+
+      for (Span s : spans.values()) {
+        List<Span> l = map.get(s.getDescription());

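The body of SetHolder.getMap() is cut off above; from its signature it groups the received spans by description. A minimal sketch of that grouping idiom, as an assumption about the truncated body rather than a verbatim continuation (String stands in for Span to keep it self-contained):

    import java.util.*;

    class GroupByDescription {
      // Bucket each item under its key, creating the bucket on first sight.
      static Map<String, List<String>> group(Collection<String> items) {
        Map<String, List<String>> map = new HashMap<String, List<String>>();
        for (String item : items) {
          List<String> l = map.get(item);
          if (l == null) {
            l = new LinkedList<String>();
            map.put(item, l);
          }
          l.add(item);
        }
        return map;
      }
    }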
[08/50] [abbrv] hadoop git commit: HDFS-8583. Document that NFS gateway does not work with rpcbind on SLES 11. (Arpit Agarwal)

2015-06-18 Thread zjshen
HDFS-8583. Document that NFS gateway does not work with rpcbind on SLES 11. 
(Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78cf309f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78cf309f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78cf309f

Branch: refs/heads/YARN-2928
Commit: 78cf309f2a6ee0cbf462b53d4b324d0290a86e19
Parents: e1d39ce
Author: Arpit Agarwal a...@apache.org
Authored: Thu Jun 11 15:08:15 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:04 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78cf309f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 38c8721..b388f69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -987,6 +987,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8566. HDFS documentation about debug commands wrongly identifies them
 as hdfs dfs commands (Surendra Singh Lilhore via Colin P. McCabe)
 
+HDFS-8583. Document that NFS gateway does not work with rpcbind
+on SLES 11. (Arpit Agarwal)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78cf309f/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
index 4bca599..da7aa6f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
@@ -198,7 +198,7 @@ It's strongly recommended for the users to update a few 
configuration properties
 Start and stop NFS gateway service
 --
 
-Three daemons are required to provide NFS service: rpcbind (or portmap), 
mountd and nfsd. The NFS gateway process has both nfsd and mountd. It shares 
the HDFS root "/" as the only export. It is recommended to use the portmap 
included in NFS gateway package. Even though NFS gateway works with 
portmap/rpcbind provide by most Linux distributions, the package included 
portmap is needed on some Linux systems such as REHL6.2 due to an [rpcbind 
bug](https://bugzilla.redhat.com/show_bug.cgi?id=731542). More detailed 
discussions can be found in 
[HDFS-4763](https://issues.apache.org/jira/browse/HDFS-4763).
+Three daemons are required to provide NFS service: rpcbind (or portmap), 
mountd and nfsd. The NFS gateway process has both nfsd and mountd. It shares 
the HDFS root "/" as the only export. It is recommended to use the portmap 
included in NFS gateway package. Even though NFS gateway works with 
portmap/rpcbind provide by most Linux distributions, the package included 
portmap is needed on some Linux systems such as RHEL 6.2 and SLES 11, the 
former due to an [rpcbind 
bug](https://bugzilla.redhat.com/show_bug.cgi?id=731542). More detailed 
discussions can be found in 
[HDFS-4763](https://issues.apache.org/jira/browse/HDFS-4763).
 
 1.  Stop nfsv3 and rpcbind/portmap services provided by the platform (commands 
can be different on various Unix platforms):
 



[14/50] [abbrv] hadoop git commit: HDFS-8595. TestCommitBlockSynchronization fails in branch-2.7. (Patch applies to all branches). (Contributed by Arpit Agarwal)

2015-06-18 Thread zjshen
HDFS-8595. TestCommitBlockSynchronization fails in branch-2.7. (Patch applies 
to all branches). (Contributed by Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30d1fb0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30d1fb0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30d1fb0c

Branch: refs/heads/YARN-2928
Commit: 30d1fb0ca7e375de14dce3b69695d87713cc4316
Parents: 2b93ab3
Author: Arpit Agarwal a...@apache.org
Authored: Sun Jun 14 16:24:30 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:05 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hdfs/server/namenode/TestCommitBlockSynchronization.java  | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30d1fb0c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1caa8c5..cb2679a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1005,6 +1005,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8596. TestDistributedFileSystem et al tests are broken in branch-2
 due to incorrect setting of datanode attribute. (Yongjun Zhang)
 
+HDFS-8595. TestCommitBlockSynchronization fails in branch-2.7. (Patch
+applies to all branches). (Arpit Agarwal)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30d1fb0c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
index b6cb522..9de426e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
@@ -78,6 +78,7 @@ public class TestCommitBlockSynchronization {
 blockInfo.initializeBlockRecovery(genStamp);
 doReturn(blockInfo).when(file).removeLastBlock(any(Block.class));
 doReturn(true).when(file).isUnderConstruction();
+doReturn(new BlockInfoContiguous[1]).when(file).getBlocks();
 
 doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
 doReturn(blockInfo).when(file).getLastBlock();



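The one-line fix above stubs file.getBlocks() on the mocked INodeFile so the code under test no longer sees a null block array. The doReturn(...).when(mock).method() form used here stubs without invoking the real method, which matters for spies and side-effecting methods. A small standalone illustration:

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.spy;

    import java.util.ArrayList;

    class StubbingSketch {
      static void stubExample() {
        ArrayList<String> list = spy(new ArrayList<String>());
        // when(list.get(0)).thenReturn(...) would call the real get(0)
        // first and throw IndexOutOfBoundsException on the empty list;
        // doReturn stubs without executing the real method.
        doReturn("stubbed").when(list).get(0);
      }
    }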
[10/50] [abbrv] hadoop git commit: HADOOP-12001. Fixed LdapGroupsMapping to include configurable Posix UID and GID attributes during the search. Contributed by Patrick White.

2015-06-18 Thread zjshen
HADOOP-12001. Fixed LdapGroupsMapping to include configurable Posix UID and GID 
attributes during the search. Contributed by Patrick White.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68cc034c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68cc034c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68cc034c

Branch: refs/heads/YARN-2928
Commit: 68cc034c24683f24eb3fa6a9e0a2e4e9034555f3
Parents: e447ae3
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Mon Jun 15 14:22:34 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:05 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../hadoop/security/LdapGroupsMapping.java  | 29 +++-
 .../src/main/resources/core-default.xml | 18 
 .../TestLdapGroupsMappingWithPosixGroup.java| 25 +++--
 4 files changed, 60 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68cc034c/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 268b1db..cdd396f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -899,6 +899,9 @@ Release 2.7.1 - UNRELEASED
 HADOOP-12078. The default retry policy does not handle RetriableException
 correctly. (Arpit Agarwal)
 
+HADOOP-12001. Fixed LdapGroupsMapping to include configurable Posix UID and
+GID attributes during the search. (Patrick White via vinodkv)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68cc034c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
index df91b70..8f6203d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
@@ -150,10 +150,17 @@ public class LdapGroupsMapping
   public static final String GROUP_NAME_ATTR_DEFAULT = "cn";
 
   /*
+   * LDAP attribute names to use when doing posix-like lookups
+   */
+  public static final String POSIX_UID_ATTR_KEY =
+      LDAP_CONFIG_PREFIX + ".posix.attr.uid.name";
+  public static final String POSIX_UID_ATTR_DEFAULT = "uidNumber";
+
+  public static final String POSIX_GID_ATTR_KEY =
+      LDAP_CONFIG_PREFIX + ".posix.attr.gid.name";
+  public static final String POSIX_GID_ATTR_DEFAULT = "gidNumber";
+
+  /*
    * Posix attributes
    */
-  public static final String POSIX_UIDNUMBER = "uidNumber";
-  public static final String POSIX_GIDNUMBER = "gidNumber";
   public static final String POSIX_GROUP = "posixGroup";
   public static final String POSIX_ACCOUNT = "posixAccount";
 
@@ -186,6 +193,8 @@ public class LdapGroupsMapping
   private String userSearchFilter;
   private String groupMemberAttr;
   private String groupNameAttr;
+  private String posixUidAttr;
+  private String posixGidAttr;
   private boolean isPosix;
 
   public static int RECONNECT_RETRY_COUNT = 3;
@@ -256,8 +265,8 @@ public class LdapGroupsMapping
   if (isPosix) {
 String gidNumber = null;
 String uidNumber = null;
-Attribute gidAttribute = result.getAttributes().get(POSIX_GIDNUMBER);
-Attribute uidAttribute = result.getAttributes().get(POSIX_UIDNUMBER);
+Attribute gidAttribute = result.getAttributes().get(posixGidAttr);
+Attribute uidAttribute = result.getAttributes().get(posixUidAttr);
 if (gidAttribute != null) {
   gidNumber = gidAttribute.get().toString();
 }
@@ -267,7 +276,7 @@ public class LdapGroupsMapping
 if (uidNumber != null && gidNumber != null) {
   groupResults =
   ctx.search(baseDN,
-  "(&" + groupSearchFilter + "(|(" + POSIX_GIDNUMBER + "={0})" +
+  "(&" + groupSearchFilter + "(|(" + posixGidAttr + "={0})" +
   "(" + groupMemberAttr + "={1})))",
   new Object[] { gidNumber, uidNumber },
   SEARCH_CONTROLS);
@@ -374,11 +383,17 @@ public class LdapGroupsMapping
 conf.get(GROUP_MEMBERSHIP_ATTR_KEY, GROUP_MEMBERSHIP_ATTR_DEFAULT);
 groupNameAttr =
 conf.get(GROUP_NAME_ATTR_KEY, GROUP_NAME_ATTR_DEFAULT);
+posixUidAttr =
+
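
For illustration, a minimal sketch of overriding the new attribute names
through Configuration. This assumes LDAP_CONFIG_PREFIX resolves to
"hadoop.security.group.mapping.ldap"; the values shown are simply the shipped
defaults from the constants above.

    import org.apache.hadoop.conf.Configuration;

    public class LdapPosixAttrDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Override the attribute names used for posix-like lookups
        // (values here are just the defaults).
        conf.set("hadoop.security.group.mapping.ldap.posix.attr.uid.name",
            "uidNumber");
        conf.set("hadoop.security.group.mapping.ldap.posix.attr.gid.name",
            "gidNumber");
        System.out.println(
            conf.get("hadoop.security.group.mapping.ldap.posix.attr.uid.name"));
      }
    }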

[07/50] [abbrv] hadoop git commit: YARN-3794. TestRMEmbeddedElector fails because of ambiguous LOG reference. Contributed by Chengbing Liu.

2015-06-18 Thread zjshen
YARN-3794. TestRMEmbeddedElector fails because of ambiguous LOG reference.
Contributed by Chengbing Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bddd3f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bddd3f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bddd3f0

Branch: refs/heads/YARN-2928
Commit: 1bddd3f0b5314e68548ebc51b0fd406b8ae8f787
Parents: 78cf309
Author: Devaraj K deva...@apache.org
Authored: Fri Jun 12 13:42:49 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:04 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../yarn/server/resourcemanager/TestRMEmbeddedElector.java   | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bddd3f0/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b692fa4..1776a89 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -621,6 +621,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3778. Fix Yarn resourcemanger CLI usage. (Brahma Reddy Battula via 
xgong)
 
+YARN-3794. TestRMEmbeddedElector fails because of ambiguous LOG reference.
+(Chengbing Liu via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bddd3f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
index 1b0bf7e..20b1c0e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
@@ -102,9 +102,9 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
 ServiceFailedException {
   try {
 callbackCalled.set(true);
-LOG.info("Callback called. Sleeping now");
+TestRMEmbeddedElector.LOG.info("Callback called. Sleeping now");
 Thread.sleep(delayMs);
-LOG.info("Sleep done");
+TestRMEmbeddedElector.LOG.info("Sleep done");
   } catch (InterruptedException e) {
 e.printStackTrace();
   }
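
For readers unfamiliar with the failure mode: when a simple name is both
inherited from a superclass and in scope from an enclosing class, javac
rejects it as ambiguous, and qualifying it with the enclosing class name (as
the patch does) resolves it. A minimal, self-contained sketch with invented
names:

    public class AmbiguousNameDemo {
      static final String LOG = "outer LOG";

      static class Base {
        static final String LOG = "inherited LOG";
      }

      class Inner extends Base {
        String which() {
          // A bare "LOG" here would not compile: it is inherited from Base
          // and also visible from the enclosing class. Qualify it instead:
          return AmbiguousNameDemo.LOG;
        }
      }

      public static void main(String[] args) {
        System.out.println(new AmbiguousNameDemo().new Inner().which());
      }
    }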



hadoop git commit: HADOOP-12100. ImmutableFsPermission should not override applyUmask since that method doesn't modify the FsPermission (Bibin A. Chundatt via Colin P. McCabe)

2015-06-18 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1c13519e1 - 6e0a9f92f


HADOOP-12100. ImmutableFsPermission should not override applyUmask since that 
method doesn't modify the FsPermission (Bibin A. Chundatt via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e0a9f92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e0a9f92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e0a9f92

Branch: refs/heads/trunk
Commit: 6e0a9f92fe0052d39b95a605174b3f6423c6aae7
Parents: 1c13519
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Jun 18 11:30:29 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Thu Jun 18 11:39:05 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 4 
 .../java/org/apache/hadoop/fs/permission/FsPermission.java| 7 ++-
 2 files changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e0a9f92/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3430da6..2f5eda3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -914,6 +914,10 @@ Release 2.7.1 - UNRELEASED
 HADOOP-12078. The default retry policy does not handle RetriableException
 correctly. (Arpit Agarwal)
 
+HADOOP-12100. ImmutableFsPermission should not override applyUmask since
+that method doesn't modify the FsPermission (Bibin A Chundatt via Colin P.
+McCabe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e0a9f92/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index 264a095..0258293 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -375,13 +375,10 @@ public class FsPermission implements Writable {
 public ImmutableFsPermission(short permission) {
   super(permission);
 }
-@Override
-public FsPermission applyUMask(FsPermission umask) {
-  throw new UnsupportedOperationException();
-}
+
 @Override
 public void readFields(DataInput in) throws IOException {
   throw new UnsupportedOperationException();
-}
+}
   }
 }
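
As a quick illustration of why the override could simply be dropped:
applyUMask computes and returns a new FsPermission rather than mutating the
receiver, so it is harmless on the immutable subclass. A minimal sketch
(octal values chosen arbitrarily):

    import org.apache.hadoop.fs.permission.FsPermission;

    public class ApplyUmaskDemo {
      public static void main(String[] args) {
        FsPermission perm = new FsPermission((short) 0777);
        FsPermission umask = new FsPermission((short) 0022);
        FsPermission masked = perm.applyUMask(umask);
        System.out.println(perm);   // receiver unchanged: rwxrwxrwx
        System.out.println(masked); // new object: rwxr-xr-x
      }
    }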



hadoop git commit: HADOOP-12100. ImmutableFsPermission should not override applyUmask since that method doesn't modify the FsPermission (Bibin A. Chundatt via Colin P. McCabe)

2015-06-18 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7f62e41b2 - 2946e92f7


HADOOP-12100. ImmutableFsPermission should not override applyUmask since that 
method doesn't modify the FsPermission (Bibin A. Chundatt via Colin P. McCabe)

(cherry picked from commit 6e0a9f92fe0052d39b95a605174b3f6423c6aae7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2946e92f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2946e92f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2946e92f

Branch: refs/heads/branch-2
Commit: 2946e92f79a73cccf9533f31ad40471046496cb2
Parents: 7f62e41
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Jun 18 11:30:29 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Thu Jun 18 11:39:28 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 4 
 .../java/org/apache/hadoop/fs/permission/FsPermission.java| 7 ++-
 2 files changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2946e92f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d0b4e3e..1099a39 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -430,6 +430,10 @@ Release 2.7.1 - UNRELEASED
 HADOOP-12078. The default retry policy does not handle RetriableException
 correctly. (Arpit Agarwal)
 
+HADOOP-12100. ImmutableFsPermission should not override applyUmask since
+that method doesn't modify the FsPermission (Bibin A Chundatt via Colin P.
+McCabe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2946e92f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index 264a095..0258293 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -375,13 +375,10 @@ public class FsPermission implements Writable {
 public ImmutableFsPermission(short permission) {
   super(permission);
 }
-@Override
-public FsPermission applyUMask(FsPermission umask) {
-  throw new UnsupportedOperationException();
-}
+
 @Override
 public void readFields(DataInput in) throws IOException {
   throw new UnsupportedOperationException();
-}
+}
   }
 }



hadoop git commit: HADOOP-12100. ImmutableFsPermission should not override applyUmask since that method doesn't modify the FsPermission (Bibin A. Chundatt via Colin P. McCabe)

2015-06-18 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 c0a419b13 - e5f970341


HADOOP-12100. ImmutableFsPermission should not override applyUmask since that 
method doesn't modify the FsPermission (Bibin A. Chundatt via Colin P. McCabe)

(cherry picked from commit 6e0a9f92fe0052d39b95a605174b3f6423c6aae7)
(cherry picked from commit 2946e92f79a73cccf9533f31ad40471046496cb2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5f97034
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5f97034
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5f97034

Branch: refs/heads/branch-2.7
Commit: e5f9703419234123ef266fcee05cd32eea8d9b79
Parents: c0a419b
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Jun 18 11:30:29 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Thu Jun 18 11:39:54 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 4 
 .../java/org/apache/hadoop/fs/permission/FsPermission.java| 7 ++-
 2 files changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5f97034/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9abf315..5839f08 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -47,6 +47,10 @@ Release 2.7.1 - UNRELEASED
 HADOOP-12078. The default retry policy does not handle RetriableException
 correctly. (Arpit Agarwal)
 
+HADOOP-12100. ImmutableFsPermission should not override applyUmask since
+that method doesn't modify the FsPermission (Bibin A Chundatt via Colin P.
+McCabe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5f97034/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index 264a095..0258293 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -375,13 +375,10 @@ public class FsPermission implements Writable {
 public ImmutableFsPermission(short permission) {
   super(permission);
 }
-@Override
-public FsPermission applyUMask(FsPermission umask) {
-  throw new UnsupportedOperationException();
-}
+
 @Override
 public void readFields(DataInput in) throws IOException {
   throw new UnsupportedOperationException();
-}
+}
   }
 }



[28/50] [abbrv] hadoop git commit: HDFS-8548. Minicluster throws NPE on shutdown. Contributed by surendra singh lilhore.

2015-06-18 Thread zjshen
HDFS-8548. Minicluster throws NPE on shutdown. Contributed by surendra singh 
lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/816ab879
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/816ab879
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/816ab879

Branch: refs/heads/YARN-2928
Commit: 816ab879661b6ed580b21f0b47771bec5115451f
Parents: b509768
Author: Xiaoyu Yao x...@apache.org
Authored: Tue Jun 16 13:52:50 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:18:58 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java   | 6 +-
 2 files changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/816ab879/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b921f2c..c8c36e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -900,6 +900,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8592. SafeModeException never get unwrapped. (wheat9)
 
+HDFS-8548. Minicluster throws NPE on shutdown.
+(surendra singh lilhore via xyao)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/816ab879/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index 9eb1059..0dbf485 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -198,7 +198,11 @@ class BlocksMap {
   }
 
   int size() {
-return blocks.size();
+if (blocks != null) {
+  return blocks.size();
+} else {
+  return 0;
+}
   }
 
   Iterable<BlockInfo> getBlocks() {



[40/50] [abbrv] hadoop git commit: HDFS-8615. Correct HTTP method in WebHDFS document. Contributed by Brahma Reddy Battula.

2015-06-18 Thread zjshen
HDFS-8615. Correct HTTP method in WebHDFS document. Contributed by Brahma Reddy 
Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1816803
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1816803
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1816803

Branch: refs/heads/YARN-2928
Commit: c1816803b6ebe64fe6651f18f83c60ff9d2a251e
Parents: 59171b4
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Jun 17 17:13:02 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:01 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 12 ++--
 2 files changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1816803/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ef3530f3..8f563de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -924,6 +924,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8551. Fix hdfs datanode CLI usage message.
 (Brahma Reddy Battula via xyao)
 
+HDFS-8615. Correct HTTP method in WebHDFS document.
+(Brahma Reddy Battula via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1816803/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index d0231a3..e8f5fee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -691,7 +691,7 @@ See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setAcl
 
 * Submit a HTTP GET request.
 
-curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETACLSTATUS"
+curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETACLSTATUS"
 
 The client receives a response with a [`AclStatus` JSON 
object](#ACL_Status_JSON_Schema):
 
@@ -718,7 +718,7 @@ See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getAclSta
 
 * Submit a HTTP GET request.
 
-curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CHECKACCESS
+curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CHECKACCESS
              &fsaction=<FSACTION>"
 
 The client receives a response with zero content length:
@@ -764,7 +764,7 @@ See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).removeXAt
 
 * Submit a HTTP GET request.
 
-curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETXATTRS
+curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETXATTRS
              &xattr.name=<XATTRNAME>&encoding=<ENCODING>"
 
 The client receives a response with a [`XAttrs` JSON 
object](#XAttrs_JSON_Schema):
@@ -788,7 +788,7 @@ See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getXAttr
 
 * Submit a HTTP GET request.
 
-curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETXATTRS
+curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETXATTRS
              &xattr.name=<XATTRNAME1>&xattr.name=<XATTRNAME2>
              &encoding=<ENCODING>"
 
@@ -817,7 +817,7 @@ See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getXAttrs
 
 * Submit a HTTP GET request.
 
-curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETXATTRS
+curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETXATTRS
              &encoding=<ENCODING>"
 
 The client receives a response with a [`XAttrs` JSON 
object](#XAttrs_JSON_Schema):
@@ -849,7 +849,7 @@ See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getXAttrs
 
 * Submit a HTTP GET request.
 
-curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=LISTXATTRS"
+curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=LISTXATTRS"
 
 The client receives a response with a [`XAttrNames` JSON 
object](#XAttrNames_JSON_Schema):
 



[37/50] [abbrv] hadoop git commit: HDFS-8608. Merge HDFS-7912 to trunk and branch-2 (track BlockInfo instead of Block in UnderReplicatedBlocks and PendingReplicationBlocks). Contributed by Zhe Zhang.

2015-06-18 Thread zjshen
HDFS-8608. Merge HDFS-7912 to trunk and branch-2 (track BlockInfo instead of 
Block in UnderReplicatedBlocks and PendingReplicationBlocks). Contributed by 
Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a4ccbd0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a4ccbd0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a4ccbd0

Branch: refs/heads/YARN-2928
Commit: 5a4ccbd0af7c2ba89c53ce77e0de7c12b5daa7a7
Parents: c7ed62f
Author: Andrew Wang w...@apache.org
Authored: Wed Jun 17 08:05:44 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:00 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  4 ++
 .../server/blockmanagement/BlockManager.java| 54 ---
 .../PendingReplicationBlocks.java   | 51 +++---
 .../blockmanagement/UnderReplicatedBlocks.java  | 57 
 .../hdfs/server/namenode/FSDirAttrOp.java   |  8 +--
 .../hdfs/server/namenode/FSNamesystem.java  | 21 +++---
 .../hadoop/hdfs/server/namenode/INode.java  | 12 ++--
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  4 +-
 .../hdfs/server/namenode/NamenodeFsck.java  | 58 
 .../hadoop/hdfs/server/namenode/SafeMode.java   |  4 +-
 .../blockmanagement/BlockManagerTestUtil.java   |  5 +-
 .../blockmanagement/TestBlockManager.java   |  8 +--
 .../server/blockmanagement/TestNodeCount.java   |  3 +-
 .../TestOverReplicatedBlocks.java   |  5 +-
 .../blockmanagement/TestPendingReplication.java | 27 +---
 .../TestRBWBlockInvalidation.java   |  4 +-
 .../blockmanagement/TestReplicationPolicy.java  | 70 +++-
 .../TestUnderReplicatedBlockQueues.java | 16 +++--
 .../datanode/TestReadOnlySharedStorage.java | 11 +--
 .../namenode/TestProcessCorruptBlocks.java  |  5 +-
 20 files changed, 231 insertions(+), 196 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a4ccbd0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 07cd4a8..a01446a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -640,6 +640,10 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-7164. Feature documentation for HDFS-6581. (Arpit Agarwal)
 
+HDFS-8608. Merge HDFS-7912 to trunk and branch-2 (track BlockInfo instead
+of Block in UnderReplicatedBlocks and PendingReplicationBlocks).
+(Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a4ccbd0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ebc9017..824801f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -588,7 +588,7 @@ public class BlockManager {
   /**
* @return true if the block has minimum replicas
*/
-  public boolean checkMinReplication(Block block) {
+  public boolean checkMinReplication(BlockInfo block) {
 return (countNodes(block).liveReplicas() >= minReplication);
   }
 
@@ -1310,7 +1310,7 @@ public class BlockManager {
* @return number of blocks scheduled for replication during this iteration.
*/
   int computeReplicationWork(int blocksToProcess) {
-    List<List<Block>> blocksToReplicate = null;
+    List<List<BlockInfo>> blocksToReplicate = null;
+ListListBlockInfo blocksToReplicate = null;
 namesystem.writeLock();
 try {
   // Choose the blocks to be replicated
@@ -1328,7 +1328,7 @@ public class BlockManager {
* @return the number of blocks scheduled for replication
*/
   @VisibleForTesting
-  int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
+  int computeReplicationWorkForBlocks(List<List<BlockInfo>> blocksToReplicate) {
 int requiredReplication, numEffectiveReplicas;
 List<DatanodeDescriptor> containingNodes;
 DatanodeDescriptor srcNode;
@@ -1342,7 +1342,7 @@ public class BlockManager {
 try {
   synchronized (neededReplications) {
 for (int priority = 0; priority < blocksToReplicate.size(); priority++) {
-  for (Block block : blocksToReplicate.get(priority)) {
+  

[33/50] [abbrv] hadoop git commit: HDFS-8551. Fix hdfs datanode CLI usage message. Contributed by Brahma Reddy Battula.

2015-06-18 Thread zjshen
HDFS-8551. Fix hdfs datanode CLI usage message. Contributed by Brahma Reddy 
Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/984e8bee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/984e8bee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/984e8bee

Branch: refs/heads/YARN-2928
Commit: 984e8bee01e71e97840b08e1bb782ceb921d260e
Parents: 76dc2f5
Author: Xiaoyu Yao x...@apache.org
Authored: Tue Jun 16 14:12:45 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:18:59 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java| 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/984e8bee/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c8c36e6..2d4c062 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -903,6 +903,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8548. Minicluster throws NPE on shutdown.
 (surendra singh lilhore via xyao)
 
+HDFS-8551. Fix hdfs datanode CLI usage message.
+(Brahma Reddy Battula via xyao)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/984e8bee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 3bd131e..74acf81 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -262,9 +262,11 @@ public class DataNode extends ReconfigurableBase
 LogFactory.getLog(DataNode.class.getName() + ".clienttrace");
   
   private static final String USAGE =
-      "Usage: java DataNode [-regular | -rollback]\n" +
+      "Usage: hdfs datanode [-regular | -rollback | -rollingupgrade rollback" +
+      " ]\n" +
       "    -regular                 : Normal DataNode startup (default).\n" +
       "    -rollback                : Rollback a standard or rolling upgrade.\n" +
+      "    -rollingupgrade rollback : Rollback a rolling upgrade operation.\n" +
       "  Refer to HDFS documentation for the difference between standard\n" +
       "  and rolling upgrades.";
 



[05/50] [abbrv] hadoop git commit: HDFS-7923. The DataNodes should rate-limit their full block reports by asking the NN on heartbeat messages (cmccabe)

2015-06-18 Thread zjshen
HDFS-7923. The DataNodes should rate-limit their full block reports by asking 
the NN on heartbeat messages (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7460011
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7460011
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7460011

Branch: refs/heads/YARN-2928
Commit: f7460011855bda4ec76f6761aa369ca52ec497a0
Parents: 9eb54e4
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Fri Jun 12 11:17:51 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:04 2015 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   4 +
 .../DatanodeProtocolClientSideTranslatorPB.java |   8 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   3 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   3 +-
 .../server/blockmanagement/BlockManager.java|  41 ++-
 .../BlockManagerFaultInjector.java  |  52 +++
 .../BlockReportLeaseManager.java| 355 +++
 .../server/blockmanagement/DatanodeManager.java |   2 +
 .../hdfs/server/datanode/BPServiceActor.java|  71 +++-
 .../hadoop/hdfs/server/datanode/DNConf.java |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  11 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   9 +-
 .../server/protocol/BlockReportContext.java |  25 +-
 .../hdfs/server/protocol/DatanodeProtocol.java  |   5 +-
 .../hdfs/server/protocol/HeartbeatResponse.java |  10 +-
 .../hdfs/server/protocol/RegisterCommand.java   |   2 +-
 .../src/main/proto/DatanodeProtocol.proto   |   6 +
 .../src/main/resources/hdfs-default.xml |  21 ++
 .../hdfs/protocol/TestBlockListAsLongs.java |   4 +-
 .../TestBlockReportRateLimiting.java| 246 +
 .../blockmanagement/TestDatanodeManager.java|  21 +-
 .../TestNameNodePrunesMissingStorages.java  |   2 +-
 .../server/datanode/TestBPOfferService.java |   7 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java   |   2 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   6 +-
 .../datanode/TestBpServiceActorScheduler.java   |   2 +-
 .../TestDatanodeProtocolRetryPolicy.java|   8 +-
 .../server/datanode/TestFsDatasetCache.java |   9 +-
 .../TestNNHandlesBlockReportPerStorage.java |   2 +-
 .../TestNNHandlesCombinedBlockReport.java   |   2 +-
 .../hdfs/server/datanode/TestStorageReport.java |   2 +-
 .../server/namenode/NNThroughputBenchmark.java  |   8 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   2 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   6 +-
 34 files changed, 890 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7460011/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 5bb6e53..3f72608 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -434,6 +434,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int     DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT = 0;
   public static final String  DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY =
       "dfs.blockreport.split.threshold";
   public static final long    DFS_BLOCKREPORT_SPLIT_THRESHOLD_DEFAULT = 1000 * 1000;
+  public static final String  DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES =
+      "dfs.namenode.max.full.block.report.leases";
+  public static final int     DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES_DEFAULT = 6;
+  public static final String  DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS =
+      "dfs.namenode.full.block.report.lease.length.ms";
+  public static final long    DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS_DEFAULT =
+      5L * 60L * 1000L;
   public static final String  DFS_CACHEREPORT_INTERVAL_MSEC_KEY =
       "dfs.cachereport.intervalMsec";
   public static final long    DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT = 10 * 1000;
   public static final String  DFS_BLOCK_INVALIDATE_LIMIT_KEY =
       "dfs.block.invalidate.limit";
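
A minimal sketch of tuning the two new settings programmatically; the key
strings and values simply mirror the constants added in the hunk above, and
setting them in hdfs-site.xml works the same way:

    import org.apache.hadoop.conf.Configuration;

    public class BlockReportLeaseConfigDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Cap how many datanodes may send full block reports at once.
        conf.setInt("dfs.namenode.max.full.block.report.leases", 6);
        // How long a granted lease remains valid, in milliseconds.
        conf.setLong("dfs.namenode.full.block.report.lease.length.ms",
            5L * 60L * 1000L);
        System.out.println(
            conf.getInt("dfs.namenode.max.full.block.report.leases", -1));
      }
    }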

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7460011/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
 

[21/50] [abbrv] hadoop git commit: MAPREDUCE-6363. [NNBench] Lease mismatch error when running with multiple mappers. Contributed by Brahma Reddy Battula.

2015-06-18 Thread zjshen
MAPREDUCE-6363. [NNBench] Lease mismatch error when running with multiple 
mappers. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fd568bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fd568bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fd568bd

Branch: refs/heads/YARN-2928
Commit: 9fd568bd0b118df95759d2514b375f111ce745fc
Parents: 77bbe95
Author: Akira Ajisaka aajis...@apache.org
Authored: Mon Jun 15 14:52:26 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:06 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt |  3 +++
 .../test/java/org/apache/hadoop/hdfs/NNBench.java| 15 +++
 2 files changed, 14 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fd568bd/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ead80a0..3c2e8f9 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -495,6 +495,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6389. Fix BaileyBorweinPlouffe CLI usage message. (Brahma Reddy 
Battula
 via devaraj)
 
+MAPREDUCE-6363. [NNBench] Lease mismatch error when running with multiple
+mappers. (Brahma Reddy Battula via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fd568bd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
index bb50213..6c8ec17 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
@@ -57,6 +57,9 @@ import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reducer;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 
 /**
  * This program executes a specified operation that applies load to 
@@ -687,6 +690,9 @@ public class NNBench {
   dataDirName = conf.get("test.nnbench.datadir.name");
   op = conf.get("test.nnbench.operation");
   readFile = conf.getBoolean("test.nnbench.readFileAfterOpen", false);
+  int taskId =
+  TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID))
+  .getTaskID().getId();
   
   long totalTimeTPmS = 0l;
   long startTimeTPmS = 0l;
@@ -699,18 +705,19 @@ public class NNBench {
   successfulFileOps = 0l;
   
   if (barrier()) {
+String filePrefix = "file_" + taskId + "_";
 if (op.equals(OP_CREATE_WRITE)) {
   startTimeTPmS = System.currentTimeMillis();
-  doCreateWriteOp("file_" + hostName + "_", reporter);
+  doCreateWriteOp(filePrefix, reporter);
 } else if (op.equals(OP_OPEN_READ)) {
   startTimeTPmS = System.currentTimeMillis();
-  doOpenReadOp("file_" + hostName + "_", reporter);
+  doOpenReadOp(filePrefix, reporter);
 } else if (op.equals(OP_RENAME)) {
   startTimeTPmS = System.currentTimeMillis();
-  doRenameOp("file_" + hostName + "_", reporter);
+  doRenameOp(filePrefix, reporter);
 } else if (op.equals(OP_DELETE)) {
   startTimeTPmS = System.currentTimeMillis();
-  doDeleteOp("file_" + hostName + "_", reporter);
+  doDeleteOp(filePrefix, reporter);
 }
 
 endTimeTPms = System.currentTimeMillis();
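
The fix keys file names off the numeric task id instead of the hostname, so
two mappers scheduled on one host no longer write the same file and trip the
NameNode's lease check. A small sketch of the id parsing, using the canonical
attempt-id format from the MapReduce javadoc:

    import org.apache.hadoop.mapreduce.TaskAttemptID;

    public class TaskIdDemo {
      public static void main(String[] args) {
        TaskAttemptID id =
            TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
        // Prints 5: the per-task number used above as the file prefix.
        System.out.println(id.getTaskID().getId());
      }
    }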



[13/50] [abbrv] hadoop git commit: HDFS-8607. TestFileCorruption doesn't work as expected. (Contributed by Walter Su)

2015-06-18 Thread zjshen
HDFS-8607. TestFileCorruption doesn't work as expected. (Contributed by Walter 
Su)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e447ae3f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e447ae3f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e447ae3f

Branch: refs/heads/YARN-2928
Commit: e447ae3f2923a7c4cb82672553b3cf6384601bac
Parents: 30d1fb0
Author: Arpit Agarwal a...@apache.org
Authored: Mon Jun 15 10:11:53 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:05 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../apache/hadoop/hdfs/TestFileCorruption.java   | 19 +++
 2 files changed, 14 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e447ae3f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cb2679a..c98d918 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -889,6 +889,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8593. Calculation of effective layout version mishandles comparison to
 current layout version in storage. (cnauroth)
 
+HDFS-8607. TestFileCorruption doesn't work as expected. (Walter Su via
+Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e447ae3f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 8001bfb..8e0ffe7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -27,8 +27,12 @@ import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.filefilter.DirectoryFileFilter;
+import org.apache.commons.io.filefilter.PrefixFileFilter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
@@ -74,14 +78,13 @@ public class TestFileCorruption {
   String bpid = cluster.getNamesystem().getBlockPoolId();
   File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
   assertTrue(data directory does not exist, data_dir.exists());
-      File[] blocks = data_dir.listFiles();
-      assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
-          (blocks.length > 0));
-      for (int idx = 0; idx < blocks.length; idx++) {
-        if (!blocks[idx].getName().startsWith(Block.BLOCK_FILE_PREFIX)) {
-          continue;
-        }
-        System.out.println("Deliberately removing file " + blocks[idx].getName());
-        assertTrue("Cannot remove file.", blocks[idx].delete());
+      Collection<File> blocks = FileUtils.listFiles(data_dir,
+          new PrefixFileFilter(Block.BLOCK_FILE_PREFIX),
+          DirectoryFileFilter.DIRECTORY);
+      assertTrue("Blocks do not exist in data-dir", blocks.size() > 0);
+      for (File block : blocks) {
+        System.out.println("Deliberately removing file " + block.getName());
+        assertTrue("Cannot remove file.", block.delete());
       }
       assertTrue("Corrupted replicas not handled properly.",
           util.checkFiles(fs, "/srcdat"));



[16/50] [abbrv] hadoop git commit: HDFS-8540. Mover should exit with NO_MOVE_BLOCK if no block can be moved. Contributed by surendra singh lilhore

2015-06-18 Thread zjshen
HDFS-8540.  Mover should exit with NO_MOVE_BLOCK if no block can be moved.  
Contributed by surendra singh lilhore


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe8250df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe8250df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe8250df

Branch: refs/heads/YARN-2928
Commit: fe8250dff8895455ccf19ec07a84d55b0efa8c7f
Parents: 9e1a876
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Mon Jun 15 16:26:53 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:06 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../apache/hadoop/hdfs/server/mover/Mover.java  | 95 ++--
 .../hadoop/hdfs/server/mover/TestMover.java | 29 ++
 .../hdfs/server/mover/TestStorageMover.java | 18 ++--
 4 files changed, 107 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe8250df/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 21acf98..584d94d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -917,6 +917,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8521. Add VisibleForTesting annotation to
 BlockPoolSlice#selectReplicaToDelete. (cmccabe)
 
+HDFS-8540.  Mover should exit with NO_MOVE_BLOCK if no block can be moved.
+(surendra singh lilhore via szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe8250df/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index 8715ce4..344b9fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -27,7 +27,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -163,8 +162,7 @@ public class Mover {
   private ExitStatus run() {
 try {
   init();
-  boolean hasRemaining = new Processor().processNamespace();
-  return hasRemaining ? ExitStatus.IN_PROGRESS : ExitStatus.SUCCESS;
+  return new Processor().processNamespace().getExitStatus();
 } catch (IllegalArgumentException e) {
   System.out.println(e + .  Exiting ...);
   return ExitStatus.ILLEGAL_ARGUMENTS;
@@ -262,11 +260,11 @@ public class Mover {
  * @return whether there is still remaining migration work for the next
  * round
  */
-private boolean processNamespace() throws IOException {
+private Result processNamespace() throws IOException {
   getSnapshottableDirs();
-  boolean hasRemaining = false;
+  Result result = new Result();
   for (Path target : targetPaths) {
-hasRemaining |= processPath(target.toUri().getPath());
+processPath(target.toUri().getPath(), result);
   }
   // wait for pending move to finish and retry the failed migration
   boolean hasFailed = Dispatcher.waitForMoveCompletion(storages.targets
@@ -282,16 +280,15 @@ public class Mover {
 // Reset retry count if no failure.
 retryCount.set(0);
   }
-  hasRemaining |= hasFailed;
-  return hasRemaining;
+  result.updateHasRemaining(hasFailed);
+  return result;
 }
 
 /**
 * @return whether there is still remaining migration work for the next
  * round
  */
-private boolean processPath(String fullPath) {
-  boolean hasRemaining = false;
+private void processPath(String fullPath, Result result) {
   for (byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;;) {
 final DirectoryListing children;
 try {
@@ -299,73 +296,71 @@ public class Mover {
 } catch(IOException e) {
   LOG.warn("Failed to list directory " + fullPath
       + ". Ignore the directory and continue.", e);
-  return hasRemaining;
+  return;
 }
 if (children == null) {
-  return hasRemaining;
+  
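
The diff is truncated here, but the shape of the change is visible above:
processNamespace() now returns a small result object whose exit-status mapping
can distinguish "work remains" from "nothing could be moved". A condensed
sketch of that pattern (field names and the exact mapping are assumptions for
illustration, not the actual Mover code):

    enum ExitStatus { SUCCESS, IN_PROGRESS, NO_MOVE_BLOCK }

    class Result {
      private boolean hasRemaining;
      private boolean noBlockMoved = true; // assumed flag, cleared on any move

      void updateHasRemaining(boolean more) { hasRemaining |= more; }
      void setMovedBlocks() { noBlockMoved = false; }

      ExitStatus getExitStatus() {
        if (!hasRemaining) {
          return ExitStatus.SUCCESS;                    // everything placed
        }
        return noBlockMoved ? ExitStatus.NO_MOVE_BLOCK  // stuck: exit instead
                            : ExitStatus.IN_PROGRESS;   // retry next round
      }
    }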

[47/50] [abbrv] hadoop git commit: HADOOP-11965. determine-flaky-tests needs a summary mode. Contributed by Yufei Gu,

2015-06-18 Thread zjshen
HADOOP-11965. determine-flaky-tests needs a summary mode. Contributed by Yufei 
Gu,


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8119664b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8119664b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8119664b

Branch: refs/heads/YARN-2928
Commit: 8119664bf5cc9cd5245561af7c081d88e23d89e6
Parents: 19551cf
Author: Yongjun Zhang yzh...@cloudera.com
Authored: Wed Jun 17 15:48:29 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:02 2015 -0700

--
 dev-support/determine-flaky-tests-hadoop.py | 57 +---
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 2 files changed, 52 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8119664b/dev-support/determine-flaky-tests-hadoop.py
--
diff --git a/dev-support/determine-flaky-tests-hadoop.py 
b/dev-support/determine-flaky-tests-hadoop.py
index ce152ba..8644299 100755
--- a/dev-support/determine-flaky-tests-hadoop.py
+++ b/dev-support/determine-flaky-tests-hadoop.py
@@ -62,12 +62,19 @@ import time
 DEFAULT_JENKINS_URL = "https://builds.apache.org"
 DEFAULT_JOB_NAME = "Hadoop-Common-trunk"
 DEFAULT_NUM_PREVIOUS_DAYS = 14
+DEFAULT_TOP_NUM_FAILED_TEST = -1
 
 SECONDS_PER_DAY = 86400
 
 # total number of runs to examine
 numRunsToExamine = 0
 
+#summary mode
+summary_mode = False
+
+#total number of errors
+error_count = 0
+
 """ Parse arguments """
 def parse_args():
   parser = OptionParser()
@@ -80,6 +87,10 @@ def parse_args():
   parser.add_option("-n", "--num-days", type="int",
     dest="num_prev_days", help="Number of days to examine",
     default=DEFAULT_NUM_PREVIOUS_DAYS)
+  parser.add_option("-t", "--top", type="int",
+    dest="num_failed_tests",
+    help="Summary Mode, only show top number of failed tests",
+    default=DEFAULT_TOP_NUM_FAILED_TEST)
 
   (options, args) = parser.parse_args()
   if args:
@@ -100,6 +111,7 @@ def load_url_data(url):
  
 """ List all builds of the target project. """
 def list_builds(jenkins_url, job_name):
+  global summary_mode
   url = "%(jenkins)s/job/%(job_name)s/api/json?tree=builds[url,result,timestamp]" % dict(
   jenkins=jenkins_url,
   job_name=job_name)
@@ -108,19 +120,25 @@ def list_builds(jenkins_url, job_name):
 data = load_url_data(url)
 
   except:
-    logging.error("Could not fetch: %s" % url)
+    if not summary_mode:
+      logging.error("Could not fetch: %s" % url)
+error_count += 1
 raise
   return data['builds']
 
 """ Find the names of any tests which failed in the given build output URL. """
 def find_failing_tests(testReportApiJson, jobConsoleOutput):
+  global summary_mode
+  global error_count
   ret = set()
   try:
 data = load_url_data(testReportApiJson)
 
   except:
-    logging.error("Could not open testReport, check " +
+    if not summary_mode:
+      logging.error("Could not open testReport, check " +
         jobConsoleOutput + " for why it was reported failed")
+error_count += 1
 return ret
 
   for suite in data['suites']:
@@ -130,7 +148,7 @@ def find_failing_tests(testReportApiJson, jobConsoleOutput):
   if (status == 'REGRESSION' or status == 'FAILED' or (errDetails is not 
None)):
 ret.add(cs['className'] + "." + cs['name'])
 
-  if len(ret) == 0:
+  if len(ret) == 0 and (not summary_mode):
     logging.info("No failed tests in testReport, check " +
         jobConsoleOutput + " for why it was reported failed.")
   return ret
@@ -138,6 +156,7 @@ def find_failing_tests(testReportApiJson, jobConsoleOutput):
 """ Iterate runs of specified job within num_prev_days and collect results """
 def find_flaky_tests(jenkins_url, job_name, num_prev_days):
   global numRunsToExamine
+  global summary_mode
   all_failing = dict()
   # First list all builds
   builds = list_builds(jenkins_url, job_name)
@@ -153,7 +172,8 @@ def find_flaky_tests(jenkins_url, job_name, num_prev_days):
   tnum = len(builds)
   num = len(failing_build_urls)
   numRunsToExamine = tnum
-  logging.info("THERE ARE " + str(num) + " builds (out of " + str(tnum)
+  if not summary_mode:
+    logging.info("THERE ARE " + str(num) + " builds (out of " + str(tnum)
       + ") that have failed tests in the past " + str(num_prev_days) + " days"
       + ((".", ", as listed below:\n")[num > 0]))
 
@@ -165,17 +185,20 @@ def find_flaky_tests(jenkins_url, job_name, 
num_prev_days):
 
 ts = float(failed_build_with_time[1]) / 1000.
 st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
-    logging.info("===>%s" % str(testReport) + " (" + st + ")")
+    if not summary_mode:
+      logging.info("===>%s" % str(testReport) + " (" + st + ")")
 failing 

[35/50] [abbrv] hadoop git commit: YARN-3617. Fix WindowsResourceCalculatorPlugin.getCpuFrequency() returning always -1. Contributed by J.Andreina.

2015-06-18 Thread zjshen
YARN-3617. Fix WindowsResourceCalculatorPlugin.getCpuFrequency() returning
always -1. Contributed by J.Andreina.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/70281247
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/70281247
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/70281247

Branch: refs/heads/YARN-2928
Commit: 70281247d913b19523c4d0b27b4631004a3a38fd
Parents: b0b8fcb
Author: Devaraj K deva...@apache.org
Authored: Wed Jun 17 13:54:09 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:00 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/70281247/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3679bf8..b1659a4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -630,6 +630,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3714. AM proxy filter can not get RM webapp address from
 yarn.resourcemanager.hostname.rm-id. (Masatake Iwasaki via xgong)
 
+YARN-3617. Fix WindowsResourceCalculatorPlugin.getCpuFrequency()
+returning always -1. (J.Andreina via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70281247/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java
index 0e89118..cdbf525 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java
@@ -157,7 +157,7 @@ public class WindowsResourceCalculatorPlugin extends 
ResourceCalculatorPlugin {
   @Override
   public long getCpuFrequency() {
 refreshIfNeeded();
-return -1;
+return cpuFrequencyKhz;
   }
 
   /** {@inheritDoc} */



[30/50] [abbrv] hadoop git commit: HDFS-4660. Block corruption can happen during pipeline recovery. Contributed by Kihwal Lee.

2015-06-18 Thread zjshen
HDFS-4660. Block corruption can happen during pipeline recovery. Contributed by 
Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5097681
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5097681
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5097681

Branch: refs/heads/YARN-2928
Commit: b5097681edbadec0f860a6bbcc6672a3c3169404
Parents: 5e962f6
Author: Kihwal Lee kih...@apache.org
Authored: Tue Jun 16 15:39:46 2015 -0500
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:18:58 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hdfs/server/datanode/BlockReceiver.java | 126 ++-
 2 files changed, 96 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5097681/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 42588cc..b921f2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1026,6 +1026,8 @@ Release 2.7.1 - UNRELEASED
 
 HDFS-8597. Fix TestFSImage#testZeroBlockSize on Windows. (Xiaoyu Yao)
 
+HDFS-4660. Block corruption can happen during pipeline recovery (kihwal)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5097681/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index c46892d..2468f43 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -588,29 +588,59 @@ class BlockReceiver implements Closeable {
   try {
 long onDiskLen = replicaInfo.getBytesOnDisk();
 if (onDiskLen < offsetInBlock) {
-  //finally write to the disk :
-  
-  if (onDiskLen % bytesPerChecksum != 0) { 
-// prepare to overwrite last checksum
-adjustCrcFilePosition();
+  // Normally the beginning of an incoming packet is aligned with the
+  // existing data on disk. If the beginning packet data offset is not
+  // checksum chunk aligned, the end of packet will not go beyond the
+  // next chunk boundary.
+  // When a failure-recovery is involved, the client state and the
+  // datanode state may not exactly agree. I.e. the client may
+  // resend part of data that is already on disk. Correct number of
+  // bytes should be skipped when writing the data and checksum
+  // buffers out to disk.
+  long partialChunkSizeOnDisk = onDiskLen % bytesPerChecksum;
+  boolean alignedOnDisk = partialChunkSizeOnDisk == 0;
+  boolean alignedInPacket = firstByteInBlock % bytesPerChecksum == 0;
+
+  // Since data is always appended, not overwritten, partial CRC
+  // recalculation is necessary if the on-disk data is not chunk-
+  // aligned, regardless of whether the beginning of the data in
+  // the packet is chunk-aligned.
+  boolean doPartialCrc = !alignedOnDisk && !shouldNotWriteChecksum;
+
+  // If this is a partial chunk, then verify that this is the only
+  // chunk in the packet. If the starting offset is not chunk
+  // aligned, the packet should terminate at or before the next
+  // chunk boundary.
+  if (!alignedInPacket && len > bytesPerChecksum) {
+throw new IOException("Unexpected packet data length for "
++  block + " from " + inAddr + ": a partial chunk must be "
++ " sent in an individual packet (data length = " + len
++ " > bytesPerChecksum = " + bytesPerChecksum + ")");
   }
-  
-  // If this is a partial chunk, then read in pre-existing checksum
+
+  // If the last portion of the block file is not a full chunk,
+  // then read in pre-existing partial data chunk and recalculate
+  // the checksum so that the checksum calculation can continue
+  // from the right state.
   Checksum partialCrc = null;
-  if (!shouldNotWriteChecksum && firstByteInBlock % bytesPerChecksum 
!= 0) {
+  if (doPartialCrc) {
 if 
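
Concrete numbers make the alignment rules above easier to follow. Below is a
minimal, self-contained sketch (not BlockReceiver itself); the values are
hypothetical stand-ins for what the real code derives from
replicaInfo.getBytesOnDisk() and the packet header.

    public class ChunkAlignmentSketch {
      public static void main(String[] args) {
        long bytesPerChecksum = 512;  // checksum chunk size (hypothetical)
        long onDiskLen = 1000;        // bytes already on disk (hypothetical)

        long partialChunkSizeOnDisk = onDiskLen % bytesPerChecksum;  // 488
        boolean alignedOnDisk = partialChunkSizeOnDisk == 0;         // false

        // Not aligned: the CRC of the trailing 488-byte partial chunk must
        // be read back and extended so checksumming resumes from a valid
        // state instead of being overwritten blindly.
        System.out.println("partial bytes = " + partialChunkSizeOnDisk
            + ", aligned = " + alignedOnDisk);
      }
    }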

[23/50] [abbrv] hadoop git commit: YARN-3789. Improve logs for LeafQueue#activateApplications(). Contributed by Bibin A Chundatt.

2015-06-18 Thread zjshen
YARN-3789. Improve logs for LeafQueue#activateApplications(). Contributed
by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f28dc4f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f28dc4f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f28dc4f0

Branch: refs/heads/YARN-2928
Commit: f28dc4f030f6f25f4a8525be92cca632c4b4e860
Parents: 388f1ff
Author: Devaraj K deva...@apache.org
Authored: Tue Jun 16 14:03:22 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:07 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt|  3 +++
 .../scheduler/capacity/LeafQueue.java  | 17 +
 2 files changed, 12 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f28dc4f0/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 99f2c64..92060ae 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -406,6 +406,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3785. Support for Resource as an argument during submitApp call in 
MockRM
 test class. (Sunil G via xgong)
 
+YARN-3789. Improve logs for LeafQueue#activateApplications().
+(Bibin A Chundatt via devaraj)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f28dc4f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 15d3289..8e39133 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 import org.apache.hadoop.yarn.security.AccessType;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
@@ -604,7 +603,7 @@ public class LeafQueue extends AbstractCSQueue {
 for (Iterator<FiCaSchedulerApp> i = pendingApplications.iterator(); 
  i.hasNext(); ) {
   FiCaSchedulerApp application = i.next();
-  
+  ApplicationId applicationId = application.getApplicationId();
   // Check am resource limit
   Resource amIfStarted = 
 Resources.add(application.getAMResource(), queueUsage.getAMUsed());
@@ -624,7 +623,9 @@ public class LeafQueue extends AbstractCSQueue {
 " single application in queue, it is likely set too low." +
 " skipping enforcement to allow at least one application to 
start"); 
 } else {
-  LOG.info("not starting application as amIfStarted exceeds amLimit");
+  LOG.info("Not activating application " + applicationId
+  + " as  amIfStarted: " + amIfStarted + " exceeds amLimit: "
+  + amLimit);
   continue;
 }
   }
@@ -645,8 +646,9 @@ public class LeafQueue extends AbstractCSQueue {
 " single application in queue for user, it is likely set too low."
+
 " skipping enforcement to allow at least one application to 
start"); 
 } else {
-  LOG.info("not starting application as amIfStarted exceeds " +
-"userAmLimit");
+  LOG.info("Not activating application " + applicationId
+  + " for user: " + user + " as userAmIfStarted: "
+  + userAmIfStarted + " exceeds userAmLimit: " + userAMLimit);
   continue;
 }
   }
@@ -657,9 +659,8 @@ public class LeafQueue extends AbstractCSQueue {
   

[50/50] [abbrv] hadoop git commit: HDFS-8605. Merge Refactor of DFSOutputStream from HDFS-7285 branch. (vinayakumarb)

2015-06-18 Thread zjshen
HDFS-8605. Merge Refactor of DFSOutputStream from HDFS-7285 branch. 
(vinayakumarb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c036a14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c036a14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c036a14

Branch: refs/heads/YARN-2928
Commit: 8c036a14e3b53ea398d154b0ab4d7eb7b43d59f1
Parents: d6e632a
Author: Andrew Wang w...@apache.org
Authored: Thu Jun 18 08:48:09 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:03 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 59 ++--
 .../org/apache/hadoop/hdfs/DataStreamer.java|  7 ++-
 3 files changed, 40 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c036a14/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2545bcf..a61cf78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -656,6 +656,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-6249. Output AclEntry in PBImageXmlWriter.
 (surendra singh lilhore via aajisaka)
 
+HDFS-8605. Merge Refactor of DFSOutputStream from HDFS-7285 branch.
+(vinayakumarb via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c036a14/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 4622be6..c16aef2 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -64,6 +64,8 @@ import org.apache.hadoop.util.Time;
 import org.apache.htrace.Sampler;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -86,6 +88,7 @@ import com.google.common.base.Preconditions;
 @InterfaceAudience.Private
 public class DFSOutputStream extends FSOutputSummer
 implements Syncable, CanSetDropBehind {
+  static final Logger LOG = LoggerFactory.getLogger(DFSOutputStream.class);
   /**
* Number of times to retry creating a file when there are transient 
* errors (typically related to encryption zones and KeyProvider operations).
@@ -413,21 +416,30 @@ public class DFSOutputStream extends FSOutputSummer
 //
 if (currentPacket.getNumChunks() == currentPacket.getMaxChunks() ||
 getStreamer().getBytesCurBlock() == blockSize) {
-  if (DFSClient.LOG.isDebugEnabled()) {
-DFSClient.LOG.debug("DFSClient writeChunk packet full seqno=" +
-currentPacket.getSeqno() +
-", src=" + src +
-", bytesCurBlock=" + getStreamer().getBytesCurBlock() +
-", blockSize=" + blockSize +
-", appendChunk=" + getStreamer().getAppendChunk());
-  }
-  getStreamer().waitAndQueuePacket(currentPacket);
-  currentPacket = null;
+  enqueueCurrentPacketFull();
+}
+  }
 
-  adjustChunkBoundary();
+  void enqueueCurrentPacket() throws IOException {
+getStreamer().waitAndQueuePacket(currentPacket);
+currentPacket = null;
+  }
 
-  endBlock();
-}
+  void enqueueCurrentPacketFull() throws IOException {
+LOG.debug("enqueue full {}, src={}, bytesCurBlock={}, blockSize={},"
++  " appendChunk={}, {}", currentPacket, src, getStreamer()
+.getBytesCurBlock(), blockSize, getStreamer().getAppendChunk(),
+getStreamer());
+enqueueCurrentPacket();
+adjustChunkBoundary();
+endBlock();
+  }
+
+  /** create an empty packet to mark the end of the block. */
+  void setCurrentPacketToEmpty() throws InterruptedIOException {
+currentPacket = createPacket(0, 0, getStreamer().getBytesCurBlock(),
+getStreamer().getAndIncCurrentSeqno(), true);
+currentPacket.setSyncBlock(shouldSyncBlock);
   }
 
   /**
@@ -457,11 +469,8 @@ public class DFSOutputStream extends FSOutputSummer
*/
   protected void endBlock() throws IOException {
 if (getStreamer().getBytesCurBlock() == blockSize) {
-  currentPacket = createPacket(0, 0, getStreamer().getBytesCurBlock(),
-  
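
The refactored enqueueCurrentPacketFull() above also trades the guarded
DFSClient.LOG concatenation for SLF4J parameterized logging, which defers
string construction until the level is actually enabled. A minimal sketch of
that idiom, with hypothetical values:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingIdiomSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingIdiomSketch.class);

      public static void main(String[] args) {
        long seqno = 42;           // hypothetical packet sequence number
        String src = "/tmp/file";  // hypothetical source path
        // Old style: explicit guard plus concatenation.
        if (LOG.isDebugEnabled()) {
          LOG.debug("packet full seqno=" + seqno + ", src=" + src);
        }
        // New style: placeholders; arguments are formatted only if enabled.
        LOG.debug("packet full seqno={}, src={}", seqno, src);
      }
    }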

[32/50] [abbrv] hadoop git commit: HDFS-8606. Cleanup DFSOutputStream by removing unwanted changes from HDFS-8386. Contributed by Rakesh R

2015-06-18 Thread zjshen
HDFS-8606. Cleanup DFSOutputStream by removing unwanted changes from HDFS-8386. 
Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0e7365d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0e7365d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0e7365d

Branch: refs/heads/YARN-2928
Commit: e0e7365dcd65d4b74704c96546ee37a5bb6a4f07
Parents: 3146ab1
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Tue Jun 16 18:08:29 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:18:59 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../main/java/org/apache/hadoop/hdfs/DFSOutputStream.java   | 9 +
 2 files changed, 4 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0e7365d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2d4c062..a36e047 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -635,6 +635,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8540.  Mover should exit with NO_MOVE_BLOCK if no block can be moved.
 (surendra singh lilhore via szetszwo)
 
+HDFS-8606. Cleanup DFSOutputStream by removing unwanted changes
+from HDFS-8386. (Rakesh R via szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0e7365d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 695e6da..4622be6 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -899,16 +899,9 @@ public class DFSOutputStream extends FSOutputSummer
   }
 
   /**
-   * Set the data streamer object.
-   */
-  protected synchronized void setStreamer(DataStreamer streamer) {
-this.streamer = streamer;
-  }
-
-  /**
* Returns the data streamer object.
*/
-  protected synchronized DataStreamer getStreamer() {
+  protected DataStreamer getStreamer() {
 return streamer;
   }
 }



[31/50] [abbrv] hadoop git commit: HDFS-7164. Feature documentation for HDFS-6581. (Contributed by Arpit Agarwal)

2015-06-18 Thread zjshen
HDFS-7164. Feature documentation for HDFS-6581. (Contributed by Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0b8fcb4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0b8fcb4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0b8fcb4

Branch: refs/heads/YARN-2928
Commit: b0b8fcb4056151a0228dca496b63bbaa807969c1
Parents: e0e7365
Author: Arpit Agarwal a...@apache.org
Authored: Tue Jun 16 19:05:44 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:18:59 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../site/markdown/CentralizedCacheManagement.md |   2 +
 .../src/site/markdown/MemoryStorage.md  | 130 +++
 .../site/resources/images/LazyPersistWrites.png | Bin 0 - 107161 bytes
 hadoop-project/src/site/site.xml|   1 +
 5 files changed, 135 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0b8fcb4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a36e047..07cd4a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -638,6 +638,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8606. Cleanup DFSOutputStream by removing unwanted changes
 from HDFS-8386. (Rakesh R via szetszwo)
 
+HDFS-7164. Feature documentation for HDFS-6581. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0b8fcb4/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index b4f08c8..72c125d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -233,6 +233,8 @@ Be sure to configure the following:
 
 This determines the maximum amount of memory a DataNode will use for 
caching. On Unix-like systems, the locked-in-memory size ulimit (`ulimit -l`) 
of the DataNode user also needs to be increased to match this parameter (see 
below section on [OS Limits](#OS_Limits)). When setting this value, please 
remember that you will need space in memory for other things as well, such as 
the DataNode and application JVM heaps and the operating system page cache.
 
+This setting is shared with the [Lazy Persist Writes 
feature](./MemoryStorage.html). The Data Node will ensure that the combined 
memory used by Lazy Persist Writes and Centralized Cache Management does not 
exceed the amount configured in `dfs.datanode.max.locked.memory`.
+
  Optional
 
 The following properties are not required, but may be specified for tuning:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0b8fcb4/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/MemoryStorage.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/MemoryStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/MemoryStorage.md
new file mode 100644
index 000..1ed4b5a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/MemoryStorage.md
@@ -0,0 +1,130 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+Memory Storage Support in HDFS
+==
+
+* [Introduction](#Introduction)
+* [Administrator Configuration](#Administrator_Configuration)
+* [Limit RAM used for replicas in 
Memory](#Limit_RAM_used_for_replicas_in_Memory)
+* [Setup RAM Disks on Data Nodes](#Setup_RAM_Disks_on_Data_Nodes)
+* [Choosing `tmpfs` \(vs `ramfs`\)](#Choosing_`tmpfs`_\(vs_`ramfs`\))
+* [Mount RAM Disks](#Mount_RAM_Disks)
+* [Tag `tmpfs` volume with the RAM\_DISK Storage 
Type](#Tag_`tmpfs`_volume_with_the_RAM\_DISK_Storage_Type)
+* [Ensure 
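
As the CentralizedCacheManagement change above notes, Lazy Persist Writes and
Centralized Cache Management share one locked-memory budget. A minimal sketch
of setting that budget programmatically: the key is the one cited above, the
128 MB value is a hypothetical example, and the value must stay within the
DataNode user's "ulimit -l".

    import org.apache.hadoop.conf.Configuration;

    public class LockedMemoryConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // 128 MB shared by Centralized Cache Management and Lazy Persist
        // Writes on each DataNode (hypothetical sizing).
        conf.setLong("dfs.datanode.max.locked.memory", 128L * 1024 * 1024);
        System.out.println(conf.get("dfs.datanode.max.locked.memory"));
      }
    }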

[43/50] [abbrv] hadoop git commit: HDFS-8446. Separate safemode related operations in GetBlockLocations(). Contributed by Haohui Mai.

2015-06-18 Thread zjshen
HDFS-8446. Separate safemode related operations in GetBlockLocations(). 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/563aa169
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/563aa169
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/563aa169

Branch: refs/heads/YARN-2928
Commit: 563aa1695fb08a4a42482733dc323221c4dab583
Parents: c033209
Author: Haohui Mai whe...@apache.org
Authored: Wed Jun 17 16:21:37 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:01 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/namenode/FSDirAttrOp.java   |   3 +-
 .../server/namenode/FSDirStatAndListingOp.java  |  93 +--
 .../hdfs/server/namenode/FSDirectory.java   |  20 +++
 .../hdfs/server/namenode/FSNamesystem.java  | 153 +++
 .../hdfs/server/namenode/NamenodeFsck.java  |   5 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |  16 +-
 7 files changed, 140 insertions(+), 153 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/563aa169/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6ef405b..6dfcd18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -647,6 +647,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8238. Move ClientProtocol to the hdfs-client.
 (Takanobu Asanuma via wheat9)
 
+HDFS-8446. Separate safemode related operations in GetBlockLocations().
+(wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563aa169/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 3b07320..b322b69 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -474,8 +474,7 @@ public class FSDirAttrOp {
 
   // if the last access time update was within the last precision 
interval, then
   // no need to store access time
-  if (atime <= inodeTime + fsd.getFSNamesystem().getAccessTimePrecision()
-  && !force) {
+  if (atime <= inodeTime + fsd.getAccessTimePrecision() && !force) {
 status =  false;
   } else {
 inode.setAccessTime(atime, latest);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563aa169/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index c636d93..201dabc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -43,6 +45,8 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
 
+import static org.apache.hadoop.util.Time.now;

[46/50] [abbrv] hadoop git commit: YARN-3824. Fix two minor nits in member variable properties of YarnConfiguration. Contributed by Ray Chiang.

2015-06-18 Thread zjshen
YARN-3824. Fix two minor nits in member variable properties of
YarnConfiguration. Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6e632a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6e632a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6e632a7

Branch: refs/heads/YARN-2928
Commit: d6e632a7c5c90963c7a3852c7be808150361cc55
Parents: e922a27
Author: Devaraj K deva...@apache.org
Authored: Thu Jun 18 16:44:08 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:02 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6e632a7/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6bf0620..a4ffa03 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -636,6 +636,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3617. Fix WindowsResourceCalculatorPlugin.getCpuFrequency()
 returning always -1. (J.Andreina via devaraj)
 
+YARN-3824. Fix two minor nits in member variable properties
+of YarnConfiguration. (Ray Chiang via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6e632a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 9011a5a..23e6b11 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -279,7 +279,7 @@ public class YarnConfiguration extends Configuration {
  + "intermediate-data-encryption.enable";
 
   @Private
-  public static final Boolean DEFAULT_YARN_INTERMEDIATE_DATA_ENCRYPTION = 
false;
+  public static final boolean DEFAULT_YARN_INTERMEDIATE_DATA_ENCRYPTION = 
false;
 
   /** The address of the RM admin interface.*/
   public static final String RM_ADMIN_ADDRESS = 
@@ -760,7 +760,7 @@ public class YarnConfiguration extends Configuration {
 
   public static final String RM_PROXY_USER_PRIVILEGES_ENABLED = RM_PREFIX
  + "proxy-user-privileges.enabled";
-  public static boolean DEFAULT_RM_PROXY_USER_PRIVILEGES_ENABLED = false;
+  public static final boolean DEFAULT_RM_PROXY_USER_PRIVILEGES_ENABLED = false;
 
   /**
* How many diagnostics/failure messages can be saved in RM for



[04/50] [abbrv] hadoop git commit: HDFS-7923. The DataNodes should rate-limit their full block reports by asking the NN on heartbeat messages (cmccabe)

2015-06-18 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7460011/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index 92c329e..ff70c3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -109,7 +109,7 @@ public class TestDeadDatanode {
 BlockListAsLongs.EMPTY) };
 try {
   dnp.blockReport(reg, poolId, report,
-  new BlockReportContext(1, 0, System.nanoTime()));
+  new BlockReportContext(1, 0, System.nanoTime(), 0L));
   fail("Expected IOException is not thrown");
 } catch (IOException ex) {
   // Expected
@@ -120,8 +120,8 @@ public class TestDeadDatanode {
 StorageReport[] rep = { new StorageReport(
 new DatanodeStorage(reg.getDatanodeUuid()),
 false, 0, 0, 0, 0) };
-DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null)
-.getCommands();
+DatanodeCommand[] cmd =
+dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
 assertEquals(1, cmd.length);
 assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
 .getAction());
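
For context on the new arguments above (the extra 0L lease id on
BlockReportContext and the boolean on sendHeartbeat), HDFS-7923 has the
NameNode grant a bounded number of full-block-report leases via heartbeat
responses. A minimal, self-contained sketch of that idea; the names and the
semaphore are illustrative, not Hadoop APIs:

    import java.util.concurrent.Semaphore;

    public class BlockReportLeaseSketch {
      private final Semaphore leases = new Semaphore(4); // max concurrent

      long requestLease() {             // piggybacked on a heartbeat
        return leases.tryAcquire() ? System.nanoTime() : 0L; // 0L = none
      }

      void fullBlockReport(long leaseId) {
        if (leaseId == 0L) {
          return;                       // defer until a lease is granted
        }
        try {
          // ... send the full block report to the NameNode ...
        } finally {
          leases.release();
        }
      }

      public static void main(String[] args) {
        BlockReportLeaseSketch s = new BlockReportLeaseSketch();
        s.fullBlockReport(s.requestLease());
      }
    }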



[22/50] [abbrv] hadoop git commit: HDFS-8591. Remove support for deprecated configuration key dfs.namenode.decommission.nodes.per.interval.

2015-06-18 Thread zjshen
HDFS-8591. Remove support for deprecated configuration key 
dfs.namenode.decommission.nodes.per.interval.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/090aa6ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/090aa6ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/090aa6ce

Branch: refs/heads/YARN-2928
Commit: 090aa6ce3a9f0c59834caba4a975ce5282beda5e
Parents: f28dc4f
Author: Andrew Wang w...@apache.org
Authored: Tue Jun 16 10:03:34 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:07 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../blockmanagement/DecommissionManager.java| 42 +---
 .../apache/hadoop/hdfs/TestDecommission.java| 26 
 3 files changed, 12 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/090aa6ce/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0ae2882..e0ef52f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -31,6 +31,9 @@ Trunk (Unreleased)
  
 HDFS-8135. Remove the deprecated FSConstants class. (Li Lu via wheat9)
 
+HDFS-8591. Remove support for deprecated configuration key
+dfs.namenode.decommission.nodes.per.interval. (wang)
+
   NEW FEATURES
 
 HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/090aa6ce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 639d2b0..797d031 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -136,29 +136,20 @@ public class DecommissionManager {
 checkArgument(intervalSecs >= 0, "Cannot set a negative " +
 "value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY);
 
-// By default, the new configuration key overrides the deprecated one.
-// No # node limit is set.
 int blocksPerInterval = conf.getInt(
 DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
 DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT);
-int nodesPerInterval = Integer.MAX_VALUE;
 
-// If the expected key isn't present and the deprecated one is, 
-// use the deprecated one into the new one. This overrides the 
-// default.
-//
-// Also print a deprecation warning.
 final String deprecatedKey =
 "dfs.namenode.decommission.nodes.per.interval";
 final String strNodes = conf.get(deprecatedKey);
 if (strNodes != null) {
-  nodesPerInterval = Integer.parseInt(strNodes);
-  blocksPerInterval = Integer.MAX_VALUE;
-  LOG.warn("Using deprecated configuration key {} value of {}.",
-  deprecatedKey, nodesPerInterval); 
+  LOG.warn("Deprecated configuration key {} will be ignored.",
+  deprecatedKey);
   LOG.warn("Please update your configuration to use {} instead.", 
   DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
 }
+
 checkArgument(blocksPerInterval > 0,
 "Must set a positive value for "
 + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
@@ -170,15 +161,14 @@ public class DecommissionManager {
 "value for "
 + 
DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES);
 
-monitor = new Monitor(blocksPerInterval, 
-nodesPerInterval, maxConcurrentTrackedNodes);
+monitor = new Monitor(blocksPerInterval, maxConcurrentTrackedNodes);
 executor.scheduleAtFixedRate(monitor, intervalSecs, intervalSecs,
 TimeUnit.SECONDS);
 
 LOG.debug("Activating DecommissionManager with interval {} seconds, " +
-"{} max blocks per interval, {} max nodes per interval, " +
+"{} max blocks per interval, " +
 "{} max concurrently tracked nodes.", intervalSecs,
-blocksPerInterval, nodesPerInterval, maxConcurrentTrackedNodes);
+blocksPerInterval, maxConcurrentTrackedNodes);
   }
 
   /**
@@ -334,10 +324,6 @@ public class DecommissionManager {
  */
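
A minimal sketch of migrating a configuration off the removed key: set the
blocks-per-interval key (the replacement named in the warning above) directly.
The 500000 value is a hypothetical example; tune it to your cluster.

    import org.apache.hadoop.conf.Configuration;

    public class DecommissionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // dfs.namenode.decommission.nodes.per.interval is now ignored;
        // bound decommission work by blocks scanned per interval instead.
        conf.setInt("dfs.namenode.decommission.blocks.per.interval", 500000);
        System.out.println(conf.getInt(
            "dfs.namenode.decommission.blocks.per.interval", -1));
      }
    }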
 

[27/50] [abbrv] hadoop git commit: HDFS-8597. Fix TestFSImage#testZeroBlockSize on Windows. Contributed by Xiaoyu Yao.

2015-06-18 Thread zjshen
HDFS-8597. Fix TestFSImage#testZeroBlockSize on Windows. Contributed by Xiaoyu 
Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12b46e18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12b46e18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12b46e18

Branch: refs/heads/YARN-2928
Commit: 12b46e18e0094c46ca60e7f0ec4f333d4029a96c
Parents: 090aa6c
Author: Xiaoyu Yao x...@apache.org
Authored: Tue Jun 16 12:38:07 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:18:58 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 2 ++
 .../org/apache/hadoop/hdfs/server/datanode/StorageLocation.java  | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12b46e18/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e0ef52f..42588cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1024,6 +1024,8 @@ Release 2.7.1 - UNRELEASED
 HDFS-8576.  Lease recovery should return true if the lease can be released
 and the file can be closed.  (J.Andreina via szetszwo)
 
+HDFS-8597. Fix TestFSImage#testZeroBlockSize on Windows. (Xiaoyu Yao)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12b46e18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
index 126086f..5c8dd85 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
@@ -26,8 +26,8 @@ import java.net.URI;
 import java.util.regex.Matcher;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -94,7 +94,7 @@ public class StorageLocation {
   }
 }
 
-return new StorageLocation(storageType, Util.stringAsURI(location));
+return new StorageLocation(storageType, new Path(location).toUri());
   }
 
   @Override



[12/50] [abbrv] hadoop git commit: HDFS-8572. DN always uses HTTP/localhost@REALM principals in SPNEGO. Contributed by Haohui Mai.

2015-06-18 Thread zjshen
HDFS-8572. DN always uses HTTP/localhost@REALM principals in SPNEGO. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6ff0e81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6ff0e81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6ff0e81

Branch: refs/heads/YARN-2928
Commit: d6ff0e81d6cf8ecd256be526efcd6538e60f17bf
Parents: bc9d48a
Author: Haohui Mai whe...@apache.org
Authored: Thu Jun 11 18:53:29 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:05 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   | 36 ++
 .../server/datanode/web/DatanodeHttpServer.java | 52 ++--
 3 files changed, 55 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6ff0e81/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 033451e..51a0897 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -996,6 +996,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8583. Document that NFS gateway does not work with rpcbind
 on SLES 11. (Arpit Agarwal)
 
+HDFS-8572. DN always uses HTTP/localhost@REALM principals in SPNEGO.
+(wheat9)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6ff0e81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index f73eb66..ed2925b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
+
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
@@ -148,7 +148,6 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import 
org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
@@ -163,7 +162,6 @@ import 
org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.ReadaheadPool;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -180,7 +178,6 @@ import org.apache.hadoop.security.SaslPropertiesResolver;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.tracing.SpanReceiverHost;
@@ -299,7 +296,6 @@ public class DataNode extends ReconfigurableBase
   private volatile boolean heartbeatsDisabledForTests = false;
   private DataStorage storage = null;
 
-  private HttpServer2 infoServer = null;
   private DatanodeHttpServer httpServer = null;
   private int infoPort;
   private int infoSecurePort;
@@ -761,29 +757,12 @@ public class DataNode extends ReconfigurableBase
*/
   private void startInfoServer(Configuration conf)
 throws IOException {
-Configuration confForInfoServer = new Configuration(conf);
-confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
-HttpServer2.Builder builder = new HttpServer2.Builder()
-  .setName("datanode")
-  
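
A minimal sketch of the principal handling this fix restores: the configured
SPNEGO principal pattern is resolved against the actual hostname instead of
always collapsing to HTTP/localhost@REALM. The pattern and hostname below are
illustrative; SecurityUtil.getServerPrincipal is the real helper that
substitutes _HOST.

    import org.apache.hadoop.security.SecurityUtil;

    public class SpnegoPrincipalSketch {
      public static void main(String[] args) throws Exception {
        String pattern = "HTTP/_HOST@EXAMPLE.COM"; // hypothetical realm
        // Resolves _HOST to the host the server actually binds to.
        System.out.println(
            SecurityUtil.getServerPrincipal(pattern, "dn1.example.com"));
      }
    }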

[38/50] [abbrv] hadoop git commit: HDFS-8238. Move ClientProtocol to the hdfs-client. Contributed by Takanobu Asanuma.

2015-06-18 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2ccade/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
deleted file mode 100644
index 55faf16..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ /dev/null
@@ -1,1459 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.List;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.crypto.CryptoProtocolVersion;
-import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
-import org.apache.hadoop.fs.CacheFlag;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.fs.InvalidPathException;
-import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.Options.Rename;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.fs.XAttr;
-import org.apache.hadoop.fs.XAttrSetFlag;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.inotify.EventBatchList;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
-import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
-import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
-import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
-import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
-import org.apache.hadoop.io.EnumSetWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.retry.AtMostOnce;
-import org.apache.hadoop.io.retry.Idempotent;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenInfo;
-
-/**
- * ClientProtocol is used by user code via 
- * {@link org.apache.hadoop.hdfs.DistributedFileSystem} class to communicate 
- * with the NameNode.  User code can manipulate the directory namespace, 
- * as well as open/close file streams, etc.
- *
- **/
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-@KerberosInfo(
-serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
-@TokenInfo(DelegationTokenSelector.class)
-public interface ClientProtocol {
-
-  /**
-   * Until version 69, this class ClientProtocol served as both
-   * the client interface to the NN AND the RPC protocol used to 
-   * communicate with the NN.
-   * 
-   * This class is used by both the DFSClient and the 
-   * NN server side to insulate from the protocol serialization.
-   * 
-   * If you are adding/changing this interface then you need to 
-   * change both this class and ALSO related protocol buffer
-   * wire protocol definition in ClientNamenodeProtocol.proto.
-   * 
-   * For more details on protocol buffer wire protocol, please see 
-   * .../org/apache/hadoop/hdfs/protocolPB/overview.html
-   * 
-   * The log of historical changes can be retrieved from the 

[18/50] [abbrv] hadoop git commit: HDFS-8361. Choose SSD over DISK in block placement.

2015-06-18 Thread zjshen
HDFS-8361. Choose SSD over DISK in block placement.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e24417a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e24417a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e24417a

Branch: refs/heads/YARN-2928
Commit: 5e24417a5c01654e818e59940a73aa960c3d5f0d
Parents: b181b87
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Mon Jun 15 17:12:01 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:06 2015 -0700

--
 .../java/org/apache/hadoop/fs/StorageType.java  |  7 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../hadoop/hdfs/TestBlockStoragePolicy.java | 75 +++-
 3 files changed, 80 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e24417a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
index 68069d7..0948801 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
@@ -33,10 +33,11 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public enum StorageType {
-  DISK(false),
+  // sorted by the speed of the storage types, from fast to slow
+  RAM_DISK(true),
   SSD(false),
-  ARCHIVE(false),
-  RAM_DISK(true);
+  DISK(false),
+  ARCHIVE(false);
 
   private final boolean isTransient;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e24417a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9822575..79e7820 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -920,6 +920,8 @@ Release 2.7.1 - UNRELEASED
 HDFS-8521. Add VisibleForTesting annotation to
 BlockPoolSlice#selectReplicaToDelete. (cmccabe)
 
+HDFS-8361. Choose SSD over DISK in block placement.  (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e24417a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index ea69f97..0d59ded 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -26,6 +26,7 @@ import java.util.*;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -40,7 +41,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
-import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
@@ -1153,6 +1154,50 @@ public class TestBlockStoragePolicy {
   }
 
   @Test
+  public void testChooseSsdOverDisk() throws Exception {
+BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
+new StorageType[]{StorageType.SSD, StorageType.DISK,
+StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
+
+final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
+final String[] hosts = {"host1", "host2", "host3"};
+final StorageType[] disks = {StorageType.DISK, StorageType.DISK, 
StorageType.DISK};
+
+final DatanodeStorageInfo[] diskStorages
+= DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
+final DatanodeDescriptor[] dataNodes
+= DFSTestUtil.toDatanodeDescriptor(diskStorages);
+for(int i = 0; i < dataNodes.length; i++) {
+  BlockManagerTestUtil.updateStorage(dataNodes[i],
+  new 
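
A minimal sketch of why the StorageType reordering in this patch matters:
callers that iterate values() or an EnumSet see declaration order, so listing
SSD before DISK makes SSD the first eligible pick. The toy enum below mirrors
the new ordering; it is not the Hadoop class.

    import java.util.EnumSet;

    public class StorageOrderSketch {
      enum StorageType { RAM_DISK, SSD, DISK, ARCHIVE }

      public static void main(String[] args) {
        EnumSet<StorageType> persistent =
            EnumSet.of(StorageType.SSD, StorageType.DISK,
                StorageType.ARCHIVE);
        // EnumSet iteration follows declaration order, so SSD comes first.
        StorageType first = persistent.iterator().next();
        System.out.println("first choice = " + first); // SSD
      }
    }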

[29/50] [abbrv] hadoop git commit: HADOOP-12095. org.apache.hadoop.fs.shell.TestCount fails. Contributed by Brahma Reddy Battula.

2015-06-18 Thread zjshen
HADOOP-12095. org.apache.hadoop.fs.shell.TestCount fails. Contributed by Brahma 
Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e962f68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e962f68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e962f68

Branch: refs/heads/YARN-2928
Commit: 5e962f682228937995fa6539d3f086e317073378
Parents: 12b46e1
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue Jun 16 13:38:03 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:18:58 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/test/java/org/apache/hadoop/fs/shell/TestCount.java  | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e962f68/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 11c2f2a..9a16a7d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -858,6 +858,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12001. Fixed LdapGroupsMapping to include configurable Posix UID and
 GID attributes during the search. (Patrick White via vinodkv)
 
+HADOOP-12095. org.apache.hadoop.fs.shell.TestCount fails.
+(Brahma Reddy Battula via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e962f68/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
index 44fc1e6..116c071 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
@@ -281,7 +281,7 @@ public class TestCount {
 count.processOptions(options);
 String withStorageTypeHeader =
 // <----13---> <-------17------> <----13---> <-------17------->
-   "DISK_QUOTA    REM_DISK_QUOTA SSD_QUOTA REM_SSD_QUOTA " +
+    "SSD_QUOTA REM_SSD_QUOTA   DISK_QUOTA    REM_DISK_QUOTA " +
 // <----13---> <-------17------>
 "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " +
 "PATHNAME";
@@ -335,8 +335,8 @@ public class TestCount {
 count.processOptions(options);
 String withStorageTypeHeader =
 // <----13---> <-------17------>
-   "DISK_QUOTA    REM_DISK_QUOTA " +
 "SSD_QUOTA REM_SSD_QUOTA " +
+   "DISK_QUOTA    REM_DISK_QUOTA " +
 "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " +
 "PATHNAME";
 verify(out).println(withStorageTypeHeader);



[39/50] [abbrv] hadoop git commit: HDFS-8238. Move ClientProtocol to the hdfs-client. Contributed by Takanobu Asanuma.

2015-06-18 Thread zjshen
HDFS-8238. Move ClientProtocol to the hdfs-client. Contributed by Takanobu 
Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb2ccade
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb2ccade
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb2ccade

Branch: refs/heads/YARN-2928
Commit: eb2ccade523c3f51268a22414ce9e260f0ae792a
Parents: 5a4ccbd
Author: Haohui Mai whe...@apache.org
Authored: Wed Jun 17 16:16:36 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:00 2015 -0700

--
 .../hdfs/client/HdfsClientConfigKeys.java   |1 +
 .../hadoop/hdfs/protocol/ClientProtocol.java| 1459 ++
 .../hdfs/server/namenode/SafeModeException.java |   38 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |3 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java| 1459 --
 .../hdfs/server/namenode/SafeModeException.java |   38 -
 6 files changed, 1500 insertions(+), 1498 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2ccade/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 26283aa..e6d579b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -39,6 +39,7 @@ public interface HdfsClientConfigKeys {
   String  DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
   String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
   int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
+  String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY = 
"dfs.namenode.kerberos.principal";
 
   /** dfs.client.retry configuration properties */
   interface Retry {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2ccade/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
new file mode 100644
index 000..7e27078
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -0,0 +1,1459 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
+import org.apache.hadoop.fs.CacheFlag;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.InvalidPathException;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import 

[44/50] [abbrv] hadoop git commit: HDFS-8589. Fix unused imports in BPServiceActor and BlockReportLeaseManager (cmccabe)

2015-06-18 Thread zjshen
HDFS-8589. Fix unused imports in BPServiceActor and BlockReportLeaseManager 
(cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59171b44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59171b44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59171b44

Branch: refs/heads/YARN-2928
Commit: 59171b44df5ce8e7b81d2500534db47d4cc06e87
Parents: 563aa16
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Jun 17 17:01:42 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:01 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hdfs/server/blockmanagement/BlockReportLeaseManager.java  | 1 -
 .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java| 1 -
 3 files changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59171b44/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6dfcd18..ef3530f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -650,6 +650,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8446. Separate safemode related operations in GetBlockLocations().
 (wheat9)
 
+HDFS-8589. Fix unused imports in BPServiceActor and BlockReportLeaseManager
+(cmccabe)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59171b44/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java
index cd037f5..7db05c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59171b44/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index ea1abbd..f84dd99 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -29,7 +29,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import com.google.common.base.Joiner;



[19/50] [abbrv] hadoop git commit: HDFS-8576. Lease recovery should return true if the lease can be released and the file can be closed. Contributed by J.Andreina

2015-06-18 Thread zjshen
HDFS-8576.  Lease recovery should return true if the lease can be released and 
the file can be closed.  Contributed by J.Andreina


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e1a876b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e1a876b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e1a876b

Branch: refs/heads/YARN-2928
Commit: 9e1a876b1305f374379ab64d6cc3adffb30b4549
Parents: 0eb0d6e
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Mon Jun 15 16:07:38 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:06 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 22 ++
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   | 46 
 3 files changed, 62 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e1a876b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c98d918..21acf98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1011,6 +1011,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8595. TestCommitBlockSynchronization fails in branch-2.7. (Patch
 applies to all branches). (Arpit Agarwal)
 
+HDFS-8576.  Lease recovery should return true if the lease can be released
+and the file can be closed.  (J.Andreina via szetszwo)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e1a876b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f962373..518adb4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2616,7 +2616,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
* @param src the path of the file to start lease recovery
* @param holder the lease holder's name
* @param clientMachine the client machine's name
-   * @return true if the file is already closed
+   * @return true if the file is already closed or
+   * if the lease can be released and the file can be closed.
* @throws IOException
*/
   boolean recoverLease(String src, String holder, String clientMachine)
@@ -2643,7 +2644,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 dir.checkPathAccess(pc, iip, FsAction.WRITE);
   }
   
-  recoverLeaseInternal(RecoverLeaseOp.RECOVER_LEASE,
+  return recoverLeaseInternal(RecoverLeaseOp.RECOVER_LEASE,
   iip, src, holder, clientMachine, true);
 } catch (StandbyException se) {
   skipSync = true;
@@ -2656,7 +2657,6 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 getEditLog().logSync();
   }
 }
-return false;
   }
 
   enum RecoverLeaseOp {
@@ -2672,12 +2672,12 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 }
   }
 
-  void recoverLeaseInternal(RecoverLeaseOp op, INodesInPath iip,
+  boolean recoverLeaseInternal(RecoverLeaseOp op, INodesInPath iip,
   String src, String holder, String clientMachine, boolean force)
   throws IOException {
 assert hasWriteLock();
 INodeFile file = iip.getLastINode().asFile();
-if (file != null && file.isUnderConstruction()) {
+if (file.isUnderConstruction()) {
   //
   // If the file is under construction , then it must be in our
   // leases. Find the appropriate lease record.
@@ -2710,7 +2710,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 // close only the file src
LOG.info("recoverLease: " + lease + ", src=" + src +
" from client " + clientName);
-internalReleaseLease(lease, src, iip, holder);
+return internalReleaseLease(lease, src, iip, holder);
   } else {
assert lease.getHolder().equals(clientName) :
  "Current lease holder " + lease.getHolder() +
@@ -2722,11 +2722,13 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
if (lease.expiredSoftLimit()) {
  LOG.info("startFile: recover " + lease + ", src=" + src + " client "
   

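For context, a minimal client-side sketch of how this new return value can be consumed (hypothetical example; the path, polling interval, and fs.defaultFS pointing at HDFS are assumptions, not part of the patch). Before the patch this code path always returned false, so callers had to keep polling even when recovery had already completed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverLeaseExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path path = new Path(args[0]);  // file whose lease should be recovered
    // With this patch, recoverLease() also returns true when the lease is
    // released and the file closed in the same call, not only when the
    // file was already closed.
    boolean closed = dfs.recoverLease(path);
    while (!closed) {
      Thread.sleep(1000);           // give the NameNode time to finish recovery
      closed = dfs.recoverLease(path);
    }
  }
}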
[34/50] [abbrv] hadoop git commit: HADOOP-12076. Incomplete Cache Mechanism in CredentialProvider API. Contributed by Larry McCay.

2015-06-18 Thread zjshen
HADOOP-12076. Incomplete Cache Mechanism in CredentialProvider API. Contributed 
by Larry McCay.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3146ab13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3146ab13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3146ab13

Branch: refs/heads/YARN-2928
Commit: 3146ab13fe4d9f5288fa4da9a3a2a53fc965d01f
Parents: 984e8be
Author: cnauroth cnaur...@apache.org
Authored: Tue Jun 16 14:44:03 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:18:59 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../security/alias/AbstractJavaKeyStoreProvider.java | 15 +--
 2 files changed, 4 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3146ab13/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9a16a7d..67e9c76 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -861,6 +861,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12095. org.apache.hadoop.fs.shell.TestCount fails.
 (Brahma Reddy Battula via aajisaka)
 
+HADOOP-12076. Incomplete Cache Mechanism in CredentialProvider API.
+(Larry McCay via cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3146ab13/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java
index 76b8cd5..9656261 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java
@@ -41,9 +41,7 @@ import java.security.UnrecoverableKeyException;
 import java.security.cert.CertificateException;
 import java.util.ArrayList;
 import java.util.Enumeration;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -174,13 +172,6 @@ public abstract class AbstractJavaKeyStoreProvider extends 
CredentialProvider {
 return keyStore;
   }
 
-  public Map<String, CredentialEntry> getCache() {
-    return cache;
-  }
-
-  private final Map<String, CredentialEntry> cache =
-      new HashMap<String, CredentialEntry>();
-
   protected final String getPathAsString() {
 return getPath().toString();
   }
@@ -213,9 +204,6 @@ public abstract class AbstractJavaKeyStoreProvider extends 
CredentialProvider {
 try {
   SecretKeySpec key = null;
   try {
-if (cache.containsKey(alias)) {
-  return cache.get(alias);
-}
 if (!keyStore.containsAlias(alias)) {
   return null;
 }
@@ -269,7 +257,7 @@ public abstract class AbstractJavaKeyStoreProvider extends 
CredentialProvider {
   throws IOException {
 writeLock.lock();
 try {
-  if (keyStore.containsAlias(alias) || cache.containsKey(alias)) {
+  if (keyStore.containsAlias(alias)) {
throw new IOException("Credential " + alias + " already exists in "
+ this);
   }
@@ -296,7 +284,6 @@ public abstract class AbstractJavaKeyStoreProvider extends 
CredentialProvider {
   } catch (KeyStoreException e) {
throw new IOException("Problem removing " + name + " from " + this, e);
   }
-  cache.remove(name);
   changed = true;
 } finally {
   writeLock.unlock();



[20/50] [abbrv] hadoop git commit: MAPREDUCE-6396. TestPipeApplication fails by NullPointerException. Contributed by Brahma Reddy Battula.

2015-06-18 Thread zjshen
MAPREDUCE-6396. TestPipeApplication fails by NullPointerException. Contributed 
by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0eb0d6e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0eb0d6e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0eb0d6e0

Branch: refs/heads/YARN-2928
Commit: 0eb0d6e072a6f483a650bd8df034bf789ef360a6
Parents: 9fd568b
Author: Akira Ajisaka aajis...@apache.org
Authored: Mon Jun 15 15:28:31 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:06 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt| 3 +++
 .../org/apache/hadoop/mapred/pipes/TestPipeApplication.java | 5 +++--
 2 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eb0d6e0/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 3c2e8f9..5b66604 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -221,6 +221,9 @@ Trunk (Unreleased)
 MAPREDUCE-6343. JobConf.parseMaximumHeapSizeMB() fails to parse value 
 greater than 2GB expressed in bytes. (Hao Xia via kasha)
 
+MAPREDUCE-6396. TestPipeApplication fails by NullPointerException.
+(Brahma Reddy Battula via aajisaka)
+
   BREAKDOWN OF MAPREDUCE-2841 (NATIVE TASK) SUBTASKS
 
 MAPREDUCE-5985. native-task: Fix build on macosx. Contributed by

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eb0d6e0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
index 64fdf41..22c5f41 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
@@ -36,6 +36,7 @@ import java.util.Map.Entry;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.io.BooleanWritable;
@@ -94,7 +95,7 @@ public class TestPipeApplication {
  CombineOutputCollector<IntWritable, Text> output = new
CombineOutputCollector<IntWritable, Text>(
  new Counters.Counter(), new Progress());
   FileSystem fs = new RawLocalFileSystem();
-  fs.setConf(conf);
+  fs.initialize(FsConstants.LOCAL_FS_URI, conf);
  Writer<IntWritable, Text> wr = new Writer<IntWritable, Text>(conf,
fs.create(
  new Path(workSpace + File.separator + outfile)),
IntWritable.class,
  Text.class, null, null, true);
@@ -176,7 +177,7 @@ public class TestPipeApplication {
   FakeCollector output = new FakeCollector(new Counters.Counter(),
   new Progress());
   FileSystem fs = new RawLocalFileSystem();
-  fs.setConf(conf);
+  fs.initialize(FsConstants.LOCAL_FS_URI, conf);
  Writer<IntWritable, Text> wr = new Writer<IntWritable, Text>(conf,
fs.create(
  new Path(workSpace.getAbsolutePath() + File.separator +
outfile)),
  IntWritable.class, Text.class, null, null, true);

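A short sketch of what the fix relies on (hedged; the output path is a placeholder). RawLocalFileSystem needs initialize() rather than just setConf(), since setConf() alone leaves per-filesystem state such as the URI and statistics unset, which is a plausible source of the NullPointerException this patch fixes:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class LocalFsInitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = new RawLocalFileSystem();
    // fs.setConf(conf) alone only stores the Configuration; initialize()
    // performs the full setup (URI, statistics) that create() depends on.
    fs.initialize(FsConstants.LOCAL_FS_URI, conf);
    fs.create(new Path("/tmp/example-out")).close();  // hypothetical path
  }
}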


[26/50] [abbrv] hadoop git commit: YARN-3714. AM proxy filter can not get RM webapp address from yarn.resourcemanager.hostname.rm-id. Contributed by Masatake Iwasaki

2015-06-18 Thread zjshen
YARN-3714. AM proxy filter can not get RM webapp address from
yarn.resourcemanager.hostname.rm-id. Contributed by Masatake Iwasaki


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76dc2f53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76dc2f53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76dc2f53

Branch: refs/heads/YARN-2928
Commit: 76dc2f535343a806db5cc86f2d270c7d00412bee
Parents: 816ab87
Author: Xuan xg...@apache.org
Authored: Tue Jun 16 14:05:09 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:18:58 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../org/apache/hadoop/yarn/util/RMHAUtils.java  | 33 --
 .../amfilter/TestAmFilterInitializer.java   | 47 
 3 files changed, 70 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76dc2f53/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 92060ae..3679bf8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -627,6 +627,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3794. TestRMEmbeddedElector fails because of ambiguous LOG reference.
 (Chengbing Liu via devaraj)
 
+YARN-3714. AM proxy filter can not get RM webapp address from
+yarn.resourcemanager.hostname.rm-id. (Masatake Iwasaki via xgong)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76dc2f53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RMHAUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RMHAUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RMHAUtils.java
index a7e1ce9..2e996e9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RMHAUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RMHAUtils.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HAServiceTarget;
 import org.apache.hadoop.yarn.client.RMHAServiceTarget;
+import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
 @Private
@@ -71,25 +72,31 @@ public class RMHAUtils {
 
  public static List<String> getRMHAWebappAddresses(
   final YarnConfiguration conf) {
+String prefix;
+String defaultPort;
+if (YarnConfiguration.useHttps(conf)) {
+  prefix = YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS;
+  defaultPort = ":" + YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT;
+} else {
+  prefix = YarnConfiguration.RM_WEBAPP_ADDRESS;
+  defaultPort = ":" + YarnConfiguration.DEFAULT_RM_WEBAPP_PORT;
+}
 Collection<String> rmIds =
 conf.getStringCollection(YarnConfiguration.RM_HA_IDS);
 List<String> addrs = new ArrayList<String>();
-if (YarnConfiguration.useHttps(conf)) {
-  for (String id : rmIds) {
-String addr = conf.get(
-YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + "." + id);
-if (addr != null) {
-  addrs.add(addr);
+for (String id : rmIds) {
+  String addr = conf.get(HAUtil.addSuffix(prefix, id));
+  if (addr == null) {
+String hostname =
+conf.get(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, id));
+if (hostname != null) {
+  addr = hostname + defaultPort;
 }
   }
-} else {
-  for (String id : rmIds) {
-String addr = conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS + "." + id);
-if (addr != null) {
-  addrs.add(addr);
-}
+  if (addr != null) {
+addrs.add(addr);
   }
 }
 return addrs;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76dc2f53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java

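A hedged sketch of the behavior this patch enables (class and constant names are taken from the diff; hostnames are placeholders, and the port assumes the default HTTP policy):

import java.util.List;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.RMHAUtils;

public class RmHaWebappAddressSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
    // Only the per-RM hostnames are set; no explicit webapp addresses.
    conf.set(YarnConfiguration.RM_HOSTNAME + ".rm1", "master1.example.com");
    conf.set(YarnConfiguration.RM_HOSTNAME + ".rm2", "master2.example.com");
    // Before the patch this returned an empty list; now each hostname is
    // combined with the default webapp port (8088 for HTTP).
    List<String> addrs = RMHAUtils.getRMHAWebappAddresses(conf);
    System.out.println(addrs);  // e.g. [master1.example.com:8088, master2.example.com:8088]
  }
}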
[45/50] [abbrv] hadoop git commit: HADOOP-7139. Allow appending to existing SequenceFiles (Contributed by kanaka kumar avvaru)

2015-06-18 Thread zjshen
HADOOP-7139. Allow appending to existing SequenceFiles (Contributed by kanaka 
kumar avvaru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e922a278
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e922a278
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e922a278

Branch: refs/heads/YARN-2928
Commit: e922a27884e2e12ed62f60bf65972a3bf555e7a6
Parents: 05d63e6
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Jun 18 14:39:00 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:02 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../java/org/apache/hadoop/io/SequenceFile.java |  85 -
 .../hadoop/io/TestSequenceFileAppend.java   | 311 +++
 3 files changed, 394 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e922a278/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4b1b382..3430da6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -651,6 +651,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11965. determine-flaky-tests needs a summary mode.
 (Yufei Gu via Yongjun Zhang)
 
+HADOOP-7139. Allow appending to existing SequenceFiles
+(kanaka kumar avvaru via vinayakumarb)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e922a278/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index 7a59149..e37e855 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -837,7 +837,9 @@ public class SequenceFile {
 DataOutputStream deflateOut = null;
 Metadata metadata = null;
 Compressor compressor = null;
-
+
+private boolean appendMode = false;
+
 protected Serializer keySerializer;
 protected Serializer uncompressedValSerializer;
 protected Serializer compressedValSerializer;
@@ -909,6 +911,13 @@ public class SequenceFile {
   }
 }
 
+static class AppendIfExistsOption extends Options.BooleanOption implements
+Option {
+  AppendIfExistsOption(boolean value) {
+super(value);
+  }
+}
+
 static class KeyClassOption extends Options.ClassOption implements Option {
  KeyClassOption(Class<?> value) {
 super(value);
@@ -958,7 +967,7 @@ public class SequenceFile {
 return codec;
   }
 }
-
+
 public static Option file(Path value) {
   return new FileOption(value);
 }
@@ -984,6 +993,10 @@ public class SequenceFile {
   return new ReplicationOption(value);
 }
 
+public static Option appendIfExists(boolean value) {
+  return new AppendIfExistsOption(value);
+}
+
 public static Option blockSize(long value) {
   return new BlockSizeOption(value);
 }
@@ -1030,6 +1043,8 @@ public class SequenceFile {
   ProgressableOption progressOption = 
 Options.getOption(ProgressableOption.class, opts);
   FileOption fileOption = Options.getOption(FileOption.class, opts);
+  AppendIfExistsOption appendIfExistsOption = Options.getOption(
+  AppendIfExistsOption.class, opts);
   FileSystemOption fsOption = Options.getOption(FileSystemOption.class, 
opts);
   StreamOption streamOption = Options.getOption(StreamOption.class, opts);
   KeyClassOption keyClassOption = 
@@ -1071,7 +1086,54 @@ public class SequenceFile {
   blockSizeOption.getValue();
 Progressable progress = progressOption == null ? null :
   progressOption.getValue();
-out = fs.create(p, true, bufferSize, replication, blockSize, progress);
+
+if (appendIfExistsOption != null && appendIfExistsOption.getValue()
+    && fs.exists(p)) {
+
+  // Read the file and verify header details
+  SequenceFile.Reader reader = new SequenceFile.Reader(conf,
+  SequenceFile.Reader.file(p), new Reader.OnlyHeaderOption());
+  try {
+
+if (keyClassOption.getValue() != reader.getKeyClass()
+|| valueClassOption.getValue() != reader.getValueClass()) {
+  throw new 

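A short usage sketch for the new option (hedged; the path and key/value classes are placeholders). When the target file exists and its header matches the requested key and value classes, the writer appends rather than overwrites:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SequenceFileAppendSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/data.seq");  // hypothetical existing file
    SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(path),
        SequenceFile.Writer.keyClass(Text.class),
        SequenceFile.Writer.valueClass(IntWritable.class),
        SequenceFile.Writer.appendIfExists(true));  // option added by this patch
    writer.append(new Text("key"), new IntWritable(1));
    writer.close();
  }
}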
[24/50] [abbrv] hadoop git commit: YARN-3711. Documentation of ResourceManager HA should explain configurations about listen addresses. Contributed by Masatake Iwasaki.

2015-06-18 Thread zjshen
YARN-3711. Documentation of ResourceManager HA should explain configurations 
about listen addresses. Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5032eb97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5032eb97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5032eb97

Branch: refs/heads/YARN-2928
Commit: 5032eb979bc3c7b3d8b9ab9388f47bc6676fca8f
Parents: 5e24417
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Jun 16 10:12:38 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:07 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../src/site/markdown/ResourceManagerHA.md  | 16 +++-
 2 files changed, 18 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5032eb97/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1776a89..99f2c64 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -644,6 +644,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3723. Need to clearly document primaryFilter and otherInfo value type.
 (Zhijie Shen via xgong)
 
+YARN-3711. Documentation of ResourceManager HA should explain 
configurations
+about listen addresses. (Masatake Iwasaki via ozawa)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5032eb97/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
index 596cba7..49669a1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
@@ -67,7 +67,13 @@ Most of the failover functionality is tunable using various 
configuration properties
 | `yarn.resourcemanager.zk-address` | Address of the ZK-quorum. Used both for 
the state-store and embedded leader-election. |
 | `yarn.resourcemanager.ha.enabled` | Enable RM HA. |
 | `yarn.resourcemanager.ha.rm-ids` | List of logical IDs for the RMs. e.g., 
rm1,rm2. |
-| `yarn.resourcemanager.hostname.*rm-id*` | For each *rm-id*, specify the 
hostname the RM corresponds to. Alternately, one could set each of the RM's 
service addresses. |
+| `yarn.resourcemanager.hostname.`*rm-id* | For each *rm-id*, specify the 
hostname the RM corresponds to. Alternately, one could set each of the RM's 
service addresses. |
+| `yarn.resourcemanager.address.`*rm-id* | For each *rm-id*, specify host:port 
for clients to submit jobs. If set, overrides the hostname set in 
`yarn.resourcemanager.hostname.`*rm-id*. |
+| `yarn.resourcemanager.scheduler.address.`*rm-id* | For each *rm-id*, specify 
scheduler host:port for ApplicationMasters to obtain resources. If set, 
overrides the hostname set in `yarn.resourcemanager.hostname.`*rm-id*. |
+| `yarn.resourcemanager.resource-tracker.address.`*rm-id* | For each *rm-id*, 
specify host:port for NodeManagers to connect. If set, overrides the hostname 
set in `yarn.resourcemanager.hostname.`*rm-id*. |
+| `yarn.resourcemanager.admin.address.`*rm-id* | For each *rm-id*, specify 
host:port for administrative commands. If set, overrides the hostname set in 
`yarn.resourcemanager.hostname.`*rm-id*. |
+| `yarn.resourcemanager.webapp.address.`*rm-id* | For each *rm-id*, specify 
host:port of the RM web application corresponds to. You do not need this if you 
set `yarn.http.policy` to `HTTPS_ONLY`. If set, overrides the hostname set in 
`yarn.resourcemanager.hostname.`*rm-id*. |
+| `yarn.resourcemanager.webapp.https.address.`*rm-id* | For each *rm-id*, 
specify host:port of the RM https web application corresponds to. You do not 
need this if you set `yarn.http.policy` to `HTTP_ONLY`. If set, overrides the 
hostname set in `yarn.resourcemanager.hostname.`*rm-id*. |
 | `yarn.resourcemanager.ha.id` | Identifies the RM in the ensemble. This is 
optional; however, if set, admins have to ensure that all the RMs have their 
own IDs in the config. |
 | `yarn.resourcemanager.ha.automatic-failover.enabled` | Enable automatic 
failover; By default, it is enabled only when HA is enabled. |
 | `yarn.resourcemanager.ha.automatic-failover.embedded` | Use embedded 
leader-elector to pick the Active RM, when automatic failover is enabled. By 
default, it is enabled only when HA is enabled. |
@@ -105,6 +111,14 @@ Here is the sample of minimal 

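The minimal sample configuration is truncated in this digest. As a hedged illustration of the table above, the same settings expressed through the Java Configuration API (hostnames, ports, and the ZK quorum are placeholders):

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RmHaMinimalSetupSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
    // Per-rm-id hostnames; the service addresses derive from these by default.
    conf.set("yarn.resourcemanager.hostname.rm1", "master1.example.com");
    conf.set("yarn.resourcemanager.hostname.rm2", "master2.example.com");
    // Optional per-rm-id override, as described in the table above.
    conf.set("yarn.resourcemanager.webapp.address.rm1", "master1.example.com:8088");
    // ZK quorum used for the state store and embedded leader election.
    conf.set("yarn.resourcemanager.zk-address", "zk1:2181,zk2:2181,zk3:2181");
  }
}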
[48/50] [abbrv] hadoop git commit: HDFS-6249. Output AclEntry in PBImageXmlWriter. Contributed by surendra singh lilhore.

2015-06-18 Thread zjshen
HDFS-6249. Output AclEntry in PBImageXmlWriter. Contributed by surendra singh 
lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19551cf5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19551cf5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19551cf5

Branch: refs/heads/YARN-2928
Commit: 19551cf5f984ed4c5c63a27dd3619fdfd84c99c1
Parents: c181680
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Jun 17 17:41:10 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:02 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../offlineImageViewer/PBImageXmlWriter.java| 20 +---
 .../TestOfflineImageViewerForAcl.java   | 24 
 3 files changed, 44 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19551cf5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8f563de..2545bcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -653,6 +653,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8589. Fix unused imports in BPServiceActor and BlockReportLeaseManager
 (cmccabe)
 
+HDFS-6249. Output AclEntry in PBImageXmlWriter.
+(surendra singh lilhore via aajisaka)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19551cf5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
index f3fe886..4415c5d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
@@ -29,6 +29,7 @@ import java.util.Comparator;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.AclEntry;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
@@ -41,6 +42,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
+import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection;
@@ -51,7 +53,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection;
 import org.apache.hadoop.hdfs.util.XMLUtils;
 import org.apache.hadoop.util.LimitInputStream;
-
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 
 /**
@@ -188,7 +190,7 @@ public final class PBImageXmlWriter {
   private void dumpINodeDirectory(INodeDirectory d) {
 o("mtime", d.getModificationTime()).o("permission",
 dumpPermission(d.getPermission()));
-
+dumpAcls(d.getAcl());
 if (d.hasDsQuota() && d.hasNsQuota()) {
   o("nsquota", d.getNsQuota()).o("dsquota", d.getDsQuota());
 }
@@ -242,7 +244,7 @@ public final class PBImageXmlWriter {
 .o("atime", f.getAccessTime())
 .o("perferredBlockSize", f.getPreferredBlockSize())
 .o("permission", dumpPermission(f.getPermission()));
-
+dumpAcls(f.getAcl());
 if (f.getBlocksCount() > 0) {
   out.print("<blocks>");
   for (BlockProto b : f.getBlocksList()) {
@@ -263,6 +265,18 @@ public final class PBImageXmlWriter {
 }
   }
 
+  private void dumpAcls(AclFeatureProto aclFeatureProto) {
+ImmutableList<AclEntry> 

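The body of dumpAcls() is truncated above. A hedged reconstruction sketch follows; the loadAclEntries helper and the stringTable field are assumptions inferred from the surrounding class, not confirmed by this digest:

// Assumed sketch, not the verbatim patch:
private void dumpAcls(AclFeatureProto aclFeatureProto) {
  ImmutableList<AclEntry> aclEntryList = FSImageFormatPBINode.Loader
      .loadAclEntries(aclFeatureProto, stringTable);   // assumed helper
  if (aclEntryList.size() > 0) {
    out.print("<acls>");                               // mirrors the <blocks> pattern above
    for (AclEntry aclEntry : aclEntryList) {
      o("acl", aclEntry.toString());
    }
    out.print("</acls>");
  }
}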
[09/50] [abbrv] hadoop git commit: HADOOP-12001. Moving CHANGES.txt up into 2.8.

2015-06-18 Thread zjshen
HADOOP-12001. Moving CHANGES.txt up into 2.8.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77bbe951
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77bbe951
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77bbe951

Branch: refs/heads/YARN-2928
Commit: 77bbe95131f96b145da6e7839cfe8b725b19cf2c
Parents: 68cc034
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Mon Jun 15 14:29:07 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:05 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77bbe951/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index cdd396f..11c2f2a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -855,6 +855,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12074. in Shell.java#runCommand() rethrow InterruptedException as
 InterruptedIOException (Lavkesh Lahngir via vinayakumarb)
 
+HADOOP-12001. Fixed LdapGroupsMapping to include configurable Posix UID and
+GID attributes during the search. (Patrick White via vinodkv)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -899,9 +902,6 @@ Release 2.7.1 - UNRELEASED
 HADOOP-12078. The default retry policy does not handle RetriableException
 correctly. (Arpit Agarwal)
 
-HADOOP-12001. Fixed LdapGroupsMapping to include configurable Posix UID and
-GID attributes during the search. (Patrick White via vinodkv)
-
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



[15/50] [abbrv] hadoop git commit: HDFS-8596. TestDistributedFileSystem et al tests are broken in branch-2 due to incorrect setting of datanode attribute. Contributed by Yongjun Zhang.

2015-06-18 Thread zjshen
HDFS-8596. TestDistributedFileSystem et al tests are broken in branch-2 due to 
incorrect setting of datanode attribute. Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b93ab3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b93ab3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b93ab3d

Branch: refs/heads/YARN-2928
Commit: 2b93ab3d26ce275e5f35bfc7d116abb4588d9249
Parents: fb7f8ec
Author: Yongjun Zhang yzh...@cloudera.com
Authored: Sun Jun 14 11:20:32 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:10:05 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 2 +-
 .../hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java   | 3 ++-
 3 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b93ab3d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9aabd34..1caa8c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1002,6 +1002,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8572. DN always uses HTTP/localhost@REALM principals in SPNEGO.
 (wheat9)
 
+HDFS-8596. TestDistributedFileSystem et al tests are broken in branch-2
+due to incorrect setting of datanode attribute. (Yongjun Zhang)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b93ab3d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index ed2925b..3bd131e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -762,7 +762,7 @@ public class DataNode extends ReconfigurableBase
 ServerSocketChannel httpServerChannel = secureResources != null ?
 secureResources.getHttpServerChannel() : null;
 
-this.httpServer = new DatanodeHttpServer(conf, httpServerChannel);
+this.httpServer = new DatanodeHttpServer(conf, this, httpServerChannel);
 httpServer.start();
 if (httpServer.getHttpAddress() != null) {
   infoPort = httpServer.getHttpAddress().getPort();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b93ab3d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index e9ad92f..62c98e7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -75,6 +75,7 @@ public class DatanodeHttpServer implements Closeable {
   static final Log LOG = LogFactory.getLog(DatanodeHttpServer.class);
 
   public DatanodeHttpServer(final Configuration conf,
+  final DataNode datanode,
   final ServerSocketChannel externalHttpChannel)
 throws IOException {
 this.conf = conf;
@@ -91,7 +92,7 @@ public class DatanodeHttpServer implements Closeable {
 
 this.infoServer = builder.build();
 
-this.infoServer.setAttribute("datanode", this);
+this.infoServer.setAttribute("datanode", datanode);
 this.infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
 this.infoServer.addServlet(null, "/blockScannerReport",
BlockScanner.Servlet.class);



[41/50] [abbrv] hadoop git commit: Update CHANGES.txt for HDFS-8238.

2015-06-18 Thread zjshen
Update CHANGES.txt for HDFS-8238.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c10bcc06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c10bcc06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c10bcc06

Branch: refs/heads/YARN-2928
Commit: c10bcc0618eb502eeabb33a0fc6ae49aac1b01dc
Parents: eb2ccad
Author: Haohui Mai whe...@apache.org
Authored: Wed Jun 17 16:19:45 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:01 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c10bcc06/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a01446a..6ef405b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -644,6 +644,9 @@ Release 2.8.0 - UNRELEASED
 of Block in UnderReplicatedBlocks and PendingReplicationBlocks).
 (Zhe Zhang via wang)
 
+HDFS-8238. Move ClientProtocol to the hdfs-client.
+(Takanobu Asanuma via wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than



[42/50] [abbrv] hadoop git commit: YARN-3804. Both RM are on standBy state when kerberos user not in yarn.admin.acl. Contributed by Varun Saxena

2015-06-18 Thread zjshen
YARN-3804. Both RM are on standBy state when kerberos user not in 
yarn.admin.acl. Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c033209e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c033209e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c033209e

Branch: refs/heads/YARN-2928
Commit: c033209e1af3999dd44dfa2f598f2b45cd271a72
Parents: c10bcc0
Author: Xuan xg...@apache.org
Authored: Wed Jun 17 16:23:27 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu Jun 18 11:19:01 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../server/resourcemanager/AdminService.java| 19 +---
 .../resourcemanager/TestRMAdminService.java | 49 +++-
 3 files changed, 63 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c033209e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ae9716c..6bf0620 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -781,6 +781,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3764. CapacityScheduler should forbid moving LeafQueue from one parent
 to another. (Wangda Tan via jianhe)
 
+YARN-3804. Both RM are on standBy state when kerberos user not in 
yarn.admin.acl
+(Varun Saxena via xgong)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c033209e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index 1ee8b3b..e5bb6e5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -112,6 +112,8 @@ public class AdminService extends CompositeService 
implements
   private final RecordFactory recordFactory = 
 RecordFactoryProvider.getRecordFactory(null);
 
+  private UserGroupInformation daemonUser;
+
   @VisibleForTesting
   boolean isDistributedNodeLabelConfiguration = false;
 
@@ -138,10 +140,9 @@ public class AdminService extends CompositeService 
implements
 YarnConfiguration.RM_ADMIN_ADDRESS,
 YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
 YarnConfiguration.DEFAULT_RM_ADMIN_PORT);
+daemonUser = UserGroupInformation.getCurrentUser();
 authorizer = YarnAuthorizationProvider.getInstance(conf);
-authorizer.setAdmins(new AccessControlList(conf.get(
-  YarnConfiguration.YARN_ADMIN_ACL,
-YarnConfiguration.DEFAULT_YARN_ADMIN_ACL)), UserGroupInformation
+authorizer.setAdmins(getAdminAclList(conf), UserGroupInformation
 .getCurrentUser());
 rmId = conf.get(YarnConfiguration.RM_HA_ID);
 
@@ -151,6 +152,14 @@ public class AdminService extends CompositeService 
implements
 super.serviceInit(conf);
   }
 
+  private AccessControlList getAdminAclList(Configuration conf) {
+AccessControlList aclList = new AccessControlList(conf.get(
+YarnConfiguration.YARN_ADMIN_ACL,
+YarnConfiguration.DEFAULT_YARN_ADMIN_ACL));
+aclList.addUser(daemonUser.getShortUserName());
+return aclList;
+  }
+
   @Override
   protected void serviceStart() throws Exception {
 startServer();
@@ -470,9 +479,7 @@ public class AdminService extends CompositeService 
implements
 Configuration conf =
 getConfiguration(new Configuration(false),
 YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
-authorizer.setAdmins(new AccessControlList(conf.get(
-  YarnConfiguration.YARN_ADMIN_ACL,
-YarnConfiguration.DEFAULT_YARN_ADMIN_ACL)), UserGroupInformation
+authorizer.setAdmins(getAdminAclList(conf), UserGroupInformation
 .getCurrentUser());
 RMAuditLogger.logSuccess(user.getShortUserName(), argName,
 AdminService);


[2/2] hadoop git commit: YARN-3706. Generalize native HBase writer for additional tables (Joep Rottinghuis via sjlee)

2015-06-18 Thread sjlee
YARN-3706. Generalize native HBase writer for additional tables (Joep 
Rottinghuis via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9137aeae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9137aeae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9137aeae

Branch: refs/heads/YARN-2928
Commit: 9137aeae0dec83f9eff40d12cae712dfd508c0c5
Parents: a1bb913
Author: Sangjin Lee sj...@apache.org
Authored: Thu Jun 18 10:49:20 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Thu Jun 18 10:49:20 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../storage/EntityColumnDetails.java| 110 --
 .../storage/EntityColumnFamily.java |  95 -
 .../storage/HBaseTimelineWriterImpl.java| 114 +++---
 .../server/timelineservice/storage/Range.java   |  59 
 .../storage/TimelineEntitySchemaConstants.java  |  71 
 .../storage/TimelineSchemaCreator.java  | 134 +---
 .../timelineservice/storage/TimelineWriter.java |   3 +-
 .../storage/TimelineWriterUtils.java| 344 ---
 .../storage/common/BaseTable.java   | 118 +++
 .../common/BufferedMutatorDelegator.java|  73 
 .../timelineservice/storage/common/Column.java  |  59 
 .../storage/common/ColumnFamily.java|  34 ++
 .../storage/common/ColumnHelper.java| 247 +
 .../storage/common/ColumnPrefix.java|  83 +
 .../timelineservice/storage/common/Range.java   |  59 
 .../storage/common/Separator.java   | 303 
 .../common/TimelineEntitySchemaConstants.java   |  68 
 .../storage/common/TimelineWriterUtils.java | 127 +++
 .../storage/common/TypedBufferedMutator.java|  28 ++
 .../storage/common/package-info.java|  24 ++
 .../storage/entity/EntityColumn.java| 141 
 .../storage/entity/EntityColumnFamily.java  |  65 
 .../storage/entity/EntityColumnPrefix.java  | 212 
 .../storage/entity/EntityRowKey.java|  93 +
 .../storage/entity/EntityTable.java | 161 +
 .../storage/entity/package-info.java|  25 ++
 .../storage/TestHBaseTimelineWriterImpl.java| 252 --
 .../storage/common/TestSeparator.java   | 129 +++
 .../storage/common/TestTimelineWriterUtils.java |  29 ++
 30 files changed, 2301 insertions(+), 962 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9137aeae/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 040afea..197a154 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -93,6 +93,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3276. Code cleanup for timeline service API records. (Junping Du via
 zjshen)
 
+YARN-3706. Generalize native HBase writer for additional tables (Joep
+Rottinghuis via sjlee)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9137aeae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
deleted file mode 100644
index 2894c41..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/EntityColumnDetails.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 

[1/2] hadoop git commit: YARN-3706. Generalize native HBase writer for additional tables (Joep Rottinghuis via sjlee)

2015-06-18 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 a1bb9137a - 9137aeae0


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9137aeae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
new file mode 100644
index 000..ee57890
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
@@ -0,0 +1,303 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Used to separate row qualifiers, column qualifiers and compound fields.
+ */
+public enum Separator {
+
+  /**
+   * separator in key or column qualifier fields
+   */
+  QUALIFIERS("!", "%0$"),
+
+  /**
+   * separator in values, and/or compound key/column qualifier fields.
+   */
+  VALUES("?", "%1$"),
+
+  /**
+   * separator in values, often used to avoid having these in qualifiers and
+   * names. Note that if we use HTML form encoding through URLEncoder, we end 
up
+   * getting a + for a space, which may already occur in strings, so we don't
+   * want that.
+   */
+  SPACE(" ", "%2$");
+
+  /**
+   * The string value of this separator.
+   */
+  private final String value;
+
+  /**
+   * The URLEncoded version of this separator
+   */
+  private final String encodedValue;
+
+  /**
+   * The bye representation of value.
+   */
+  private final byte[] bytes;
+
+  /**
+   * The value quoted so that it can be used as a safe regex
+   */
+  private final String quotedValue;
+
+  private static final byte[] EMPTY_BYTES = new byte[0];
+
+  /**
+   * @param value of the separator to use. Cannot be null or empty string.
+   * @param encodedValue choose something that isn't likely to occur in the 
data
+   *  itself. Cannot be null or empty string.
+   */
+  private Separator(String value, String encodedValue) {
+this.value = value;
+this.encodedValue = encodedValue;
+
+// validation
+if (value == null || value.length() == 0 || encodedValue == null
+|| encodedValue.length() == 0) {
+  throw new IllegalArgumentException(
+  "Cannot create separator from null or empty string.");
+}
+
+this.bytes = Bytes.toBytes(value);
+this.quotedValue = Pattern.quote(value);
+  }
+
+  /**
+   * Used to make token safe to be used with this separator without collisions.
+   *
+   * @param token
+   * @return the token with any occurrences of this separator URLEncoded.
+   */
+  public String encode(String token) {
+if (token == null || token.length() == 0) {
+  // Nothing to replace
+  return token;
+}
+return token.replace(value, encodedValue);
+  }
+
+  /**
+   * @param token
+   * @return the token with any occurrences of the encoded separator replaced 
by
+   * the separator itself.
+   */
+  public String decode(String token) {
+if (token == null || token.length() == 0) {
+  // Nothing to replace
+  return token;
+}
+return token.replace(encodedValue, value);
+  }
+
+  /**
+   * Encode the given separators in the token with their encoding equivalent.
+   * This means that when encoding is already present in the token itself, this
+   * is not a reversible process. See also {@link #decode(String, 
Separator...)}
+   *
+   * @param token containing possible separators that need to be encoded.
+   * @param separators to be encoded in the token with their URLEncoding
+   *  

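A small usage sketch for the Separator enum above (hedged; the input token is arbitrary):

import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;

public class SeparatorSketch {
  public static void main(String[] args) {
    String raw = "flow!run?1";  // contains both separator characters
    // encode() replaces '!' with its URL-style escape so the token can be
    // embedded in a qualifier without colliding with the separator itself.
    String safe = Separator.QUALIFIERS.encode(raw);   // "flow%0$run?1"
    String back = Separator.QUALIFIERS.decode(safe);  // "flow!run?1"
    assert raw.equals(back);
    System.out.println(safe + " -> " + back);
  }
}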
hadoop git commit: YARN-3802. Two RMNodes for the same NodeId are used in RM sometimes after NM is reconnected. Contributed by zhihai xu

2015-06-18 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6e0a9f92f - 5b5bb8dcd


YARN-3802. Two RMNodes for the same NodeId are used in RM sometimes
after NM is reconnected. Contributed by zhihai xu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b5bb8dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b5bb8dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b5bb8dc

Branch: refs/heads/trunk
Commit: 5b5bb8dcdc888ba1ebc7e4eba0fa0e7e79edda9a
Parents: 6e0a9f9
Author: Xuan xg...@apache.org
Authored: Thu Jun 18 14:37:49 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Thu Jun 18 14:37:49 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  8 ++-
 .../resourcetracker/TestNMReconnect.java| 67 +++-
 3 files changed, 74 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b5bb8dc/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f00170e..d89c285 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -539,6 +539,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3824. Fix two minor nits in member variable properties
 of YarnConfiguration. (Ray Chiang via devaraj)
 
+YARN-3802. Two RMNodes for the same NodeId are used in RM sometimes
+after NM is reconnected. (zhihai xu via xgong)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b5bb8dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 8a810cb..d1e6190 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -597,10 +597,14 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
     if (rmNode.getHttpPort() == newNode.getHttpPort()) {
       // Reset heartbeat ID since node just restarted.
       rmNode.getLastNodeHeartBeatResponse().setResponseId(0);
+      if (!rmNode.getTotalCapability().equals(
+          newNode.getTotalCapability())) {
+        rmNode.totalCapability = newNode.getTotalCapability();
+      }
       if (rmNode.getState().equals(NodeState.RUNNING)) {
-        // Only add new node if old state is RUNNING
+        // Only add old node if old state is RUNNING
         rmNode.context.getDispatcher().getEventHandler().handle(
-            new NodeAddedSchedulerEvent(newNode));
+            new NodeAddedSchedulerEvent(rmNode));
       }
     } else {
       // Reconnected node differs, so replace old node and start new node

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b5bb8dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java
index d16d551..b525efc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java
@@ -25,6 +25,9 @@ import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.NodeId;

hadoop git commit: YARN-3802. Two RMNodes for the same NodeId are used in RM sometimes after NM is reconnected. Contributed by zhihai xu

2015-06-18 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2946e92f7 - 86b75ac54


YARN-3802. Two RMNodes for the same NodeId are used in RM sometimes
after NM is reconnected. Contributed by zhihai xu

(cherry picked from commit 5b5bb8dcdc888ba1ebc7e4eba0fa0e7e79edda9a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86b75ac5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86b75ac5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86b75ac5

Branch: refs/heads/branch-2
Commit: 86b75ac54427c547ced194111428e477ba8694b4
Parents: 2946e92
Author: Xuan xg...@apache.org
Authored: Thu Jun 18 14:37:49 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Thu Jun 18 14:38:52 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  8 ++-
 .../resourcetracker/TestNMReconnect.java| 67 +++-
 3 files changed, 74 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86b75ac5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7a43bef..0794905 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -491,6 +491,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3824. Fix two minor nits in member variable properties
 of YarnConfiguration. (Ray Chiang via devaraj)
 
+YARN-3802. Two RMNodes for the same NodeId are used in RM sometimes
+after NM is reconnected. (zhihai xu via xgong)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86b75ac5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 8a810cb..d1e6190 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -597,10 +597,14 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
     if (rmNode.getHttpPort() == newNode.getHttpPort()) {
       // Reset heartbeat ID since node just restarted.
       rmNode.getLastNodeHeartBeatResponse().setResponseId(0);
+      if (!rmNode.getTotalCapability().equals(
+          newNode.getTotalCapability())) {
+        rmNode.totalCapability = newNode.getTotalCapability();
+      }
       if (rmNode.getState().equals(NodeState.RUNNING)) {
-        // Only add new node if old state is RUNNING
+        // Only add old node if old state is RUNNING
         rmNode.context.getDispatcher().getEventHandler().handle(
-            new NodeAddedSchedulerEvent(newNode));
+            new NodeAddedSchedulerEvent(rmNode));
       }
     } else {
       // Reconnected node differs, so replace old node and start new node

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86b75ac5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java
index d16d551..b525efc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java
@@ -25,6 +25,9 @@ import org.junit.Assert;
 import 

hadoop git commit: MAPREDUCE-6373. The logger reports total input paths but it is referring to input files. Contributed by Bibin A Chundatt.

2015-06-18 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/trunk 74351af3b - 1babe50a2


MAPREDUCE-6373. The logger reports total input paths but it is referring
to input files. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1babe50a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1babe50a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1babe50a

Branch: refs/heads/trunk
Commit: 1babe50a2cbaae3c8165229347e743d0dc94e979
Parents: 74351af
Author: Devaraj K deva...@apache.org
Authored: Thu Jun 18 11:42:22 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Thu Jun 18 11:42:22 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/mapred/FileInputFormat.java   | 2 +-
 .../org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java| 3 +--
 3 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1babe50a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index a9f5053..1037091 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -477,6 +477,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6363. [NNBench] Lease mismatch error when running with multiple
 mappers. (Brahma Reddy Battula via aajisaka)
 
+MAPREDUCE-6373. The logger reports total input paths but it is referring
+to input files. (Bibin A Chundatt via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1babe50a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index c6cbd50..2c58ebe 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -253,7 +253,7 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
       LOG.debug("Time taken to get FileStatuses: "
           + sw.now(TimeUnit.MILLISECONDS));
     }
-    LOG.info("Total input paths to process : " + result.length);
+    LOG.info("Total input files to process : " + result.length);
     return result;
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1babe50a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
index f5cd5d1..0c5ede9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.mapred.LocatedFileStatusFetcher;
-import org.apache.hadoop.mapred.SplitLocationInfo;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.Job;
@@ -287,7 +286,7 @@ public abstract class FileInputFormatK, V extends 
InputFormatK, V {
   LOG.debug(Time taken to get FileStatuses: 
   + sw.now(TimeUnit.MILLISECONDS));
 }
-LOG.info(Total input paths to process :  + result.size()); 
+LOG.info(Total input files to process :  + result.size());
 return result;
   }
 



hadoop git commit: MAPREDUCE-6373. The logger reports total input paths but it is referring to input files. Contributed by Bibin A Chundatt.

2015-06-18 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8cac3888c - c4c7fd0bf


MAPREDUCE-6373. The logger reports total input paths but it is referring
to input files. Contributed by Bibin A Chundatt.

(cherry picked from commit 1babe50a2cbaae3c8165229347e743d0dc94e979)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4c7fd0b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4c7fd0b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4c7fd0b

Branch: refs/heads/branch-2
Commit: c4c7fd0bf1e53d046587cbf262e8f06a996abba5
Parents: 8cac388
Author: Devaraj K deva...@apache.org
Authored: Thu Jun 18 11:42:22 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Thu Jun 18 11:45:11 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/mapred/FileInputFormat.java   | 2 +-
 .../org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java| 3 +--
 3 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4c7fd0b/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 6d6021a..c66535d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -216,6 +216,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6363. [NNBench] Lease mismatch error when running with multiple
 mappers. (Brahma Reddy Battula via aajisaka)
 
+MAPREDUCE-6373. The logger reports total input paths but it is referring
+to input files. (Bibin A Chundatt via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4c7fd0b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index c6cbd50..2c58ebe 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -253,7 +253,7 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
       LOG.debug("Time taken to get FileStatuses: "
           + sw.now(TimeUnit.MILLISECONDS));
     }
-    LOG.info("Total input paths to process : " + result.length);
+    LOG.info("Total input files to process : " + result.length);
     return result;
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4c7fd0b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
index f5cd5d1..0c5ede9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.mapred.LocatedFileStatusFetcher;
-import org.apache.hadoop.mapred.SplitLocationInfo;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.Job;
@@ -287,7 +286,7 @@ public abstract class FileInputFormatK, V extends 
InputFormatK, V {
   LOG.debug(Time taken to get FileStatuses: 
   + sw.now(TimeUnit.MILLISECONDS));
 }
-LOG.info(Total input paths to process :  + result.size()); 
+LOG.info(Total input files to process :  + result.size());
 return result;
   }
 



[2/2] hadoop git commit: HADOOP-7139. Allow appending to existing SequenceFiles (Contributed by kanaka kumar avvaru)

2015-06-18 Thread vinayakumarb
HADOOP-7139. Allow appending to existing SequenceFiles (Contributed by kanaka 
kumar avvaru)

(cherry picked from commit 295d678be8853a52c3ec3da43d9265478d6632b3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80697e4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80697e4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80697e4f

Branch: refs/heads/branch-2
Commit: 80697e4f324948ec32b4cad3faccba55287be652
Parents: c4c7fd0
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Jun 18 14:39:00 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Thu Jun 18 14:45:07 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../java/org/apache/hadoop/io/SequenceFile.java |  85 -
 .../hadoop/io/TestSequenceFileAppend.java   | 311 +++
 3 files changed, 394 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80697e4f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index bf897ec..d0b4e3e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -158,6 +158,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11965. determine-flaky-tests needs a summary mode.
 (Yufei Gu via Yongjun Zhang)
 
+HADOOP-7139. Allow appending to existing SequenceFiles
+(kanaka kumar avvaru via vinayakumarb)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/80697e4f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index 0d056b7..e908205 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -837,7 +837,9 @@ public class SequenceFile {
     DataOutputStream deflateOut = null;
     Metadata metadata = null;
     Compressor compressor = null;
-
+
+    private boolean appendMode = false;
+
     protected Serializer keySerializer;
     protected Serializer uncompressedValSerializer;
     protected Serializer compressedValSerializer;
@@ -909,6 +911,13 @@ public class SequenceFile {
       }
     }
 
+    static class AppendIfExistsOption extends Options.BooleanOption implements
+        Option {
+      AppendIfExistsOption(boolean value) {
+        super(value);
+      }
+    }
+
     static class KeyClassOption extends Options.ClassOption implements Option {
       KeyClassOption(Class<?> value) {
         super(value);
@@ -958,7 +967,7 @@ public class SequenceFile {
         return codec;
       }
     }
-
+
     public static Option file(Path value) {
       return new FileOption(value);
     }
@@ -984,6 +993,10 @@ public class SequenceFile {
       return new ReplicationOption(value);
     }
 
+    public static Option appendIfExists(boolean value) {
+      return new AppendIfExistsOption(value);
+    }
+
     public static Option blockSize(long value) {
       return new BlockSizeOption(value);
     }
@@ -1030,6 +1043,8 @@ public class SequenceFile {
       ProgressableOption progressOption =
           Options.getOption(ProgressableOption.class, opts);
       FileOption fileOption = Options.getOption(FileOption.class, opts);
+      AppendIfExistsOption appendIfExistsOption = Options.getOption(
+          AppendIfExistsOption.class, opts);
       FileSystemOption fsOption = Options.getOption(FileSystemOption.class, opts);
       StreamOption streamOption = Options.getOption(StreamOption.class, opts);
       KeyClassOption keyClassOption =
@@ -1071,7 +1086,54 @@ public class SequenceFile {
           blockSizeOption.getValue();
       Progressable progress = progressOption == null ? null :
           progressOption.getValue();
-      out = fs.create(p, true, bufferSize, replication, blockSize, progress);
+
+      if (appendIfExistsOption != null && appendIfExistsOption.getValue()
+          && fs.exists(p)) {
+
+        // Read the file and verify header details
+        SequenceFile.Reader reader = new SequenceFile.Reader(conf,
+            SequenceFile.Reader.file(p), new Reader.OnlyHeaderOption());
+        try {
+
+          if (keyClassOption.getValue() != reader.getKeyClass()
+              ||
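
For reference, a hedged usage sketch of the new option; the path and the
key/value types are illustrative, and this assumes the Writer.Option form of
SequenceFile.createWriter:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class AppendExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/data.seq");   // illustrative path
    // With appendIfExists(true), an existing file is opened for append after
    // its header (key/value classes, compression) is verified to match;
    // otherwise a new file is created as before.
    SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(path),
        SequenceFile.Writer.keyClass(LongWritable.class),
        SequenceFile.Writer.valueClass(Text.class),
        SequenceFile.Writer.appendIfExists(true));
    try {
      writer.append(new LongWritable(1L), new Text("first record"));
    } finally {
      writer.close();
    }
  }
}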