hadoop git commit: HDFS-9210. Fix some misuse of %n in VolumeScanner#printStats. Contributed by Xiaoyu Yao.

2016-02-01 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7bb48ed16 -> 924e1583e


HDFS-9210. Fix some misuse of %n in VolumeScanner#printStats. Contributed by Xiaoyu Yao.

(cherry picked from commit b11008d54d8aa3eb9c7c10c3a0734d95b9ef41e9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/924e1583
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/924e1583
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/924e1583

Branch: refs/heads/branch-2.8
Commit: 924e1583ed6dc555dd55bfcb5ed5c08215121631
Parents: 7bb48ed
Author: Xiaoyu Yao 
Authored: Mon Feb 1 11:55:27 2016 -0800
Committer: Xiaoyu Yao 
Committed: Mon Feb 1 11:57:51 2016 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java | 7 +++
 2 files changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/924e1583/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 89c9e96..16c4f55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1690,6 +1690,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames
 (Mingliang Liu via stevel)
 
+HDFS-9210. Fix some misuse of %n in VolumeScanner#printStats.
+(Xiaoyu Yao)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/924e1583/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index 212e13b..d1f2d05 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -216,9 +216,8 @@ public class VolumeScanner extends Thread {
   }
 
   public void printStats(StringBuilder p) {
-    p.append("Block scanner information for volume " +
-        volume.getStorageID() + " with base path " + volume.getBasePath() +
-        "%n");
+    p.append(String.format("Block scanner information for volume %s with base"
+        + " path %s%n", volume.getStorageID(), volume.getBasePath()));
     synchronized (stats) {
       p.append(String.format("Bytes verified in last hour   : %57d%n",
           stats.bytesScannedInPastHour));
@@ -245,7 +244,7 @@ public class VolumeScanner extends Thread {
           stats.lastBlockScanned.toString()));
       p.append(String.format("More blocks to scan in period : %57s%n",
           !stats.eof));
-      p.append("%n");
+      p.append(System.lineSeparator());
     }
   }
 

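An aside on the bug itself: %n is a java.util.Formatter conversion, so it only becomes a line separator when run through String.format and friends; StringBuilder.append does no format processing and copies the characters '%' and 'n' into the output literally. A minimal, standalone illustration of the misuse and of both corrected forms used in this patch (a demo class, not the Hadoop code):

public class FormatDemo {
  public static void main(String[] args) {
    StringBuilder sb = new StringBuilder();

    // Wrong: append() does no format processing, so "%n" lands in the
    // output as the literal characters '%' and 'n'.
    sb.append("broken line%n");

    // Right: %n inside String.format() becomes the platform line separator.
    sb.append(String.format("formatted line%n"));

    // Also right: append the separator directly when nothing needs formatting.
    sb.append(System.lineSeparator());

    System.out.print(sb);
  }
}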


hadoop git commit: HDFS-9210. Fix some misuse of %n in VolumeScanner#printStats. Contributed by Xiaoyu Yao.

2016-02-01 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f5f665213 -> 7cc3b1c7a


HDFS-9210. Fix some misuse of %n in VolumeScanner#printStats. Contributed by Xiaoyu Yao.

(cherry picked from commit b11008d54d8aa3eb9c7c10c3a0734d95b9ef41e9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7cc3b1c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7cc3b1c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7cc3b1c7

Branch: refs/heads/branch-2
Commit: 7cc3b1c7a36b884d57d247d5ba8a92585a73589c
Parents: f5f6652
Author: Xiaoyu Yao 
Authored: Mon Feb 1 11:55:27 2016 -0800
Committer: Xiaoyu Yao 
Committed: Mon Feb 1 11:57:11 2016 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java | 7 +++
 2 files changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cc3b1c7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dfd7e18..227af05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1772,6 +1772,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames
 (Mingliang Liu via stevel)
 
+HDFS-9210. Fix some misuse of %n in VolumeScanner#printStats.
+(Xiaoyu Yao)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cc3b1c7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index 212e13b..d1f2d05 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -216,9 +216,8 @@ public class VolumeScanner extends Thread {
   }
 
   public void printStats(StringBuilder p) {
-    p.append("Block scanner information for volume " +
-        volume.getStorageID() + " with base path " + volume.getBasePath() +
-        "%n");
+    p.append(String.format("Block scanner information for volume %s with base"
+        + " path %s%n", volume.getStorageID(), volume.getBasePath()));
     synchronized (stats) {
       p.append(String.format("Bytes verified in last hour   : %57d%n",
           stats.bytesScannedInPastHour));
@@ -245,7 +244,7 @@ public class VolumeScanner extends Thread {
           stats.lastBlockScanned.toString()));
       p.append(String.format("More blocks to scan in period : %57s%n",
           !stats.eof));
-      p.append("%n");
+      p.append(System.lineSeparator());
     }
   }
 



hadoop git commit: HDFS-9494. Parallel optimization of DFSStripedOutputStream#flushAllInternals. Contributed by Gao Rui.

2016-02-01 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk e50aa53ee -> e30ce01dd


HDFS-9494. Parallel optimization of DFSStripedOutputStream#flushAllInternals. Contributed by Gao Rui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e30ce01d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e30ce01d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e30ce01d

Branch: refs/heads/trunk
Commit: e30ce01ddce1cfd1e9d49c4784eb4a6bc87e36ca
Parents: e50aa53
Author: Jing Zhao 
Authored: Mon Feb 1 13:02:58 2016 -0800
Committer: Jing Zhao 
Committed: Mon Feb 1 13:02:58 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 28 
 .../hadoop/hdfs/DFSStripedOutputStream.java | 71 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 3 files changed, 78 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e30ce01d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index f6a8981..1c58b28 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -685,18 +685,7 @@ public class DFSOutputStream extends FSOutputSummer
* received from datanodes.
*/
   protected void flushInternal() throws IOException {
-long toWaitFor;
-synchronized (this) {
-  dfsClient.checkOpen();
-  checkClosed();
-  //
-  // If there is data in the current buffer, send it across
-  //
-  getStreamer().queuePacket(currentPacket);
-  currentPacket = null;
-  toWaitFor = getStreamer().getLastQueuedSeqno();
-}
-
+long toWaitFor = flushInternalWithoutWaitingAck();
 getStreamer().waitForAckedSeqno(toWaitFor);
   }
 
@@ -864,6 +853,21 @@ public class DFSOutputStream extends FSOutputSummer
 return getStreamer().getBlockToken();
   }
 
+  protected long flushInternalWithoutWaitingAck() throws IOException {
+long toWaitFor;
+synchronized (this) {
+  dfsClient.checkOpen();
+  checkClosed();
+  //
+  // If there is data in the current buffer, send it across
+  //
+  getStreamer().queuePacket(currentPacket);
+  currentPacket = null;
+  toWaitFor = getStreamer().getLastQueuedSeqno();
+}
+return toWaitFor;
+  }
+
   @Override
   public void setDropBehind(Boolean dropBehind) throws IOException {
 CachingStrategy prevStrategy, nextStrategy;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e30ce01d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index e1ff844..8292d0a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -34,6 +34,13 @@ import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -245,7 +252,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
   private final List streamers;
   private final DFSPacket[] currentPackets; // current Packet of each streamer
 
-  /** Size of each striping cell, must be a multiple of bytesPerChecksum */
+  // Size of each striping cell, must be a multiple of bytesPerChecksum.
   private final int cellSize;
   private final int numAllBlocks;
   private final int numDataBlocks;
@@ -253,6 +260,8 @@ public class DFSStripedOutputStream extends DFSOutputStream {
   private final String[] favoredNodes;
   private final List failedStreamers;
   private 

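The point of the refactoring above: flushInternal() used to queue the current packet and block for its ack in a single step, which forces a striped stream to flush its streamers one after another. Splitting out flushInternalWithoutWaitingAck() lets DFSStripedOutputStream queue the flush on every streamer first and then wait for all the acks concurrently; the newly imported ExecutorCompletionService is the machinery for that. A simplified, self-contained sketch of the fan-out/fan-in shape (illustrative task bodies and names, not the actual flushAllInternals code):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ParallelFlushSketch {
  public static void main(String[] args) throws InterruptedException {
    List<Integer> streamers = Arrays.asList(0, 1, 2, 3);
    ExecutorService pool = Executors.newFixedThreadPool(streamers.size());
    CompletionService<Integer> acks = new ExecutorCompletionService<>(pool);

    // Fan out: start waiting for every streamer's ack at the same time.
    for (final Integer id : streamers) {
      acks.submit(new Callable<Integer>() {
        @Override
        public Integer call() throws Exception {
          Thread.sleep(50);  // stand-in for waitForAckedSeqno(toWaitFor)
          return id;
        }
      });
    }

    // Fan in: total wait is bounded by the slowest streamer rather than
    // the sum over all streamers.
    for (int i = 0; i < streamers.size(); i++) {
      try {
        System.out.println("streamer " + acks.take().get() + " acked");
      } catch (ExecutionException e) {
        System.err.println("flush failed: " + e.getCause());
      }
    }
    pool.shutdown();
  }
}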
hadoop git commit: HDFS-9210. Fix some misuse of %n in VolumeScanner#printStats. Contributed by Xiaoyu Yao.

2016-02-01 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 59a212b6e -> b11008d54


HDFS-9210. Fix some misuse of %n in VolumeScanner#printStats. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b11008d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b11008d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b11008d5

Branch: refs/heads/trunk
Commit: b11008d54d8aa3eb9c7c10c3a0734d95b9ef41e9
Parents: 59a212b
Author: Xiaoyu Yao 
Authored: Mon Feb 1 11:55:27 2016 -0800
Committer: Xiaoyu Yao 
Committed: Mon Feb 1 11:55:27 2016 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java | 7 +++
 2 files changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b11008d5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fdf69d9..ee9dcc2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2664,6 +2664,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames
 (Mingliang Liu via stevel)
 
+HDFS-9210. Fix some misuse of %n in VolumeScanner#printStats.
+(Xiaoyu Yao)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b11008d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index 212e13b..d1f2d05 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -216,9 +216,8 @@ public class VolumeScanner extends Thread {
   }
 
   public void printStats(StringBuilder p) {
-    p.append("Block scanner information for volume " +
-        volume.getStorageID() + " with base path " + volume.getBasePath() +
-        "%n");
+    p.append(String.format("Block scanner information for volume %s with base"
+        + " path %s%n", volume.getStorageID(), volume.getBasePath()));
     synchronized (stats) {
       p.append(String.format("Bytes verified in last hour   : %57d%n",
           stats.bytesScannedInPastHour));
@@ -245,7 +244,7 @@ public class VolumeScanner extends Thread {
           stats.lastBlockScanned.toString()));
       p.append(String.format("More blocks to scan in period : %57s%n",
           !stats.eof));
-      p.append("%n");
+      p.append(System.lineSeparator());
     }
   }
 



hadoop git commit: HDFS-9701. DN may deadlock when hot-swapping under load. (Xiao Chen via lei)

2016-02-01 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk b11008d54 -> e50aa53ee


HDFS-9701. DN may deadlock when hot-swapping under load. (Xiao Chen via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e50aa53e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e50aa53e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e50aa53e

Branch: refs/heads/trunk
Commit: e50aa53eed3d0ff1bc8fe60381524bb3bbe53bc1
Parents: b11008d
Author: Lei Xu 
Authored: Mon Feb 1 11:35:02 2016 -0800
Committer: Lei Xu 
Committed: Mon Feb 1 12:56:53 2016 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  2 +
 .../datanode/fsdataset/impl/FsVolumeImpl.java   | 23 
 .../datanode/fsdataset/impl/FsVolumeList.java   | 47 -
 .../fsdataset/impl/TestFsDatasetImpl.java   | 55 
 .../fsdataset/impl/TestFsVolumeList.java| 42 ---
 6 files changed, 152 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e50aa53e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ee9dcc2b..1141df1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2667,6 +2667,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-9210. Fix some misuse of %n in VolumeScanner#printStats.
     (Xiaoyu Yao)
 
+    HDFS-9701. DN may deadlock when hot-swapping under load. (Xiao Chen via
+    lei)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e50aa53e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index cbd4123..bf87346 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -493,6 +493,7 @@ class FsDatasetImpl implements FsDatasetSpi {
   // Disable the volume from the service.
   asyncDiskService.removeVolume(sd.getCurrentDir());
   volumes.removeVolume(absRoot, clearFailure);
+  volumes.waitVolumeRemoved(5000, this);
 
   // Removed all replica information for the blocks on the volume.
   // Unlike updating the volumeMap in addVolume(), this operation does
@@ -1769,6 +1770,7 @@ class FsDatasetImpl implements FsDatasetSpi {
    *
    * @throws IOException   May be thrown from the methods called.
    */
+  @Override // FsDatasetSpi
   public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
   throws ReplicaNotFoundException, UnexpectedReplicaStateException,
   FileNotFoundException, EOFException, IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e50aa53e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 6b79073..86f03c2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -235,29 +235,30 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
 
   /**
-   * Close this volume and wait all other threads to release the reference count
-   * on this volume.
-   * @throws IOException if the volume is closed or the waiting is interrupted.
+   * Close this volume.
+   * @throws IOException if the volume is closed.
    */
-  void closeAndWait() throws IOException {
+  void setClosed() throws IOException {
     try {
       this.reference.setClosed();
     } catch (ClosedChannelException e) {
       throw new IOException("The volume has already closed.", e);
     }
-    final int SLEEP_MILLIS = 

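The shape of the deadlock being fixed: the removed closeAndWait() slept in a loop until the volume's reference count dropped to zero, while its callers held the dataset lock — and the threads that would release those references needed the same lock. The patch splits the operation into setClosed(), which only flips state, and FsVolumeList#waitVolumeRemoved, a monitor wait that gives the lock up while sleeping. A distilled, self-contained sketch of that pattern (simplified stand-in class, not the Hadoop one):

public class VolumeRemover {
  private int references = 1;      // guarded by 'this'
  private boolean closed = false;  // guarded by 'this'

  // Under the lock: only flip state, never block.
  public synchronized void setClosed() {
    closed = true;
  }

  // Worker threads call this when they finish with the volume.
  public synchronized void release() {
    references--;
    notifyAll();  // wake anyone parked in waitVolumeRemoved()
  }

  // wait() releases the monitor while sleeping, so workers can still get
  // into release() — the old busy-wait kept the lock and starved them.
  public synchronized boolean waitVolumeRemoved(long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!(closed && references == 0)) {
      long remaining = deadline - System.currentTimeMillis();
      if (remaining <= 0) {
        return false;  // timed out; the volume is still referenced
      }
      wait(remaining);
    }
    return true;
  }
}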
hadoop git commit: HDFS-9406. FSImage may get corrupted after deleting snapshot. (Contributed by Jing Zhao, Stanislav Antic, Vinayakumar B, Yongjun Zhang)

2016-02-01 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/trunk e30ce01dd -> 34ab50ea9


HDFS-9406. FSImage may get corrupted after deleting snapshot. (Contributed by Jing Zhao, Stanislav Antic, Vinayakumar B, Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34ab50ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34ab50ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34ab50ea

Branch: refs/heads/trunk
Commit: 34ab50ea92370cc7440a8f7649286b148c2fde65
Parents: e30ce01
Author: Yongjun Zhang 
Authored: Mon Feb 1 11:23:44 2016 -0800
Committer: Yongjun Zhang 
Committed: Mon Feb 1 13:56:55 2016 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  2 +-
 .../hdfs/server/namenode/INodeReference.java|  5 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  | 11 ++-
 .../hdfs/server/namenode/TestINodeFile.java |  3 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 95 ++--
 6 files changed, 107 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34ab50ea/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 18716e0..f77451b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2733,6 +2733,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9690. ClientProtocol.addBlock is not idempotent after HDFS-8071.
 (szetszwo)
 
+HDFS-9406. FSImage may get corrupted after deleting snapshot.
+(Contributed by Jing Zhao, Stanislav Antic, Vinayakumar B, Yongjun Zhang)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34ab50ea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 353f29b..4e44b0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -576,7 +576,7 @@ public class INodeFile extends INodeWithAdditionalFields
 
   /** Clear all blocks of the file. */
   public void clearBlocks() {
-setBlocks(null);
+setBlocks(BlockInfo.EMPTY_ARRAY);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34ab50ea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 8734956..1b85237 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -421,8 +421,9 @@ public abstract class INodeReference extends INode {
 setParent(null);
   }
 }
-
-WithName getLastWithName() {
+
+/** Return the last WithName reference if there is any, null otherwise. */
+public WithName getLastWithName() {
   return withNameList.size() > 0 ? 
   withNameList.get(withNameList.size() - 1) : null;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34ab50ea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 5c5b259..0111b3b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -452,7 +452,16 @@ public class DirectoryWithSnapshotFeature implements 

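Worth noting in the INodeFile hunk above: clearBlocks() now stores BlockInfo.EMPTY_ARRAY instead of null, so code that iterates the block list — as the snapshot-deletion path does — needs no null check. The empty-array-over-null idiom in miniature (toy types, not the HDFS classes):

public class BlockListDemo {
  private static final long[] EMPTY_ARRAY = {};

  private long[] blocks = {101L, 102L};

  // Storing an empty array keeps every caller's loop and .length valid.
  public void clearBlocks() {
    blocks = EMPTY_ARRAY;  // with 'blocks = null' totalSize() would NPE
  }

  public long totalSize() {
    long total = 0;
    for (long b : blocks) {  // safe even after clearBlocks()
      total += b;
    }
    return total;
  }

  public static void main(String[] args) {
    BlockListDemo f = new BlockListDemo();
    f.clearBlocks();
    System.out.println(f.totalSize());  // prints 0, no NullPointerException
  }
}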
hadoop git commit: YARN-3102. Decommisioned Nodes not listed in Web UI. Contributed by Kuhu Shukla (cherry picked from commit ed55950164a66e08fa34e30dba1030c5a986d1f1)

2016-02-01 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 d7b01ebcb -> 9862879e2


YARN-3102. Decommisioned Nodes not listed in Web UI. Contributed by Kuhu Shukla
(cherry picked from commit ed55950164a66e08fa34e30dba1030c5a986d1f1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9862879e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9862879e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9862879e

Branch: refs/heads/branch-2.8
Commit: 9862879e277f5183ca25debb89c8131a0a779022
Parents: d7b01eb
Author: Jason Lowe 
Authored: Mon Feb 1 23:15:26 2016 +
Committer: Jason Lowe 
Committed: Mon Feb 1 23:17:02 2016 +

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../resourcemanager/NodesListManager.java   | 119 +-
 .../server/resourcemanager/ResourceManager.java |   5 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  24 ++-
 .../yarn/server/resourcemanager/MockRM.java |  16 ++
 .../server/resourcemanager/TestRMRestart.java   |  11 +-
 .../TestResourceTrackerService.java | 154 +++
 7 files changed, 282 insertions(+), 50 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9862879e/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4075c5e..be726fb 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1262,6 +1262,9 @@ Release 2.7.3 - UNRELEASED
 YARN-4428. Redirect RM page to AHS page when AHS turned on and RM page is
 not available (Chang Li via jlowe)
 
+YARN-3102. Decommisioned Nodes not listed in Web UI (Kuhu Shukla via
+jlowe)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9862879e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
index 4d9922b..0be5e5b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
@@ -32,6 +32,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.Node;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.service.CompositeService;
@@ -47,6 +48,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppNodeUpdateEvent.
org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppNodeUpdateEvent.
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.yarn.util.Clock;
@@ -95,7 +97,7 @@ public class NodesListManager extends CompositeService implements
   YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH);
   this.hostsReader =
   createHostsFileReader(this.includesFile, this.excludesFile);
-  setDecomissionedNMsMetrics();
+  setDecomissionedNMs();
   printConfiguredHosts();
 } catch (YarnException ex) {
   disableHostsFileReader(ex);
@@ -157,9 +159,24 @@ public class NodesListManager extends CompositeService implements
 }
   }
 
-  private void setDecomissionedNMsMetrics() {
+  private void setDecomissionedNMs() {
 Set excludeList = hostsReader.getExcludedHosts();
-ClusterMetrics.getMetrics().setDecommisionedNMs(excludeList.size());
+for (final String host : excludeList) {
+  UnknownNodeId nodeId = new UnknownNodeId(host);
+  RMNodeImpl rmNode = new RMNodeImpl(nodeId,
+  rmContext, host, -1, -1, new UnknownNode(host), null, null);
+
+  RMNode prevRMNode =
+  rmContext.getRMNodes().putIfAbsent(nodeId, 

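The new loop registers a placeholder RMNode for every excluded host through ConcurrentMap.putIfAbsent, which inserts only when no mapping exists and otherwise returns the current value untouched — so a node that is already being tracked is never clobbered. The idiom in isolation (illustrative keys and values):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PutIfAbsentDemo {
  public static void main(String[] args) {
    ConcurrentMap<String, String> nodes = new ConcurrentHashMap<>();
    nodes.put("host1", "RUNNING");

    // Returns the existing value (leaving it in place) if the key is
    // already mapped, or null if our value was inserted.
    String prev1 = nodes.putIfAbsent("host1", "DECOMMISSIONED");
    String prev2 = nodes.putIfAbsent("host2", "DECOMMISSIONED");

    System.out.println(prev1);              // RUNNING -> live node kept
    System.out.println(prev2);              // null -> placeholder inserted
    System.out.println(nodes.get("host1")); // RUNNING
    System.out.println(nodes.get("host2")); // DECOMMISSIONED
  }
}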
hadoop git commit: YARN-3102. Decommisioned Nodes not listed in Web UI. Contributed by Kuhu Shukla (cherry picked from commit ed55950164a66e08fa34e30dba1030c5a986d1f1)

2016-02-01 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fc8d9cc75 -> 36aae8050


YARN-3102. Decommisioned Nodes not listed in Web UI. Contributed by Kuhu Shukla
(cherry picked from commit ed55950164a66e08fa34e30dba1030c5a986d1f1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36aae805
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36aae805
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36aae805

Branch: refs/heads/branch-2
Commit: 36aae8050e4bd49752b74ed8d83d3cce7da48c71
Parents: fc8d9cc
Author: Jason Lowe 
Authored: Mon Feb 1 23:15:26 2016 +
Committer: Jason Lowe 
Committed: Mon Feb 1 23:16:18 2016 +

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../resourcemanager/NodesListManager.java   | 119 +-
 .../server/resourcemanager/ResourceManager.java |   5 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  24 ++-
 .../yarn/server/resourcemanager/MockRM.java |  16 ++
 .../server/resourcemanager/TestRMRestart.java   |  11 +-
 .../TestResourceTrackerService.java | 154 +++
 7 files changed, 282 insertions(+), 50 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36aae805/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e781ca5..40715ad 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1392,6 +1392,9 @@ Release 2.7.3 - UNRELEASED
 YARN-4428. Redirect RM page to AHS page when AHS turned on and RM page is
 not available (Chang Li via jlowe)
 
+YARN-3102. Decommisioned Nodes not listed in Web UI (Kuhu Shukla via
+jlowe)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36aae805/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
index 96307a7..e6251fe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
@@ -32,6 +32,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.Node;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.HostsFileReader;
@@ -47,6 +48,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppNodeUpdateEvent.
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.yarn.util.Clock;
@@ -96,7 +98,7 @@ public class NodesListManager extends CompositeService implements
   YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH);
   this.hostsReader =
   createHostsFileReader(this.includesFile, this.excludesFile);
-  setDecomissionedNMsMetrics();
+  setDecomissionedNMs();
   printConfiguredHosts();
 } catch (YarnException ex) {
   disableHostsFileReader(ex);
@@ -158,9 +160,24 @@ public class NodesListManager extends CompositeService implements
 }
   }
 
-  private void setDecomissionedNMsMetrics() {
+  private void setDecomissionedNMs() {
 Set excludeList = hostsReader.getExcludedHosts();
-ClusterMetrics.getMetrics().setDecommisionedNMs(excludeList.size());
+for (final String host : excludeList) {
+  UnknownNodeId nodeId = new UnknownNodeId(host);
+  RMNodeImpl rmNode = new RMNodeImpl(nodeId,
+  rmContext, host, -1, -1, new UnknownNode(host), null, null);
+
+  RMNode prevRMNode =
+  rmContext.getRMNodes().putIfAbsent(nodeId, rmNode);

[1/3] hadoop git commit: YARN-4340. Add list API to reservation system. (Sean Po via wangda)

2016-02-01 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 9862879e2 -> c487453b9


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c487453b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
index 1756e86..bc98e2f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.yarn.api.records.ReservationDefinition;
 import org.apache.hadoop.yarn.api.records.ReservationId;
@@ -98,19 +99,11 @@ public class TestInMemoryPlan {
 new InMemoryPlan(queueMetrics, policy, agent, totalCapacity, 1L,
 resCalc, minAlloc, maxAlloc, planName, replanner, true, context);
 ReservationId reservationID =
-ReservationSystemTestUtil.getNewReservationId();
+ReservationSystemTestUtil.getNewReservationId();
 int[] alloc = { 10, 10, 10, 10, 10, 10 };
 int start = 100;
-Map allocations =
-generateAllocation(start, alloc, false);
-ReservationDefinition rDef =
-createSimpleReservationDefinition(start, start + alloc.length,
-alloc.length, allocations.values());
-Map allocs =
-ReservationSystemUtil.toResources(allocations);
-ReservationAllocation rAllocation =
-new InMemoryReservationAllocation(reservationID, rDef, user, planName,
-start, start + alloc.length, allocs, resCalc, minAlloc);
+ReservationAllocation rAllocation = createReservationAllocation
+(reservationID, start, alloc);
 Assert.assertNull(plan.getReservationById(reservationID));
 try {
   plan.addReservation(rAllocation, false);
@@ -139,19 +132,11 @@ public class TestInMemoryPlan {
 new InMemoryPlan(queueMetrics, policy, agent, totalCapacity, 1L,
 resCalc, minAlloc, maxAlloc, planName, replanner, true, context);
 ReservationId reservationID =
-ReservationSystemTestUtil.getNewReservationId();
+ReservationSystemTestUtil.getNewReservationId();
 int[] alloc = {};
 int start = 100;
-Map allocations =
-new HashMap();
-ReservationDefinition rDef =
-createSimpleReservationDefinition(start, start + alloc.length,
-alloc.length, allocations.values());
-Map allocs = ReservationSystemUtil.toResources
-(allocations);
-ReservationAllocation rAllocation =
-new InMemoryReservationAllocation(reservationID, rDef, user, planName,
-start, start + alloc.length, allocs, resCalc, minAlloc);
+ReservationAllocation rAllocation = createReservationAllocation
+(reservationID, start, alloc);
 Assert.assertNull(plan.getReservationById(reservationID));
 try {
   plan.addReservation(rAllocation, false);
@@ -167,19 +152,11 @@ public class TestInMemoryPlan {
 new InMemoryPlan(queueMetrics, policy, agent, totalCapacity, 1L,
 resCalc, minAlloc, maxAlloc, planName, replanner, true, context);
 ReservationId reservationID =
-ReservationSystemTestUtil.getNewReservationId();
+ReservationSystemTestUtil.getNewReservationId();
 int[] alloc = { 10, 10, 10, 10, 10, 10 };
 int start = 100;
-Map allocations =
-generateAllocation(start, alloc, false);
-ReservationDefinition rDef =
-createSimpleReservationDefinition(start, start + alloc.length,
-alloc.length, allocations.values());
-Map allocs = ReservationSystemUtil.toResources
-(allocations);
-ReservationAllocation rAllocation =
-new InMemoryReservationAllocation(reservationID, rDef, user, planName,
-start, start + alloc.length, allocs, resCalc, minAlloc);
+ReservationAllocation rAllocation = createReservationAllocation
+

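The TestInMemoryPlan churn above is a straight extract-method refactor: each test's repeated definition/allocation boilerplate collapses into a single createReservationAllocation(reservationID, start, alloc) helper. A stripped-down analogue with toy types (hypothetical names, not the YARN test code):

public class ReservationFixture {
  static final class Allocation {
    final long id;
    final int start;
    final int[] perStepUnits;

    Allocation(long id, int start, int[] perStepUnits) {
      this.id = id;
      this.start = start;
      this.perStepUnits = perStepUnits;
    }
  }

  // One place to build a test allocation from the three inputs each test
  // actually varies: the id, the start time, and the per-step shape.
  static Allocation createReservationAllocation(long id, int start,
      int[] alloc) {
    return new Allocation(id, start, alloc);
  }

  public static void main(String[] args) {
    Allocation a = createReservationAllocation(1L, 100,
        new int[] {10, 10, 10, 10, 10, 10});
    System.out.println(a.id + " starts at " + a.start + " and spans "
        + a.perStepUnits.length + " steps");
  }
}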
[2/3] hadoop git commit: YARN-4340. Add list API to reservation system. (Sean Po via wangda)

2016-02-01 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9875325d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceAllocationRequestPBImpl.java
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceAllocationRequestPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceAllocationRequestPBImpl.java
new file mode 100644
index 000..737ae44
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceAllocationRequestPBImpl.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ResourceAllocationRequest;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceAllocationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceAllocationRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+
+/**
+ * {@code ResourceAllocationRequestPBImpl} which implements the
+ * {@link ResourceAllocationRequest} class which represents an allocation
+ * made for a reservation for the current state of the plan. This can be
+ * changed for reasons such as re-planning, but will always be subject to the
+ * constraints of the user contract as described by a
+ * {@code ReservationDefinition}
+ * {@link Resource}
+ *
+ * 
+ * It includes:
+ * 
+ *   StartTime of the allocation.
+ *   EndTime of the allocation.
+ *   {@link Resource} reserved for the allocation.
+ * 
+ *
+ * @see Resource
+ */
+@Private
+@Unstable
+public class ResourceAllocationRequestPBImpl extends
+ResourceAllocationRequest {
+  private ResourceAllocationRequestProto proto =
+  ResourceAllocationRequestProto.getDefaultInstance();
+  private ResourceAllocationRequestProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  private Resource capability = null;
+
+  public ResourceAllocationRequestPBImpl() {
+builder = ResourceAllocationRequestProto.newBuilder();
+  }
+
+  public ResourceAllocationRequestPBImpl(
+  ResourceAllocationRequestProto proto) {
+this.proto = proto;
+viaProto = true;
+  }
+
+  public ResourceAllocationRequestProto getProto() {
+mergeLocalToProto();
+proto = viaProto ? proto : builder.build();
+viaProto = true;
+return proto;
+  }
+
+  private void maybeInitBuilder() {
+if (viaProto || builder == null) {
+  builder = ResourceAllocationRequestProto.newBuilder(proto);
+}
+viaProto = false;
+  }
+
+  @Override
+  public Resource getCapability() {
+ResourceAllocationRequestProtoOrBuilder p = viaProto ? proto : builder;
+if (this.capability != null) {
+  return this.capability;
+}
+if (!p.hasResource()) {
+  return null;
+}
+this.capability = convertFromProtoFormat(p.getResource());
+return this.capability;
+  }
+
+  @Override
+  public void setCapability(Resource newCapability) {
+maybeInitBuilder();
+if (newCapability == null) {
+  builder.clearResource();
+  return;
+}
+capability = newCapability;
+  }
+
+  @Override
+  public long getStartTime() {
+ResourceAllocationRequestProtoOrBuilder p = viaProto ? proto : builder;
+if (!p.hasStartTime()) {
+  return 0;
+}
+return (p.getStartTime());
+  }
+
+  @Override
+  public void setStartTime(long startTime) {
+maybeInitBuilder();
+if (startTime <= 0) {
+  builder.clearStartTime();
+  return;
+}
+builder.setStartTime(startTime);
+  }
+
+  @Override
+  public long getEndTime() {
+ResourceAllocationRequestProtoOrBuilder p = viaProto ? proto : builder;
+if (!p.hasEndTime()) {
+  return 0;
+}
+return (p.getEndTime());
+  }
+
+  @Override
+ 

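ResourceAllocationRequestPBImpl above follows YARN's standard PBImpl recipe: hold either an immutable proto or a mutable builder as the source of truth, track which one with viaProto, fork the builder lazily in maybeInitBuilder() before any mutation, and fold pending edits back into the proto in getProto(). A dependency-free miniature of that state machine (plain classes stand in for the generated protobuf types):

public class PBImplSketch {
  static final class Proto {    // immutable snapshot
    final long startTime;
    Proto(long startTime) { this.startTime = startTime; }
  }

  static final class Builder {  // mutable staging area
    long startTime;
    Builder(Proto p) { this.startTime = p.startTime; }
    Proto build() { return new Proto(startTime); }
  }

  private Proto proto = new Proto(0);
  private Builder builder = null;
  private boolean viaProto = true;  // true => 'proto' is authoritative

  // Before any mutation: lazily fork a builder from the current proto.
  private void maybeInitBuilder() {
    if (viaProto || builder == null) {
      builder = new Builder(proto);
    }
    viaProto = false;  // 'builder' is authoritative from here on
  }

  public void setStartTime(long t) {
    maybeInitBuilder();
    builder.startTime = t;
  }

  public long getStartTime() {
    return viaProto ? proto.startTime : builder.startTime;
  }

  // Freeze any pending edits back into an immutable proto.
  public Proto getProto() {
    proto = viaProto ? proto : builder.build();
    viaProto = true;
    return proto;
  }

  public static void main(String[] args) {
    PBImplSketch r = new PBImplSketch();
    r.setStartTime(42);
    System.out.println(r.getProto().startTime);  // 42
  }
}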
[1/3] hadoop git commit: YARN-4340. Add list API to reservation system. (Sean Po via wangda)

2016-02-01 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk ed5595016 -> 9875325d5


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9875325d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
index 1756e86..bc98e2f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.yarn.api.records.ReservationDefinition;
 import org.apache.hadoop.yarn.api.records.ReservationId;
@@ -98,19 +99,11 @@ public class TestInMemoryPlan {
 new InMemoryPlan(queueMetrics, policy, agent, totalCapacity, 1L,
 resCalc, minAlloc, maxAlloc, planName, replanner, true, context);
 ReservationId reservationID =
-ReservationSystemTestUtil.getNewReservationId();
+ReservationSystemTestUtil.getNewReservationId();
 int[] alloc = { 10, 10, 10, 10, 10, 10 };
 int start = 100;
-Map allocations =
-generateAllocation(start, alloc, false);
-ReservationDefinition rDef =
-createSimpleReservationDefinition(start, start + alloc.length,
-alloc.length, allocations.values());
-Map allocs =
-ReservationSystemUtil.toResources(allocations);
-ReservationAllocation rAllocation =
-new InMemoryReservationAllocation(reservationID, rDef, user, planName,
-start, start + alloc.length, allocs, resCalc, minAlloc);
+ReservationAllocation rAllocation = createReservationAllocation
+(reservationID, start, alloc);
 Assert.assertNull(plan.getReservationById(reservationID));
 try {
   plan.addReservation(rAllocation, false);
@@ -139,19 +132,11 @@ public class TestInMemoryPlan {
 new InMemoryPlan(queueMetrics, policy, agent, totalCapacity, 1L,
 resCalc, minAlloc, maxAlloc, planName, replanner, true, context);
 ReservationId reservationID =
-ReservationSystemTestUtil.getNewReservationId();
+ReservationSystemTestUtil.getNewReservationId();
 int[] alloc = {};
 int start = 100;
-Map allocations =
-new HashMap();
-ReservationDefinition rDef =
-createSimpleReservationDefinition(start, start + alloc.length,
-alloc.length, allocations.values());
-Map allocs = ReservationSystemUtil.toResources
-(allocations);
-ReservationAllocation rAllocation =
-new InMemoryReservationAllocation(reservationID, rDef, user, planName,
-start, start + alloc.length, allocs, resCalc, minAlloc);
+ReservationAllocation rAllocation = createReservationAllocation
+(reservationID, start, alloc);
 Assert.assertNull(plan.getReservationById(reservationID));
 try {
   plan.addReservation(rAllocation, false);
@@ -167,19 +152,11 @@ public class TestInMemoryPlan {
 new InMemoryPlan(queueMetrics, policy, agent, totalCapacity, 1L,
 resCalc, minAlloc, maxAlloc, planName, replanner, true, context);
 ReservationId reservationID =
-ReservationSystemTestUtil.getNewReservationId();
+ReservationSystemTestUtil.getNewReservationId();
 int[] alloc = { 10, 10, 10, 10, 10, 10 };
 int start = 100;
-Map allocations =
-generateAllocation(start, alloc, false);
-ReservationDefinition rDef =
-createSimpleReservationDefinition(start, start + alloc.length,
-alloc.length, allocations.values());
-Map allocs = ReservationSystemUtil.toResources
-(allocations);
-ReservationAllocation rAllocation =
-new InMemoryReservationAllocation(reservationID, rDef, user, planName,
-start, start + alloc.length, allocs, resCalc, minAlloc);
+ReservationAllocation rAllocation = createReservationAllocation
+

[1/3] hadoop git commit: YARN-4340. Add list API to reservation system. (Sean Po via wangda)

2016-02-01 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 36aae8050 -> 9591363ad


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9591363a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
index 1756e86..bc98e2f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.yarn.api.records.ReservationDefinition;
 import org.apache.hadoop.yarn.api.records.ReservationId;
@@ -98,19 +99,11 @@ public class TestInMemoryPlan {
 new InMemoryPlan(queueMetrics, policy, agent, totalCapacity, 1L,
 resCalc, minAlloc, maxAlloc, planName, replanner, true, context);
 ReservationId reservationID =
-ReservationSystemTestUtil.getNewReservationId();
+ReservationSystemTestUtil.getNewReservationId();
 int[] alloc = { 10, 10, 10, 10, 10, 10 };
 int start = 100;
-Map allocations =
-generateAllocation(start, alloc, false);
-ReservationDefinition rDef =
-createSimpleReservationDefinition(start, start + alloc.length,
-alloc.length, allocations.values());
-Map allocs =
-ReservationSystemUtil.toResources(allocations);
-ReservationAllocation rAllocation =
-new InMemoryReservationAllocation(reservationID, rDef, user, planName,
-start, start + alloc.length, allocs, resCalc, minAlloc);
+ReservationAllocation rAllocation = createReservationAllocation
+(reservationID, start, alloc);
 Assert.assertNull(plan.getReservationById(reservationID));
 try {
   plan.addReservation(rAllocation, false);
@@ -139,19 +132,11 @@ public class TestInMemoryPlan {
 new InMemoryPlan(queueMetrics, policy, agent, totalCapacity, 1L,
 resCalc, minAlloc, maxAlloc, planName, replanner, true, context);
 ReservationId reservationID =
-ReservationSystemTestUtil.getNewReservationId();
+ReservationSystemTestUtil.getNewReservationId();
 int[] alloc = {};
 int start = 100;
-Map allocations =
-new HashMap();
-ReservationDefinition rDef =
-createSimpleReservationDefinition(start, start + alloc.length,
-alloc.length, allocations.values());
-Map allocs = ReservationSystemUtil.toResources
-(allocations);
-ReservationAllocation rAllocation =
-new InMemoryReservationAllocation(reservationID, rDef, user, planName,
-start, start + alloc.length, allocs, resCalc, minAlloc);
+ReservationAllocation rAllocation = createReservationAllocation
+(reservationID, start, alloc);
 Assert.assertNull(plan.getReservationById(reservationID));
 try {
   plan.addReservation(rAllocation, false);
@@ -167,19 +152,11 @@ public class TestInMemoryPlan {
 new InMemoryPlan(queueMetrics, policy, agent, totalCapacity, 1L,
 resCalc, minAlloc, maxAlloc, planName, replanner, true, context);
 ReservationId reservationID =
-ReservationSystemTestUtil.getNewReservationId();
+ReservationSystemTestUtil.getNewReservationId();
 int[] alloc = { 10, 10, 10, 10, 10, 10 };
 int start = 100;
-Map allocations =
-generateAllocation(start, alloc, false);
-ReservationDefinition rDef =
-createSimpleReservationDefinition(start, start + alloc.length,
-alloc.length, allocations.values());
-Map allocs = ReservationSystemUtil.toResources
-(allocations);
-ReservationAllocation rAllocation =
-new InMemoryReservationAllocation(reservationID, rDef, user, planName,
-start, start + alloc.length, allocs, resCalc, minAlloc);
+ReservationAllocation rAllocation = createReservationAllocation
+

[3/3] hadoop git commit: YARN-4340. Add list API to reservation system. (Sean Po via wangda)

2016-02-01 Thread wangda
YARN-4340. Add list API to reservation system. (Sean Po via wangda)

(cherry picked from commit 9875325d5c63f343809907d06bf48a298035a611)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9591363a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9591363a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9591363a

Branch: refs/heads/branch-2
Commit: 9591363ad8f49fb3c1f2f11285508548c3067e97
Parents: 36aae80
Author: Wangda Tan 
Authored: Tue Feb 2 10:17:33 2016 +0800
Committer: Wangda Tan 
Committed: Tue Feb 2 10:19:19 2016 +0800

----------------------------------------------------------------------
 .../hadoop/mapred/ResourceMgrDelegate.java  |   7 +
 .../hadoop/mapred/TestClientRedirect.java   |   8 +
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../yarn/api/ApplicationClientProtocol.java |  49 ++-
 .../protocolrecords/ReservationListRequest.java | 228 ++
 .../ReservationListResponse.java|  79 +
 .../api/records/ReservationAllocationState.java | 191 +++
 .../api/records/ResourceAllocationRequest.java  | 123 
 .../main/proto/applicationclient_protocol.proto |   1 +
 .../src/main/proto/yarn_protos.proto|  17 +
 .../src/main/proto/yarn_service_protos.proto|  12 +
 .../hadoop/yarn/client/api/YarnClient.java  |  49 ++-
 .../yarn/client/api/impl/YarnClientImpl.java|  10 +-
 .../yarn/client/api/impl/TestYarnClient.java| 173 ++
 .../ApplicationClientProtocolPBClientImpl.java  |  17 +
 .../ApplicationClientProtocolPBServiceImpl.java |  24 +-
 .../impl/pb/ReservationListRequestPBImpl.java   | 178 +++
 .../impl/pb/ReservationListResponsePBImpl.java  | 157 ++
 .../pb/ReservationAllocationStatePBImpl.java| 288 +
 .../pb/ResourceAllocationRequestPBImpl.java | 188 +++
 .../hadoop/yarn/api/TestPBImplRecords.java  |  22 +-
 .../amrmproxy/MockResourceManagerFacade.java|  14 +-
 .../server/resourcemanager/ClientRMService.java |  44 +++
 .../server/resourcemanager/RMAuditLogger.java   |   2 +
 .../recovery/FileSystemRMStateStore.java|   2 +-
 .../recovery/LeveldbRMStateStore.java   |   2 +-
 .../recovery/MemoryRMStateStore.java|   2 +-
 .../recovery/NullRMStateStore.java  |   2 +-
 .../resourcemanager/recovery/RMStateStore.java  |   2 +-
 .../RMStateStoreStoreReservationEvent.java  |   2 +-
 .../recovery/ZKRMStateStore.java|   2 +-
 .../reservation/AbstractReservationSystem.java  |   2 +-
 .../reservation/InMemoryPlan.java   |  71 +++--
 .../resourcemanager/reservation/PlanView.java   |  17 +
 .../reservation/ReservationInputValidator.java  | 122 ---
 .../reservation/ReservationSystemUtil.java  |  45 ++-
 .../yarn_server_resourcemanager_recovery.proto  |  16 -
 .../resourcemanager/TestClientRMService.java| 179 +++
 .../TestReservationSystemWithRMHA.java  |   2 +-
 .../recovery/RMStateStoreTestBase.java  |   6 +-
 .../reservation/TestInMemoryPlan.java   | 314 +--
 .../TestReservationInputValidator.java  | 103 +-
 .../reservation/TestReservationSystemUtil.java  | 134 
 43 files changed, 2702 insertions(+), 206 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9591363a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
----------------------------------------------------------------------
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index e32b398..21c0d0f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -46,6 +46,8 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse;
 

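For callers, the file list above shows the new surface: YarnClient#listReservations plus the ReservationListRequest/ReservationListResponse records. A hedged usage sketch against that API — the two-argument ReservationListRequest.newInstance signature and the queue name "dedicated" are assumptions made for illustration:

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse;
import org.apache.hadoop.yarn.api.records.ReservationAllocationState;
import org.apache.hadoop.yarn.client.api.YarnClient;

public class ListReservationsSketch {
  public static void main(String[] args) throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new Configuration());
    client.start();
    try {
      // Ask for every reservation in a plan/queue; passing null for the
      // reservation id is assumed to mean "no id filter".
      ReservationListRequest req =
          ReservationListRequest.newInstance("dedicated", null);
      ReservationListResponse resp = client.listReservations(req);
      List<ReservationAllocationState> states =
          resp.getReservationAllocationState();
      for (ReservationAllocationState s : states) {
        System.out.println(s.getReservationId());
      }
    } finally {
      client.stop();
    }
  }
}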
[2/3] hadoop git commit: YARN-4340. Add list API to reservation system. (Sean Po via wangda)

2016-02-01 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9591363a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceAllocationRequestPBImpl.java
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceAllocationRequestPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceAllocationRequestPBImpl.java
new file mode 100644
index 000..737ae44
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceAllocationRequestPBImpl.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ResourceAllocationRequest;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceAllocationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceAllocationRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+
+/**
+ * {@code ResourceAllocationRequestPBImpl} which implements the
+ * {@link ResourceAllocationRequest} class which represents an allocation
+ * made for a reservation for the current state of the plan. This can be
+ * changed for reasons such as re-planning, but will always be subject to the
+ * constraints of the user contract as described by a
+ * {@code ReservationDefinition}
+ * {@link Resource}
+ *
+ * <p>
+ * It includes:
+ * <ul>
+ *   <li>StartTime of the allocation.</li>
+ *   <li>EndTime of the allocation.</li>
+ *   <li>{@link Resource} reserved for the allocation.</li>
+ * </ul>
+ *
+ * @see Resource
+ */
+@Private
+@Unstable
+public class ResourceAllocationRequestPBImpl extends
+ResourceAllocationRequest {
+  private ResourceAllocationRequestProto proto =
+  ResourceAllocationRequestProto.getDefaultInstance();
+  private ResourceAllocationRequestProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  private Resource capability = null;
+
+  public ResourceAllocationRequestPBImpl() {
+builder = ResourceAllocationRequestProto.newBuilder();
+  }
+
+  public ResourceAllocationRequestPBImpl(
+  ResourceAllocationRequestProto proto) {
+this.proto = proto;
+viaProto = true;
+  }
+
+  public ResourceAllocationRequestProto getProto() {
+mergeLocalToProto();
+proto = viaProto ? proto : builder.build();
+viaProto = true;
+return proto;
+  }
+
+  private void maybeInitBuilder() {
+if (viaProto || builder == null) {
+  builder = ResourceAllocationRequestProto.newBuilder(proto);
+}
+viaProto = false;
+  }
+
+  @Override
+  public Resource getCapability() {
+ResourceAllocationRequestProtoOrBuilder p = viaProto ? proto : builder;
+if (this.capability != null) {
+  return this.capability;
+}
+if (!p.hasResource()) {
+  return null;
+}
+this.capability = convertFromProtoFormat(p.getResource());
+return this.capability;
+  }
+
+  @Override
+  public void setCapability(Resource newCapability) {
+maybeInitBuilder();
+if (newCapability == null) {
+  builder.clearResource();
+  return;
+}
+capability = newCapability;
+  }
+
+  @Override
+  public long getStartTime() {
+ResourceAllocationRequestProtoOrBuilder p = viaProto ? proto : builder;
+if (!p.hasStartTime()) {
+  return 0;
+}
+return (p.getStartTime());
+  }
+
+  @Override
+  public void setStartTime(long startTime) {
+maybeInitBuilder();
+if (startTime <= 0) {
+  builder.clearStartTime();
+  return;
+}
+builder.setStartTime(startTime);
+  }
+
+  @Override
+  public long getEndTime() {
+ResourceAllocationRequestProtoOrBuilder p = viaProto ? proto : builder;
+if (!p.hasEndTime()) {
+  return 0;
+}
+return (p.getEndTime());
+  }
+
+  @Override
+ 
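
Aside: the PBImpl class above follows YARN's usual proto/builder/viaProto
copy-on-write idiom. A condensed restatement of the idiom with explanatory
comments, for readers unfamiliar with it (this is a sketch of the code
already quoted above, not new code from the patch):

    // Reads consult whichever of proto/builder is current; the first
    // write copies the immutable proto into a mutable builder.
    private ResourceAllocationRequestProto proto =
        ResourceAllocationRequestProto.getDefaultInstance();
    private ResourceAllocationRequestProto.Builder builder = null;
    private boolean viaProto = false;   // true => proto is authoritative

    private void maybeInitBuilder() {
      if (viaProto || builder == null) {
        builder = ResourceAllocationRequestProto.newBuilder(proto);
      }
      viaProto = false;                 // builder is now authoritative
    }

    public void setStartTime(long startTime) {
      maybeInitBuilder();               // mutations must hit the builder
      builder.setStartTime(startTime);
    }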

[3/3] hadoop git commit: YARN-4340. Add list API to reservation system. (Sean Po via wangda)

2016-02-01 Thread wangda
YARN-4340. Add list API to reservation system. (Sean Po via wangda)

(cherry picked from commit 9875325d5c63f343809907d06bf48a298035a611)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c487453b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c487453b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c487453b

Branch: refs/heads/branch-2.8
Commit: c487453b91bac34ee9f9d98406c6e9dfbe8d38ab
Parents: 9862879
Author: Wangda Tan 
Authored: Tue Feb 2 10:17:33 2016 +0800
Committer: Wangda Tan 
Committed: Tue Feb 2 10:23:25 2016 +0800

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |   7 +
 .../hadoop/mapred/TestClientRedirect.java   |   8 +
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../yarn/api/ApplicationClientProtocol.java |  49 ++-
 .../protocolrecords/ReservationListRequest.java | 228 ++
 .../ReservationListResponse.java|  79 +
 .../api/records/ReservationAllocationState.java | 191 +++
 .../api/records/ResourceAllocationRequest.java  | 123 
 .../main/proto/applicationclient_protocol.proto |   1 +
 .../src/main/proto/yarn_protos.proto|  17 +
 .../src/main/proto/yarn_service_protos.proto|  12 +
 .../hadoop/yarn/client/api/YarnClient.java  |  49 ++-
 .../yarn/client/api/impl/YarnClientImpl.java|  10 +-
 .../yarn/client/api/impl/TestYarnClient.java| 173 ++
 .../ApplicationClientProtocolPBClientImpl.java  |  17 +
 .../ApplicationClientProtocolPBServiceImpl.java |  24 +-
 .../impl/pb/ReservationListRequestPBImpl.java   | 178 +++
 .../impl/pb/ReservationListResponsePBImpl.java  | 157 ++
 .../pb/ReservationAllocationStatePBImpl.java| 288 +
 .../pb/ResourceAllocationRequestPBImpl.java | 188 +++
 .../hadoop/yarn/api/TestPBImplRecords.java  |  22 +-
 .../amrmproxy/MockResourceManagerFacade.java|  14 +-
 .../server/resourcemanager/ClientRMService.java |  44 +++
 .../server/resourcemanager/RMAuditLogger.java   |   2 +
 .../recovery/FileSystemRMStateStore.java|   2 +-
 .../recovery/LeveldbRMStateStore.java   |   2 +-
 .../recovery/MemoryRMStateStore.java|   2 +-
 .../recovery/NullRMStateStore.java  |   2 +-
 .../resourcemanager/recovery/RMStateStore.java  |   2 +-
 .../RMStateStoreStoreReservationEvent.java  |   2 +-
 .../recovery/ZKRMStateStore.java|   2 +-
 .../reservation/AbstractReservationSystem.java  |   2 +-
 .../reservation/InMemoryPlan.java   |  71 +++--
 .../resourcemanager/reservation/PlanView.java   |  17 +
 .../reservation/ReservationInputValidator.java  | 122 ---
 .../reservation/ReservationSystemUtil.java  |  45 ++-
 .../yarn_server_resourcemanager_recovery.proto  |  16 -
 .../resourcemanager/TestClientRMService.java| 179 +++
 .../TestReservationSystemWithRMHA.java  |   2 +-
 .../recovery/RMStateStoreTestBase.java  |   6 +-
 .../reservation/TestInMemoryPlan.java   | 314 +--
 .../TestReservationInputValidator.java  | 103 +-
 .../reservation/TestReservationSystemUtil.java  | 134 
 43 files changed, 2702 insertions(+), 206 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c487453b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index f5fd0cb..54e1549 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -46,6 +46,8 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse;
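
The user-facing entry point added by this patch is
YarnClient#listReservations, visible in the diffstat above. A minimal
sketch of a caller (the ReservationListRequest.newInstance parameters and
the response getter names below follow the usual YARN record conventions
and are assumptions, not quoted from the patch; "dedicated" is a
hypothetical queue name):

    import org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse;
    import org.apache.hadoop.yarn.api.records.ReservationAllocationState;
    import org.apache.hadoop.yarn.client.api.YarnClient;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    try {
      // List every reservation in the "dedicated" reservable queue.
      ReservationListRequest request =
          ReservationListRequest.newInstance("dedicated", null);
      ReservationListResponse response = yarnClient.listReservations(request);
      for (ReservationAllocationState state :
          response.getReservationAllocationState()) {
        System.out.println("reservation: " + state.getReservationId());
      }
    } finally {
      yarnClient.stop();
    }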
 

[2/3] hadoop git commit: YARN-4340. Add list API to reservation system. (Sean Po via wangda)

2016-02-01 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c487453b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceAllocationRequestPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceAllocationRequestPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceAllocationRequestPBImpl.java
new file mode 100644
index 000..737ae44
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceAllocationRequestPBImpl.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ResourceAllocationRequest;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceAllocationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceAllocationRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+
+/**
+ * {@code ResourceAllocationRequestPBImpl} which implements the
+ * {@link ResourceAllocationRequest} class which represents an allocation
+ * made for a reservation for the current state of the plan. This can be
+ * changed for reasons such as re-planning, but will always be subject to the
+ * constraints of the user contract as described by a
+ * {@code ReservationDefinition}
+ * {@link Resource}
+ *
+ * <p>
+ * It includes:
+ * <ul>
+ *   <li>StartTime of the allocation.</li>
+ *   <li>EndTime of the allocation.</li>
+ *   <li>{@link Resource} reserved for the allocation.</li>
+ * </ul>
+ *
+ * @see Resource
+ */
+@Private
+@Unstable
+public class ResourceAllocationRequestPBImpl extends
+ResourceAllocationRequest {
+  private ResourceAllocationRequestProto proto =
+  ResourceAllocationRequestProto.getDefaultInstance();
+  private ResourceAllocationRequestProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  private Resource capability = null;
+
+  public ResourceAllocationRequestPBImpl() {
+builder = ResourceAllocationRequestProto.newBuilder();
+  }
+
+  public ResourceAllocationRequestPBImpl(
+  ResourceAllocationRequestProto proto) {
+this.proto = proto;
+viaProto = true;
+  }
+
+  public ResourceAllocationRequestProto getProto() {
+mergeLocalToProto();
+proto = viaProto ? proto : builder.build();
+viaProto = true;
+return proto;
+  }
+
+  private void maybeInitBuilder() {
+if (viaProto || builder == null) {
+  builder = ResourceAllocationRequestProto.newBuilder(proto);
+}
+viaProto = false;
+  }
+
+  @Override
+  public Resource getCapability() {
+ResourceAllocationRequestProtoOrBuilder p = viaProto ? proto : builder;
+if (this.capability != null) {
+  return this.capability;
+}
+if (!p.hasResource()) {
+  return null;
+}
+this.capability = convertFromProtoFormat(p.getResource());
+return this.capability;
+  }
+
+  @Override
+  public void setCapability(Resource newCapability) {
+maybeInitBuilder();
+if (newCapability == null) {
+  builder.clearResource();
+  return;
+}
+capability = newCapability;
+  }
+
+  @Override
+  public long getStartTime() {
+ResourceAllocationRequestProtoOrBuilder p = viaProto ? proto : builder;
+if (!p.hasStartTime()) {
+  return 0;
+}
+return (p.getStartTime());
+  }
+
+  @Override
+  public void setStartTime(long startTime) {
+maybeInitBuilder();
+if (startTime <= 0) {
+  builder.clearStartTime();
+  return;
+}
+builder.setStartTime(startTime);
+  }
+
+  @Override
+  public long getEndTime() {
+ResourceAllocationRequestProtoOrBuilder p = viaProto ? proto : builder;
+if (!p.hasEndTime()) {
+  return 0;
+}
+return (p.getEndTime());
+  }
+
+  @Override
+ 

[1/3] hadoop git commit: HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final DatanodeStorage.State state)' method (Contributed by Daryn Sharp)

2016-02-01 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 676f21559 -> 71374cca9
  refs/heads/branch-2.8 a9916057e -> 6eae76f7a
  refs/heads/trunk 2673cbaf5 -> e418bd1fb


HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final 
DatanodeStorage.State state)' method (Contributed by Daryn Sharp)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e418bd1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e418bd1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e418bd1f

Branch: refs/heads/trunk
Commit: e418bd1fb0568ce7ae22f588fea2dd9c95567383
Parents: 2673cba
Author: Vinayakumar B 
Authored: Mon Feb 1 13:24:05 2016 +0530
Committer: Vinayakumar B 
Committed: Mon Feb 1 13:24:05 2016 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../server/blockmanagement/BlockManager.java| 16 
 .../hdfs/server/blockmanagement/BlocksMap.java  | 20 
 3 files changed, 15 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e418bd1f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f7487fe..432e686 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2655,6 +2655,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9682. Fix a typo "aplication" in HttpFS document.
 (Weiwei Yang via aajisaka)
 
+HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final
+DatanodeStorage.State state)' method (Daryn Sharp via vinayakumarb)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e418bd1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a76429e..587e6b6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1212,8 +1212,10 @@ public class BlockManager implements BlockStatsMXBean {
   return;
 }
 StringBuilder datanodes = new StringBuilder();
-for(DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock,
-State.NORMAL)) {
+for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
+  if (storage.getState() != State.NORMAL) {
+continue;
+  }
   final DatanodeDescriptor node = storage.getDatanodeDescriptor();
   final Block b = getBlockOnStorage(storedBlock, storage);
   if (b != null) {
@@ -3164,7 +3166,10 @@ public class BlockManager implements BlockStatsMXBean {
 Collection<DatanodeStorageInfo> nonExcess = new ArrayList<>();
 Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
 .getNodes(block);
-for(DatanodeStorageInfo storage : blocksMap.getStorages(block, State.NORMAL)) {
+for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
+  if (storage.getState() != State.NORMAL) {
+continue;
+  }
   final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
   if (storage.areBlockContentsStale()) {
 LOG.trace("BLOCK* processOverReplicatedBlock: Postponing {}"
@@ -3665,7 +3670,10 @@ public class BlockManager implements BlockStatsMXBean {
 // else proceed with fast case
 int live = 0;
 Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
-for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
+for (DatanodeStorageInfo storage : blocksMap.getStorages(b)) {
+  if (storage.getState() != State.NORMAL) {
+continue;
+  }
   final DatanodeDescriptor node = storage.getDatanodeDescriptor();
   if ((nodesCorrupt == null) || (!nodesCorrupt.contains(node)))
 live++;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e418bd1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index 
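
For readers without the full tree at hand: the removed two-argument
BlocksMap#getStorages wrapped every call in a filtered Iterable, which is
what made it expensive on these hot paths; the replacement iterates the
raw storages and skips non-NORMAL ones inline. Roughly what the removed
overload looked like (a reconstruction for context; the exact body is not
quoted in this digest):

    import com.google.common.base.Predicate;
    import com.google.common.collect.Iterables;

    // Removed by HDFS-9566: allocates an Iterable view plus a Predicate
    // on every call, only to skip a handful of non-NORMAL storages.
    Iterable<DatanodeStorageInfo> getStorages(Block b, final State state) {
      return Iterables.filter(getStorages(b),
          new Predicate<DatanodeStorageInfo>() {
            @Override
            public boolean apply(DatanodeStorageInfo storage) {
              return storage.getState() == state;
            }
          });
    }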

[3/3] hadoop git commit: HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final DatanodeStorage.State state)' method (Contributed by Daryn Sharp)

2016-02-01 Thread vinayakumarb
HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final 
DatanodeStorage.State state)' method (Contributed by Daryn Sharp)

(cherry picked from commit e418bd1fb0568ce7ae22f588fea2dd9c95567383)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

(cherry picked from commit 71374cca9d0981ac21bb588b85cbc3b0b8908be0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6eae76f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6eae76f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6eae76f7

Branch: refs/heads/branch-2.8
Commit: 6eae76f7a5c3d1a556291e705c360b9baa4fcb7a
Parents: a991605
Author: Vinayakumar B 
Authored: Mon Feb 1 13:24:05 2016 +0530
Committer: Vinayakumar B 
Committed: Mon Feb 1 13:32:59 2016 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../server/blockmanagement/BlockManager.java| 15 ---
 .../hdfs/server/blockmanagement/BlocksMap.java  | 20 
 3 files changed, 15 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eae76f7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d0813c4..a1c5794 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1684,6 +1684,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9682. Fix a typo "aplication" in HttpFS document.
 (Weiwei Yang via aajisaka)
 
+HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final
+DatanodeStorage.State state)' method (Daryn Sharp via vinayakumarb)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eae76f7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index b477fad..e6ec759 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1137,7 +1137,10 @@ public class BlockManager implements BlockStatsMXBean {
   return;
 }
 StringBuilder datanodes = new StringBuilder();
-for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
+for (DatanodeStorageInfo storage : blocksMap.getStorages(b)) {
+  if (storage.getState() != State.NORMAL) {
+continue;
+  }
   final DatanodeDescriptor node = storage.getDatanodeDescriptor();
   invalidateBlocks.add(b, node, false);
   datanodes.append(node).append(" ");
@@ -2912,7 +2915,10 @@ public class BlockManager implements BlockStatsMXBean {
 Collection<DatanodeStorageInfo> nonExcess = new ArrayList<DatanodeStorageInfo>();
 Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
 .getNodes(block);
-for(DatanodeStorageInfo storage : blocksMap.getStorages(block, State.NORMAL)) {
+for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
+  if (storage.getState() != State.NORMAL) {
+continue;
+  }
   final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
   if (storage.areBlockContentsStale()) {
 LOG.trace("BLOCK* processOverReplicatedBlock: Postponing {}"
@@ -3294,7 +3300,10 @@ public class BlockManager implements BlockStatsMXBean {
 // else proceed with fast case
 int live = 0;
 Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
-for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
+for (DatanodeStorageInfo storage : blocksMap.getStorages(b)) {
+  if (storage.getState() != State.NORMAL) {
+continue;
+  }
   final DatanodeDescriptor node = storage.getDatanodeDescriptor();
   if ((nodesCorrupt == null) || (!nodesCorrupt.contains(node)))
 live++;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eae76f7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
 

[2/3] hadoop git commit: HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final DatanodeStorage.State state)' method (Contributed by Daryn Sharp)

2016-02-01 Thread vinayakumarb
HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final 
DatanodeStorage.State state)' method (Contributed by Daryn Sharp)

(cherry picked from commit e418bd1fb0568ce7ae22f588fea2dd9c95567383)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71374cca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71374cca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71374cca

Branch: refs/heads/branch-2
Commit: 71374cca9d0981ac21bb588b85cbc3b0b8908be0
Parents: 676f215
Author: Vinayakumar B 
Authored: Mon Feb 1 13:24:05 2016 +0530
Committer: Vinayakumar B 
Committed: Mon Feb 1 13:32:00 2016 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../server/blockmanagement/BlockManager.java| 15 ---
 .../hdfs/server/blockmanagement/BlocksMap.java  | 20 
 3 files changed, 15 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71374cca/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index aaab8ae..6b80233 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1766,6 +1766,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9682. Fix a typo "aplication" in HttpFS document.
 (Weiwei Yang via aajisaka)
 
+HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final
+DatanodeStorage.State state)' method (Daryn Sharp via vinayakumarb)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71374cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 704db4e..e88c18d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1133,7 +1133,10 @@ public class BlockManager implements BlockStatsMXBean {
   return;
 }
 StringBuilder datanodes = new StringBuilder();
-for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
+for (DatanodeStorageInfo storage : blocksMap.getStorages(b)) {
+  if (storage.getState() != State.NORMAL) {
+continue;
+  }
   final DatanodeDescriptor node = storage.getDatanodeDescriptor();
   invalidateBlocks.add(b, node, false);
   datanodes.append(node).append(" ");
@@ -2962,7 +2965,10 @@ public class BlockManager implements BlockStatsMXBean {
 Collection<DatanodeStorageInfo> nonExcess = new ArrayList<DatanodeStorageInfo>();
 Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
 .getNodes(block);
-for(DatanodeStorageInfo storage : blocksMap.getStorages(block, State.NORMAL)) {
+for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
+  if (storage.getState() != State.NORMAL) {
+continue;
+  }
   final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
   if (storage.areBlockContentsStale()) {
 LOG.trace("BLOCK* processOverReplicatedBlock: Postponing {}"
@@ -3343,7 +3349,10 @@ public class BlockManager implements BlockStatsMXBean {
 // else proceed with fast case
 int live = 0;
 Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
-for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
+for (DatanodeStorageInfo storage : blocksMap.getStorages(b)) {
+  if (storage.getState() != State.NORMAL) {
+continue;
+  }
   final DatanodeDescriptor node = storage.getDatanodeDescriptor();
   if ((nodesCorrupt == null) || (!nodesCorrupt.contains(node)))
 live++;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71374cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index 

hadoop git commit: HDFS-9659. EditLogTailerThread to Active Namenode RPC should timeout (Contributed by surendra singh lilhore)

2016-02-01 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk e418bd1fb -> 8f2622b6a


HDFS-9659. EditLogTailerThread to Active Namenode RPC should timeout 
(Contributed by surendra singh lilhore)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f2622b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f2622b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f2622b6

Branch: refs/heads/trunk
Commit: 8f2622b6a0603f92e8b5784879da28d3d5797fc1
Parents: e418bd1
Author: Vinayakumar B 
Authored: Mon Feb 1 14:10:55 2016 +0530
Committer: Vinayakumar B 
Committed: Mon Feb 1 14:10:55 2016 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java   | 6 +-
 2 files changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f2622b6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 432e686..5a8b525 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -903,6 +903,9 @@ Trunk (Unreleased)
   HDFS-9575. Use byte array for internal block indices in a striped block.
   (jing9 via szetszwo)
 
+  HDFS-9659. EditLogTailerThread to Active Namenode RPC should timeout
+  (surendra singh lilhore via vinayakumarb)
+
 Release 2.9.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f2622b6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index 6e60dba..405bf4f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -450,8 +450,12 @@ public class EditLogTailer {
 
   currentNN = nnLookup.next();
   try {
+int rpcTimeout = conf.getInt(
+DFSConfigKeys.DFS_HA_LOGROLL_RPC_TIMEOUT_KEY,
+DFSConfigKeys.DFS_HA_LOGROLL_RPC_TIMEOUT_DEFAULT);
 NamenodeProtocolPB proxy = RPC.waitForProxy(NamenodeProtocolPB.class,
-RPC.getProtocolVersion(NamenodeProtocolPB.class), currentNN.getIpcAddress(), conf);
+RPC.getProtocolVersion(NamenodeProtocolPB.class), currentNN.getIpcAddress(), conf,
+rpcTimeout, Long.MAX_VALUE);
 cachedActiveProxy = new NamenodeProtocolTranslatorPB(proxy);
 break;
   } catch (IOException e) {
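
The timeout itself comes from the new DFSConfigKeys pair
(DFS_HA_LOGROLL_RPC_TIMEOUT_KEY / DFS_HA_LOGROLL_RPC_TIMEOUT_DEFAULT)
read above, so it is tunable without a code change. A minimal sketch,
assuming the key string is "dfs.ha.log-roll.rpc.timeout" and a
millisecond unit (the constant's literal value is not quoted in this
digest):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    // Cap the standby NameNode's log-roll RPC wait at 20 seconds rather
    // than letting the EditLogTailer thread block indefinitely.
    Configuration conf = new HdfsConfiguration();
    conf.setInt("dfs.ha.log-roll.rpc.timeout", 20000);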



[1/2] hadoop git commit: HDFS-9679: Fix inconsistencies with libhdfs C API. Contributed by James Clampffer

2016-02-01 Thread bobhansen
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-8707 6df167c85 -> 2e0dd25cb


HDFS-9679: Fix inconsistencies with libhdfs C API.  Contributed by James 
Clampffer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0485f0a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0485f0a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0485f0a

Branch: refs/heads/HDFS-8707
Commit: b0485f0a02c7088a209617c8714e603532f282ae
Parents: 6df167c
Author: Bob Hansen 
Authored: Mon Feb 1 07:49:41 2016 -0500
Committer: Bob Hansen 
Committed: Mon Feb 1 07:49:41 2016 -0500

--
 .../main/native/libhdfspp/lib/bindings/c/hdfs.cc| 16 +---
 1 file changed, 9 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0485f0a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
index d5b5d6e..3262c66 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
@@ -121,6 +121,9 @@ static int Error(const Status &stat) {
 case Status::Code::kException:
   ReportError(EINTR, "Exception raised");
   break;
+case Status::Code::kOperationCanceled:
+  ReportError(EINTR, "Operation canceled");
+  break;
 default:
   ReportError(ENOSYS, "Error: unrecognised code");
   }
@@ -147,9 +150,9 @@ bool CheckSystemAndHandle(hdfsFS fs, hdfsFile file) {
 int hdfsFileIsOpenForRead(hdfsFile file) {
   /* files can only be open for reads at the moment, do a quick check */
   if (file) {
-return true; // Update implementation when we get file writing
+return 1; // Update implementation when we get file writing
   }
-  return false;
+  return 0;
 }
 
 hdfsFS hdfsConnect(const char *nn, tPort port) {
@@ -239,6 +242,7 @@ tSize hdfsRead(hdfsFS fs, hdfsFile file, void *buffer, tSize length) {
   return (tSize)len;
 }
 
+/* 0 on success, -1 on error*/
 int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos) {
   if (!CheckSystemAndHandle(fs, file)) {
 return -1;
@@ -250,7 +254,7 @@ int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos) {
 return Error(stat);
   }
 
-  return (int)desired;
+  return 0;
 }
 
 tOffset hdfsTell(hdfsFS fs, hdfsFile file) {
@@ -267,6 +271,7 @@ tOffset hdfsTell(hdfsFS fs, hdfsFile file) {
   return offset;
 }
 
+/* extended API */
 int hdfsCancel(hdfsFS fs, hdfsFile file) {
   if (!CheckSystemAndHandle(fs, file)) {
 return -1;
@@ -340,10 +345,7 @@ int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key,
 
 void hdfsConfStrFree(char *val)
 {
-  if (val)
-  {
-free(val);
-  }
+  free(val);
 }
 
 hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld) {



[2/2] hadoop git commit: HDFS-9712. libhdfs++: Reimplement Status object as a normal struct

2016-02-01 Thread bobhansen
HDFS-9712. libhdfs++: Reimplement Status object as a normal struct


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e0dd25c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e0dd25c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e0dd25c

Branch: refs/heads/HDFS-8707
Commit: 2e0dd25cbab627493e6c60f488adaac3f92a583f
Parents: b0485f0
Author: Bob Hansen 
Authored: Mon Feb 1 07:57:29 2016 -0500
Committer: Bob Hansen 
Committed: Mon Feb 1 07:57:29 2016 -0500

--
 .../native/libhdfspp/include/hdfspp/status.h| 68 +
 .../main/native/libhdfspp/lib/common/status.cc  | 80 
 2 files changed, 64 insertions(+), 84 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e0dd25c/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/status.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/status.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/status.h
index 89be771..a91ac9d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/status.h
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/status.h
@@ -23,43 +23,30 @@
 
 namespace hdfs {
 
-class StatusHelper;
 class Status {
  public:
   // Create a success status.
-  Status() : state_(NULL) { }
-  ~Status() { delete[] state_; }
-  explicit Status(int code, const char *msg);
+  Status() : code_(0) {};
+  Status(int code, const char *msg);
+  Status(int code, const char *msg1, const char *msg2);
 
-  // Copy the specified status.
-  Status(const Status& s);
-  void operator=(const Status& s);
+  // Factory methods
+  static Status OK();
+  static Status InvalidArgument(const char *msg);
+  static Status ResourceUnavailable(const char *msg);
+  static Status Unimplemented();
+  static Status Exception(const char *expception_class_name, const char *error_message);
+  static Status Error(const char *error_message);
+  static Status Canceled();
 
-  // Return a success status.
-  static Status OK() { return Status(); }
-  static Status InvalidArgument(const char *msg)
-  { return Status(kInvalidArgument, msg); }
-  static Status ResourceUnavailable(const char *msg)
-  { return Status(kResourceUnavailable, msg); }
-  static Status Unimplemented()
-  { return Status(kUnimplemented, ""); }
-  static Status Exception(const char *expception_class_name, const char *error_message)
-  { return Status(kException, expception_class_name, error_message); }
-  static Status Error(const char *error_message)
-  { return Exception("Exception", error_message); }
-  static Status Canceled()
-  { return Status(kOperationCanceled,""); }
+  // success
+  bool ok() const { return code_ == 0; }
 
-  // Returns true iff the status indicates success.
-  bool ok() const { return (state_ == NULL); }
-
-  // Return a string representation of this status suitable for printing.
   // Returns the string "OK" for success.
   std::string ToString() const;
 
-  int code() const {
-return (state_ == NULL) ? kOk : static_cast<int>(state_[4]);
-  }
+  // get error code
+  int code() const { return code_; }
 
   enum Code {
 kOk = 0,
@@ -71,31 +58,10 @@ class Status {
   };
 
  private:
-  // OK status has a NULL state_.  Otherwise, state_ is a new[] array
-  // of the following form:
-  //state_[0..3] == length of message
-  //state_[4]== code
-  //state_[5..]  == message
-  const char* state_;
-
-  explicit Status(int code, const char *msg1, const char *msg2);
-  static const char *CopyState(const char* s);
-  static const char *ConstructState(int code, const char *msg1, const char *msg2);
+  int code_;
+  std::string msg_;
 };
 
-inline Status::Status(const Status& s) {
-  state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_);
-}
-
-inline void Status::operator=(const Status& s) {
-  // The following condition catches both aliasing (when this == &s),
-  // and the common case where both s and *this are ok.
-  if (state_ != s.state_) {
-delete[] state_;
-state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_);
-  }
-}
-
 }
 
 #endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e0dd25c/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/status.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/status.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/status.cc
index 828f6aa..eb22247 100644
--- 

hadoop git commit: MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread. Contributed by Xuan Gong (cherry picked from commit 59a212b6e1265adfa9b55c71b65a22157dfccf77)

2016-02-01 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 ac43c5635 -> b52279ca2


MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread. 
Contributed by Xuan Gong
(cherry picked from commit 59a212b6e1265adfa9b55c71b65a22157dfccf77)

Conflicts:

hadoop-mapreduce-project/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b52279ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b52279ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b52279ca

Branch: refs/heads/branch-2.6
Commit: b52279ca281d4df94a3fd8a7b2217f8b959fb6c3
Parents: ac43c56
Author: Jason Lowe 
Authored: Mon Feb 1 16:12:49 2016 +
Committer: Jason Lowe 
Committed: Mon Feb 1 16:12:49 2016 +

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../org/apache/hadoop/mapred/ClientCache.java   | 24 +++-
 .../hadoop/mapred/ClientServiceDelegate.java| 16 +
 .../org/apache/hadoop/mapred/YARNRunner.java| 11 +
 .../mapred/YarnClientProtocolProvider.java  |  5 ++--
 .../TestYarnClientProtocolProvider.java |  6 +++--
 6 files changed, 60 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b52279ca/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index aeb7597..5c7b53c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -30,6 +30,9 @@ Release 2.6.4 - UNRELEASED
 MAPREDUCE-6554. MRAppMaster servicestart failing with NPE in
 MRAppMaster#parsePreviousJobHistory (Bibin A Chundatt via jlowe)
 
+MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread.
+(Xuan Gong via jlowe)
+
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b52279ca/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
index 4335c82..93ea5c4 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
@@ -22,11 +22,11 @@ import java.io.IOException;
 import java.security.PrivilegedAction;
 import java.util.HashMap;
 import java.util.Map;
-
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
@@ -97,4 +97,26 @@ public class ClientCache {
   }
 });
   }
+
+  public void close() throws IOException {
+if (rm != null) {
+  rm.close();
+}
+
+if (hsProxy != null) {
+  RPC.stopProxy(hsProxy);
+  hsProxy = null;
+}
+
+if (cache != null && !cache.isEmpty()) {
+  for (ClientServiceDelegate delegate : cache.values()) {
+if (delegate != null) {
+  delegate.close();
+  delegate = null;
+}
+  }
+  cache.clear();
+  cache = null;
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b52279ca/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index 686fa0c..ea8bb1e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
+++ 
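
The practical effect of the change: closing the MapReduce Cluster now
tears down the YarnClient that YarnClientProtocolProvider started,
instead of leaking its background thread. A sketch of the calling pattern
that used to leak (no new API here; the patch only makes close()
propagate through YARNRunner and ClientCache):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Cluster;

    Cluster cluster = new Cluster(new Configuration());
    try {
      // query job status, submit jobs, etc.
    } finally {
      cluster.close(); // with this patch, also stops the YarnClient thread
    }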

hadoop git commit: MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread. Contributed by Xuan Gong (cherry picked from commit 59a212b6e1265adfa9b55c71b65a22157dfccf77)

2016-02-01 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 ff04a18f0 -> 69c61fae0


MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread. 
Contributed by Xuan Gong
(cherry picked from commit 59a212b6e1265adfa9b55c71b65a22157dfccf77)

Conflicts:

hadoop-mapreduce-project/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69c61fae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69c61fae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69c61fae

Branch: refs/heads/branch-2.7
Commit: 69c61fae0c7a1501d857430aae6447e518f94a31
Parents: ff04a18
Author: Jason Lowe 
Authored: Mon Feb 1 16:10:56 2016 +
Committer: Jason Lowe 
Committed: Mon Feb 1 16:10:56 2016 +

--
 hadoop-mapreduce-project/CHANGES.txt|  6 +
 .../org/apache/hadoop/mapred/ClientCache.java   | 24 +++-
 .../hadoop/mapred/ClientServiceDelegate.java| 16 +
 .../org/apache/hadoop/mapred/YARNRunner.java| 11 +
 .../mapred/YarnClientProtocolProvider.java  |  5 ++--
 .../TestYarnClientProtocolProvider.java |  6 +++--
 6 files changed, 63 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69c61fae/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index b3f47f7..5a99ff0 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -34,6 +34,9 @@ Release 2.7.3 - UNRELEASED
 MAPREDUCE-6619. HADOOP_CLASSPATH is overwritten in MR container. (Junping
 Du via jianhe)
 
+MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread.
+(Xuan Gong via jlowe)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES
@@ -334,6 +337,9 @@ Release 2.6.4 - UNRELEASED
 MAPREDUCE-6554. MRAppMaster servicestart failing with NPE in
 MRAppMaster#parsePreviousJobHistory (Bibin A Chundatt via jlowe)
 
+MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread.
+(Xuan Gong via jlowe)
+
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69c61fae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
index 4335c82..93ea5c4 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
@@ -22,11 +22,11 @@ import java.io.IOException;
 import java.security.PrivilegedAction;
 import java.util.HashMap;
 import java.util.Map;
-
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
@@ -97,4 +97,26 @@ public class ClientCache {
   }
 });
   }
+
+  public void close() throws IOException {
+if (rm != null) {
+  rm.close();
+}
+
+if (hsProxy != null) {
+  RPC.stopProxy(hsProxy);
+  hsProxy = null;
+}
+
+if (cache != null && !cache.isEmpty()) {
+  for (ClientServiceDelegate delegate : cache.values()) {
+if (delegate != null) {
+  delegate.close();
+  delegate = null;
+}
+  }
+  cache.clear();
+  cache = null;
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69c61fae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index 

[hadoop] Git Push Summary

2016-02-01 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/hdfs-7240 [deleted] 312de


[2/3] hadoop git commit: HDFS-9718. HAUtil#getConfForOtherNodes should unset independent generic keys before initialize (Contributed by DENG FEI)

2016-02-01 Thread vinayakumarb
HDFS-9718. HAUtil#getConfForOtherNodes should unset independent generic keys 
before initialize (Contributed by DENG FEI)

(cherry picked from commit 4d7055e0c8d1936d6c87cec14dbbfdff77b837f3)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80dd039b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80dd039b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80dd039b

Branch: refs/heads/branch-2
Commit: 80dd039b6e78d284d09522e61d7ea1fee9425345
Parents: 43131af
Author: Vinayakumar B 
Authored: Tue Feb 2 12:51:35 2016 +0530
Committer: Vinayakumar B 
Committed: Tue Feb 2 12:56:31 2016 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/hdfs/HAUtil.java | 26 
 .../server/namenode/ha/TestHAConfiguration.java | 22 +
 3 files changed, 51 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80dd039b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2c6688a..f334a32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1777,6 +1777,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-9701. DN may deadlock when hot-swapping under load. (Xiao Chen via lei)
 
+HDFS-9718. HAUtil#getConfForOtherNodes should unset independent generic keys
+before initialize (DENG FEI via vinayakumarb)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/80dd039b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 7800596..9185395 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -18,7 +18,16 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIFELINE_RPC_BIND_HOST_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
 
 import java.io.IOException;
@@ -64,6 +73,19 @@ public class HAUtil {
   private static final DelegationTokenSelector tokenSelector =
   new DelegationTokenSelector();
 
+  private static final String[] HA_SPECIAL_INDEPENDENT_KEYS = new String []{
+DFS_NAMENODE_RPC_ADDRESS_KEY,
+DFS_NAMENODE_RPC_BIND_HOST_KEY,
+DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY,
+DFS_NAMENODE_LIFELINE_RPC_BIND_HOST_KEY,
+DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY,
+DFS_NAMENODE_HTTP_ADDRESS_KEY,
+DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+DFS_NAMENODE_HTTP_BIND_HOST_KEY,
+DFS_NAMENODE_HTTPS_BIND_HOST_KEY
+  };
+
   private HAUtil() { /* Hidden constructor */ }
 
   /**
@@ -188,6 +210,10 @@ public class HAUtil {
 
 // Look up the address of the active NN.
 Configuration confForOtherNode = new Configuration(myConf);
+// unset independent properties
+for (String idpKey : HA_SPECIAL_INDEPENDENT_KEYS) {
+  confForOtherNode.unset(idpKey);
+}
 NameNode.initializeGenericKeys(confForOtherNode, nsId, otherNn);
 return confForOtherNode;
   }


[3/3] hadoop git commit: HDFS-9718. HAUtil#getConfForOtherNodes should unset independent generic keys before initialize (Contributed by DENG FEI)

2016-02-01 Thread vinayakumarb
HDFS-9718. HAUtil#getConfForOtherNodes should unset independent generic keys 
before initialize (Contributed by DENG FEI)

(cherry picked from commit 4d7055e0c8d1936d6c87cec14dbbfdff77b837f3)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java

(cherry picked from commit 80dd039b6e78d284d09522e61d7ea1fee9425345)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94950c15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94950c15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94950c15

Branch: refs/heads/branch-2.8
Commit: 94950c15c2f1f0d0353e91cf7a3da447cf232695
Parents: aeea77c
Author: Vinayakumar B 
Authored: Tue Feb 2 12:51:35 2016 +0530
Committer: Vinayakumar B 
Committed: Tue Feb 2 12:57:42 2016 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/hdfs/HAUtil.java | 26 
 .../server/namenode/ha/TestHAConfiguration.java | 22 +
 3 files changed, 51 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94950c15/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c31de2e..53a74e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1695,6 +1695,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-9701. DN may deadlock when hot-swapping under load. (Xiao Chen via lei)
 
+HDFS-9718. HAUtil#getConfForOtherNodes should unset independent generic keys
+before initialize (DENG FEI via vinayakumarb)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94950c15/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 7800596..9185395 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -18,7 +18,16 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIFELINE_RPC_BIND_HOST_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
 
 import java.io.IOException;
@@ -64,6 +73,19 @@ public class HAUtil {
   private static final DelegationTokenSelector tokenSelector =
   new DelegationTokenSelector();
 
+  private static final String[] HA_SPECIAL_INDEPENDENT_KEYS = new String []{
+DFS_NAMENODE_RPC_ADDRESS_KEY,
+DFS_NAMENODE_RPC_BIND_HOST_KEY,
+DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY,
+DFS_NAMENODE_LIFELINE_RPC_BIND_HOST_KEY,
+DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY,
+DFS_NAMENODE_HTTP_ADDRESS_KEY,
+DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+DFS_NAMENODE_HTTP_BIND_HOST_KEY,
+DFS_NAMENODE_HTTPS_BIND_HOST_KEY
+  };
+
   private HAUtil() { /* Hidden constructor */ }
 
   /**
@@ -188,6 +210,10 @@ public class HAUtil {
 
 // Look up the address of the active NN.
 Configuration confForOtherNode = new Configuration(myConf);
+// unset independent properties
+for (String idpKey : HA_SPECIAL_INDEPENDENT_KEYS) {
+  confForOtherNode.unset(idpKey);
+}
 NameNode.initializeGenericKeys(confForOtherNode, nsId, otherNn);
 return confForOtherNode;
   }


[1/3] hadoop git commit: HDFS-9718. HAUtil#getConfForOtherNodes should unset independent generic keys before initialize (Contributed by DENG FEI)

2016-02-01 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 43131af1e -> 80dd039b6
  refs/heads/branch-2.8 aeea77ce1 -> 94950c15c
  refs/heads/trunk db144eb1c -> 4d7055e0c


HDFS-9718. HAUtil#getConfForOtherNodes should unset independent generic keys 
before initialize (Contributed by DENG FEI)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d7055e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d7055e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d7055e0

Branch: refs/heads/trunk
Commit: 4d7055e0c8d1936d6c87cec14dbbfdff77b837f3
Parents: db144eb
Author: Vinayakumar B 
Authored: Tue Feb 2 12:51:35 2016 +0530
Committer: Vinayakumar B 
Committed: Tue Feb 2 12:51:35 2016 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/hdfs/HAUtil.java | 27 
 .../server/namenode/ha/TestHAConfiguration.java | 23 +
 3 files changed, 53 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d7055e0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f77451b..366f330 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2672,6 +2672,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-9701. DN may deadlock when hot-swapping under load. (Xiao Chen via lei)
 
+HDFS-9718. HAUtil#getConfForOtherNodes should unset independent generic keys
+before initialize (DENG FEI via vinayakumarb)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d7055e0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index ff409c3..7b65abf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -18,7 +18,16 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIFELINE_RPC_BIND_HOST_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
 
 import java.io.IOException;
@@ -64,6 +73,19 @@ public class HAUtil {
   private static final DelegationTokenSelector tokenSelector =
   new DelegationTokenSelector();
 
+  private static final String[] HA_SPECIAL_INDEPENDENT_KEYS = new String[]{
+DFS_NAMENODE_RPC_ADDRESS_KEY,
+DFS_NAMENODE_RPC_BIND_HOST_KEY,
+DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY,
+DFS_NAMENODE_LIFELINE_RPC_BIND_HOST_KEY,
+DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY,
+DFS_NAMENODE_HTTP_ADDRESS_KEY,
+DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+DFS_NAMENODE_HTTP_BIND_HOST_KEY,
+DFS_NAMENODE_HTTPS_BIND_HOST_KEY,
+  };
+
   private HAUtil() { /* Hidden constructor */ }
 
   /**
@@ -188,6 +210,11 @@ public class HAUtil {
 
 // Look up the address of the other NNs
 List<Configuration> confs = new ArrayList<Configuration>(otherNn.size());
+myConf = new Configuration(myConf);
+// unset independent properties
+for (String idpKey : HA_SPECIAL_INDEPENDENT_KEYS) {
+  myConf.unset(idpKey);
+}
 for (String nn : otherNn) {
   Configuration confForOtherNode = new Configuration(myConf);
   NameNode.initializeGenericKeys(confForOtherNode, nsId, nn);
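
The diffstat above also adds a regression test to TestHAConfiguration that
is not quoted in this message. A sketch of what such a check can look like,
assuming the trunk signature List<Configuration> getConfForOtherNodes(
Configuration) and an ns1/nn1/nn2 layout invented for the example:

    // Hypothetical JUnit-style check; not the committed test body.
    @Test
    public void testIndependentKeysNotLeaked() throws Exception {
      Configuration conf = new Configuration(false);
      conf.set("dfs.nameservices", "ns1");
      conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
      conf.set("dfs.ha.namenode.id", "nn1");
      conf.set("dfs.namenode.rpc-address.ns1.nn1", "nn1.example.com:8020");
      conf.set("dfs.namenode.rpc-address.ns1.nn2", "nn2.example.com:8020");
      // Leftover from resolving the local node; must not leak to nn2.
      conf.set("dfs.namenode.servicerpc-address", "nn1.example.com:8021");

      for (Configuration other : HAUtil.getConfForOtherNodes(conf)) {
        assertEquals("nn2.example.com:8020",
            other.get("dfs.namenode.rpc-address"));
        // The unset loop keeps the stale local-only value from leaking.
        assertNull(other.get("dfs.namenode.servicerpc-address"));
      }
    }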


hadoop git commit: YARN-4100. Add Documentation for Distributed and Delegated-Centralized Node Labels feature. Contributed by Naganarasimha G R.

2016-02-01 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 c487453b9 -> aeea77ce1


YARN-4100. Add Documentation for Distributed and Delegated-Centralized
Node Labels feature. Contributed by Naganarasimha G R.

(cherry picked from commit db144eb1c51c1f37bdd1e0c18e9a5b0969c82e33)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aeea77ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aeea77ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aeea77ce

Branch: refs/heads/branch-2.8
Commit: aeea77ce147c8f53a868274654df693437e1c435
Parents: c487453
Author: Devaraj K 
Authored: Tue Feb 2 12:06:51 2016 +0530
Committer: Devaraj K 
Committed: Tue Feb 2 12:08:56 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../src/main/resources/yarn-default.xml | 50 ++--
 .../src/site/markdown/NodeLabel.md  | 86 
 3 files changed, 99 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aeea77ce/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 901a1eb..636db91 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -592,6 +592,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-4340. Add "list" API to reservation system. (Sean Po via wangda)
 
+YARN-4100. Add Documentation for Distributed and Delegated-Centralized
+Node Labels feature. (Naganarasimha G R via devaraj)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aeea77ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 0add988..80f0fea 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2281,26 +2281,26 @@
   <property>
     <description>
-    When "yarn.node-labels.configuration-type" parameter in RM is configured as
-    "distributed", Administrators can configure in NM, the provider for the
+    When "yarn.node-labels.configuration-type" is configured with "distributed"
+    in RM, Administrators can configure in NM the provider for the
     node labels by configuring this parameter. Administrators can
-    specify "config", "script" or the class name of the provider. Configured
+    configure "config", "script" or the class name of the provider. Configured
     class needs to extend
     org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeLabelsProvider.
-    If "config" is specified then "ConfigurationNodeLabelsProvider" and
-    "script" then "ScriptNodeLabelsProvider" will be used.
+    If "config" is configured, then "ConfigurationNodeLabelsProvider" and if
+    "script" is configured, then "ScriptNodeLabelsProvider" will be used.
     </description>
     <name>yarn.nodemanager.node-labels.provider</name>
   </property>

   <property>
     <description>
-    When node labels "yarn.nodemanager.node-labels.provider" is of type
-    "config" or the configured class extends AbstractNodeLabelsProvider then
-    periodically node labels are retrieved from the node labels provider.
-    This configuration is to define the interval. If -1 is configured then
-    node labels are retrieved from. provider only during initialization.
-    Defaults to 10 mins.
+    When "yarn.nodemanager.node-labels.provider" is configured with "config",
+    "Script" or the configured class extends AbstractNodeLabelsProvider, then
+    periodically node labels are retrieved from the node labels provider. This
+    configuration is to define the interval period.
+    If -1 is configured then node labels are retrieved from provider only
+    during initialization. Defaults to 10 mins.
     </description>
     <name>yarn.nodemanager.node-labels.provider.fetch-interval-ms</name>
     <value>600000</value>
   </property>

@@ -2308,8 +2308,8 @@
   <property>
     <description>
-   Interval at which node labels syncs with RM from NM.Will send loaded labels
-   every x intervals configured along with heartbeat from NM to RM.
+   Interval at which NM syncs its node labels with RM. NM will send its loaded
+   labels every x intervals configured, along with heartbeat to RM.
     </description>
     <name>yarn.nodemanager.node-labels.resync-interval-ms</name>
     <value>120000</value>
   </property>

@@ -2317,19 +2317,18 @@
   <property>
     <description>
-When node labels "yarn.nodemanager.node-labels.provider"
-is of type "config" then 
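
The properties documented above drive the distributed node-labels setup end
to end. A minimal sketch of the matching programmatic configuration (the
string keys mirror the yarn-default.xml entries above; the fetch interval
shown is the stated 10-minute default):

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class DistributedNodeLabelsSketch {
      public static YarnConfiguration build() {
        YarnConfiguration conf = new YarnConfiguration();
        // RM side: labels are reported by the NMs themselves.
        conf.setBoolean("yarn.node-labels.enabled", true);
        conf.set("yarn.node-labels.configuration-type", "distributed");
        // NM side: "config" selects ConfigurationNodeLabelsProvider.
        conf.set("yarn.nodemanager.node-labels.provider", "config");
        // Re-read labels from the provider every 10 minutes.
        conf.setLong(
            "yarn.nodemanager.node-labels.provider.fetch-interval-ms",
            600000L);
        return conf;
      }
    }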

hadoop git commit: YARN-4100. Add Documentation for Distributed and Delegated-Centralized Node Labels feature. Contributed by Naganarasimha G R.

2016-02-01 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1cd55e0c1 -> db144eb1c


YARN-4100. Add Documentation for Distributed and Delegated-Centralized
Node Labels feature. Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db144eb1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db144eb1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db144eb1

Branch: refs/heads/trunk
Commit: db144eb1c51c1f37bdd1e0c18e9a5b0969c82e33
Parents: 1cd55e0
Author: Devaraj K 
Authored: Tue Feb 2 12:06:51 2016 +0530
Committer: Devaraj K 
Committed: Tue Feb 2 12:06:51 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../src/main/resources/yarn-default.xml | 50 ++--
 .../src/site/markdown/NodeLabel.md  | 86 
 3 files changed, 99 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db144eb1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index bf46864..345c64b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -778,6 +778,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-4340. Add "list" API to reservation system. (Sean Po via wangda)
 
+YARN-4100. Add Documentation for Distributed and Delegated-Centralized
+Node Labels feature. (Naganarasimha G R via devaraj)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db144eb1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index e33d23e..d8ea3ad 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2281,26 +2281,26 @@
   <property>
     <description>
-    When "yarn.node-labels.configuration-type" parameter in RM is configured as
-    "distributed", Administrators can configure in NM, the provider for the
+    When "yarn.node-labels.configuration-type" is configured with "distributed"
+    in RM, Administrators can configure in NM the provider for the
     node labels by configuring this parameter. Administrators can
-    specify "config", "script" or the class name of the provider. Configured
+    configure "config", "script" or the class name of the provider. Configured
     class needs to extend
     org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeLabelsProvider.
-    If "config" is specified then "ConfigurationNodeLabelsProvider" and
-    "script" then "ScriptNodeLabelsProvider" will be used.
+    If "config" is configured, then "ConfigurationNodeLabelsProvider" and if
+    "script" is configured, then "ScriptNodeLabelsProvider" will be used.
     </description>
     <name>yarn.nodemanager.node-labels.provider</name>
   </property>

   <property>
     <description>
-    When node labels "yarn.nodemanager.node-labels.provider" is of type
-    "config" or the configured class extends AbstractNodeLabelsProvider then
-    periodically node labels are retrieved from the node labels provider.
-    This configuration is to define the interval. If -1 is configured then
-    node labels are retrieved from. provider only during initialization.
-    Defaults to 10 mins.
+    When "yarn.nodemanager.node-labels.provider" is configured with "config",
+    "Script" or the configured class extends AbstractNodeLabelsProvider, then
+    periodically node labels are retrieved from the node labels provider. This
+    configuration is to define the interval period.
+    If -1 is configured then node labels are retrieved from provider only
+    during initialization. Defaults to 10 mins.
     </description>
     <name>yarn.nodemanager.node-labels.provider.fetch-interval-ms</name>
     <value>600000</value>
   </property>

@@ -2308,8 +2308,8 @@
   <property>
     <description>
-   Interval at which node labels syncs with RM from NM.Will send loaded labels
-   every x intervals configured along with heartbeat from NM to RM.
+   Interval at which NM syncs its node labels with RM. NM will send its loaded
+   labels every x intervals configured, along with heartbeat to RM.
     </description>
     <name>yarn.nodemanager.node-labels.resync-interval-ms</name>
     <value>120000</value>
   </property>

@@ -2317,19 +2317,18 @@
   <property>
     <description>
-When node labels "yarn.nodemanager.node-labels.provider"
-is of type "config" then ConfigurationNodeLabelsProvider fetches the
-partition from this parameter.

hadoop git commit: YARN-4100. Add Documentation for Distributed and Delegated-Centralized Node Labels feature. Contributed by Naganarasimha G R.

2016-02-01 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bbda44688 -> 43131af1e


YARN-4100. Add Documentation for Distributed and Delegated-Centralized
Node Labels feature. Contributed by Naganarasimha G R.

(cherry picked from commit db144eb1c51c1f37bdd1e0c18e9a5b0969c82e33)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43131af1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43131af1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43131af1

Branch: refs/heads/branch-2
Commit: 43131af1e21cde903c0203e935545251a32136f1
Parents: bbda446
Author: Devaraj K 
Authored: Tue Feb 2 12:06:51 2016 +0530
Committer: Devaraj K 
Committed: Tue Feb 2 12:07:49 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../src/main/resources/yarn-default.xml | 50 ++--
 .../src/site/markdown/NodeLabel.md  | 86 
 3 files changed, 99 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43131af1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 94b62e1..d6e1d4d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -723,6 +723,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-4340. Add "list" API to reservation system. (Sean Po via wangda)
 
+YARN-4100. Add Documentation for Distributed and Delegated-Centralized
+Node Labels feature. (Naganarasimha G R via devaraj)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43131af1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index e33d23e..d8ea3ad 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2281,26 +2281,26 @@
   <property>
     <description>
-    When "yarn.node-labels.configuration-type" parameter in RM is configured as
-    "distributed", Administrators can configure in NM, the provider for the
+    When "yarn.node-labels.configuration-type" is configured with "distributed"
+    in RM, Administrators can configure in NM the provider for the
     node labels by configuring this parameter. Administrators can
-    specify "config", "script" or the class name of the provider. Configured
+    configure "config", "script" or the class name of the provider. Configured
     class needs to extend
     org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeLabelsProvider.
-    If "config" is specified then "ConfigurationNodeLabelsProvider" and
-    "script" then "ScriptNodeLabelsProvider" will be used.
+    If "config" is configured, then "ConfigurationNodeLabelsProvider" and if
+    "script" is configured, then "ScriptNodeLabelsProvider" will be used.
     </description>
     <name>yarn.nodemanager.node-labels.provider</name>
   </property>

   <property>
     <description>
-    When node labels "yarn.nodemanager.node-labels.provider" is of type
-    "config" or the configured class extends AbstractNodeLabelsProvider then
-    periodically node labels are retrieved from the node labels provider.
-    This configuration is to define the interval. If -1 is configured then
-    node labels are retrieved from. provider only during initialization.
-    Defaults to 10 mins.
+    When "yarn.nodemanager.node-labels.provider" is configured with "config",
+    "Script" or the configured class extends AbstractNodeLabelsProvider, then
+    periodically node labels are retrieved from the node labels provider. This
+    configuration is to define the interval period.
+    If -1 is configured then node labels are retrieved from provider only
+    during initialization. Defaults to 10 mins.
     </description>
     <name>yarn.nodemanager.node-labels.provider.fetch-interval-ms</name>
     <value>600000</value>
   </property>

@@ -2308,8 +2308,8 @@
   <property>
     <description>
-   Interval at which node labels syncs with RM from NM.Will send loaded labels
-   every x intervals configured along with heartbeat from NM to RM.
+   Interval at which NM syncs its node labels with RM. NM will send its loaded
+   labels every x intervals configured, along with heartbeat to RM.
     </description>
     <name>yarn.nodemanager.node-labels.resync-interval-ms</name>
     <value>120000</value>
   </property>

@@ -2317,19 +2317,18 @@
   <property>
     <description>
-When node labels "yarn.nodemanager.node-labels.provider"
-is of type "config" then 

[1/2] hadoop git commit: YARN-4649. Add additional logging to some NM state store operations. Contributed by Sidharta Seethana

2016-02-01 Thread vvasudev
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9591363ad -> bbda44688
  refs/heads/trunk 9875325d5 -> 1cd55e0c1


YARN-4649. Add additional logging to some NM state store operations. 
Contributed by Sidharta Seethana


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1cd55e0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1cd55e0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1cd55e0c

Branch: refs/heads/trunk
Commit: 1cd55e0c171f7c4dec6f843931285557d59cd5ea
Parents: 9875325
Author: Varun Vasudev 
Authored: Tue Feb 2 09:25:23 2016 +0530
Committer: Varun Vasudev 
Committed: Tue Feb 2 09:25:23 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../containermanager/ContainerManagerImpl.java  | 14 +++
 .../recovery/NMLeveldbStateStoreService.java| 44 
 .../recovery/NMStateStoreService.java   | 11 +
 4 files changed, 72 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cd55e0c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 90742db..bf46864 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -115,6 +115,9 @@ Release 2.9.0 - UNRELEASED
 
 YARN-4647. Make RegisterNodeManagerRequestPBImpl thread-safe. (kasha)
 
+YARN-4649. Add additional logging to some NM state store operations.
+(Sidharta Seethana via vvasudev)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cd55e0c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index d0663d5..7d51477 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -286,18 +286,32 @@ public class ContainerManagerImpl extends 
CompositeService implements
   RecoveredApplicationsState appsState = 
stateStore.loadApplicationsState();
   for (ContainerManagerApplicationProto proto :
appsState.getApplications()) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Recovering application with state: " + proto.toString());
+}
 recoverApplication(proto);
   }
 
   for (RecoveredContainerState rcs : stateStore.loadContainersState()) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Recovering container with state: " + rcs);
+}
+
 recoverContainer(rcs);
   }
 
   String diagnostic = "Application marked finished during recovery";
   for (ApplicationId appId : appsState.getFinishedApplications()) {
+
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Application marked finished during recovery: " + appId);
+}
+
 dispatcher.getEventHandler().handle(
 new ApplicationFinishEvent(appId, diagnostic));
   }
+} else {
+  LOG.info("Not a recoverable state store. Nothing to recover.");
 }
   }
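
The guards above follow the standard commons-logging idiom: the string
concatenation (and the proto.toString() call) is skipped entirely unless
debug logging is actually enabled. A standalone sketch of the pattern:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class GuardedDebugSketch {
      private static final Log LOG =
          LogFactory.getLog(GuardedDebugSketch.class);

      void recover(Object state) {
        // Cheap boolean check first; the message is only built when it
        // will actually be emitted.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Recovering container with state: " + state);
        }
      }
    }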
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cd55e0c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index 89c71bb..81d6c57 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ 

[2/2] hadoop git commit: YARN-4649. Add additional logging to some NM state store operations. Contributed by Sidharta Seethana

2016-02-01 Thread vvasudev
YARN-4649. Add additional logging to some NM state store operations. 
Contributed by Sidharta Seethana

(cherry picked from commit 1cd55e0c171f7c4dec6f843931285557d59cd5ea)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbda4468
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbda4468
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbda4468

Branch: refs/heads/branch-2
Commit: bbda446889eb07391619131524fe75a411c97017
Parents: 9591363
Author: Varun Vasudev 
Authored: Tue Feb 2 09:25:23 2016 +0530
Committer: Varun Vasudev 
Committed: Tue Feb 2 09:26:22 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../containermanager/ContainerManagerImpl.java  | 14 +++
 .../recovery/NMLeveldbStateStoreService.java| 44 
 .../recovery/NMStateStoreService.java   | 11 +
 4 files changed, 72 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbda4468/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 68a364e..94b62e1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -57,6 +57,9 @@ Release 2.9.0 - UNRELEASED
 
 YARN-4647. Make RegisterNodeManagerRequestPBImpl thread-safe. (kasha)
 
+YARN-4649. Add additional logging to some NM state store operations.
+(Sidharta Seethana via vvasudev)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbda4468/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index d0663d5..7d51477 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -286,18 +286,32 @@ public class ContainerManagerImpl extends 
CompositeService implements
   RecoveredApplicationsState appsState = 
stateStore.loadApplicationsState();
   for (ContainerManagerApplicationProto proto :
appsState.getApplications()) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Recovering application with state: " + proto.toString());
+}
 recoverApplication(proto);
   }
 
   for (RecoveredContainerState rcs : stateStore.loadContainersState()) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Recovering container with state: " + rcs);
+}
+
 recoverContainer(rcs);
   }
 
   String diagnostic = "Application marked finished during recovery";
   for (ApplicationId appId : appsState.getFinishedApplications()) {
+
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Application marked finished during recovery: " + appId);
+}
+
 dispatcher.getEventHandler().handle(
 new ApplicationFinishEvent(appId, diagnostic));
   }
+} else {
+  LOG.info("Not a recoverable state store. Nothing to recover.");
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbda4468/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index 89c71bb..81d6c57 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ 

[12/50] [abbrv] hadoop git commit: HDFS-9690. ClientProtocol.addBlock is not idempotent after HDFS-8071.

2016-02-01 Thread aengineer
HDFS-9690. ClientProtocol.addBlock is not idempotent after HDFS-8071.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45c763ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45c763ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45c763ad

Branch: refs/heads/HDFS-7240
Commit: 45c763ad6171bc7808c2ddcb9099a4215113da2a
Parents: bd909ed
Author: Tsz-Wo Nicholas Sze 
Authored: Tue Jan 26 11:20:13 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Tue Jan 26 11:20:13 2016 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 11 +++---
 .../hadoop/hdfs/TestDFSClientRetries.java   | 36 +++-
 3 files changed, 35 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45c763ad/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a14a1d8..56a85f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2696,6 +2696,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently (Mingliang 
Liu
 via jitendra)
 
+HDFS-9690. ClientProtocol.addBlock is not idempotent after HDFS-8071.
+(szetszwo)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45c763ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 914fbd9..6ba8e1c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -184,17 +184,16 @@ class FSDirWriteFileOp {
 src = fsn.dir.resolvePath(pc, src, pathComponents);
 FileState fileState = analyzeFileState(fsn, src, fileId, clientName,
previous, onRetryBlock);
-final INodeFile pendingFile = fileState.inode;
-// Check if the penultimate block is minimally replicated
-if (!fsn.checkFileProgress(src, pendingFile, false)) {
-  throw new NotReplicatedYetException("Not replicated yet: " + src);
-}
-
 if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
   // This is a retry. No need to generate new locations.
   // Use the last block if it has locations.
   return null;
 }
+
+final INodeFile pendingFile = fileState.inode;
+if (!fsn.checkFileProgress(src, pendingFile, false)) {
+  throw new NotReplicatedYetException("Not replicated yet: " + src);
+}
 if (pendingFile.getBlocks().length >= fsn.maxBlocksPerFile) {
   throw new IOException("File has reached the limit on maximum number of"
   + " blocks (" + DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY

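The reorder above is the entire fix: the retry short-circuit now runs
before checkFileProgress, so an addBlock retried over IPC returns the
block the first call already allocated instead of failing the replication
check that the first allocation itself invalidated. A toy illustration of
the ordering rule, outside any real NameNode types:

    // Standalone sketch: "allocate" must detect a retry before it
    // re-validates preconditions that the first call changed.
    public class IdempotentAllocateSketch {
      private String lastAllocated;           // block from the prior call
      private boolean penultimateReplicated = true;

      synchronized String allocateBlock() {
        // 1. Retry? The earlier call succeeded; hand back its result.
        if (lastAllocated != null) {
          return lastAllocated;
        }
        // 2. Only a genuinely new allocation checks progress.
        if (!penultimateReplicated) {
          throw new IllegalStateException("Not replicated yet");
        }
        lastAllocated = "blk_" + System.nanoTime();
        // The new last block stays unreplicated until DataNodes report
        // in, which is exactly why a retry must not re-run the check.
        penultimateReplicated = false;
        return lastAllocated;
      }
    }
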
http://git-wip-us.apache.org/repos/asf/hadoop/blob/45c763ad/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index e41c06a..1f783f6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -432,19 +432,37 @@ public class TestDFSClientRetries {
   // Make the call to addBlock() get called twice, as if it were retried
   // due to an IPC issue.
   doAnswer(new Answer<LocatedBlock>() {
-@Override
-public LocatedBlock answer(InvocationOnMock invocation) throws 
Throwable {
-  LocatedBlock ret = (LocatedBlock) invocation.callRealMethod();
+private int getBlockCount(LocatedBlock ret) throws IOException {
   LocatedBlocks lb = cluster.getNameNodeRpc().getBlockLocations(src, 
0, Long.MAX_VALUE);
-  int blockCount = lb.getLocatedBlocks().size();
   assertEquals(lb.getLastLocatedBlock().getBlock(), ret.getBlock());
-  
+  return 

[43/50] [abbrv] hadoop git commit: YARN-4428. Redirect RM page to AHS page when AHS turned on and RM page is not available. Contributed by Chang Li

2016-02-01 Thread aengineer
YARN-4428. Redirect RM page to AHS page when AHS turned on and RM page is not 
available. Contributed by Chang Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/772ea7b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/772ea7b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/772ea7b4

Branch: refs/heads/HDFS-7240
Commit: 772ea7b41b06beaa1f4ac4fa86eac8d6e6c8cd36
Parents: f4a57d4
Author: Jason Lowe 
Authored: Fri Jan 29 21:48:54 2016 +
Committer: Jason Lowe 
Committed: Fri Jan 29 21:48:54 2016 +

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../rmapp/attempt/RMAppAttemptImpl.java | 23 -
 .../server/resourcemanager/webapp/RMWebApp.java |  5 ++
 .../resourcemanager/webapp/RMWebAppFilter.java  | 90 
 .../attempt/TestRMAppAttemptTransitions.java| 78 -
 5 files changed, 197 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/772ea7b4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ebbf0f4..d82a9be 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1441,6 +1441,9 @@ Release 2.7.3 - UNRELEASED
 YARN-4598. Invalid event: RESOURCE_FAILED at
 CONTAINER_CLEANEDUP_AFTER_KILL (tangshangwen via jlowe)
 
+YARN-4428. Redirect RM page to AHS page when AHS turned on and RM page is
+not available (Chang Li via jlowe)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/772ea7b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 99f1b21..16456ae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -628,6 +629,21 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 }
   }
 
+  private void setTrackingUrlToAHSPage(RMAppAttemptState stateToBeStored) {
+originalTrackingUrl = pjoin(
+WebAppUtils.getHttpSchemePrefix(conf) +
+WebAppUtils.getAHSWebAppURLWithoutScheme(conf),
+"applicationhistory", "app", getAppAttemptId().getApplicationId());
+switch (stateToBeStored) {
+case KILLED:
+case FAILED:
+  proxiedTrackingUrl = originalTrackingUrl;
+  break;
+default:
+  break;
+}
+  }
+
   private void invalidateAMHostAndPort() {
 this.host = "N/A";
 this.rpcPort = -1;
@@ -1211,7 +1227,12 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 String diags = null;
 
 // don't leave the tracking URL pointing to a non-existent AM
-setTrackingUrlToRMAppPage(stateToBeStored);
+if (conf.getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED,
+YarnConfiguration.DEFAULT_APPLICATION_HISTORY_ENABLED)) {
+  setTrackingUrlToAHSPage(stateToBeStored);
+} else {
+  setTrackingUrlToRMAppPage(stateToBeStored);
+}
 String finalTrackingUrl = getOriginalTrackingUrl();
 FinalApplicationStatus finalStatus = null;
 int exitStatus = ContainerExitStatus.INVALID;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/772ea7b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
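
With the history service enabled, the URL assembled by
setTrackingUrlToAHSPage() resolves to the generic history UI, e.g.
http://<ahs-host>:8188/applicationhistory/app/<appId> with the default
timeline web address. A sketch of the same composition outside the RM
(the host and application id are illustrative):

    // Hedged sketch of the pjoin() call in the patch, using plain string
    // joining instead of the RM's StringHelper.
    String scheme = "http://";                  // getHttpSchemePrefix(conf)
    String ahsAddress = "ahs.example.com:8188"; // AHS webapp address
    String appId = "application_1454466540000_0001";
    String trackingUrl = String.join("/",
        scheme + ahsAddress, "applicationhistory", "app", appId);
    // -> http://ahs.example.com:8188/applicationhistory/app/application_...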

[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2016-02-01 Thread aengineer
Merge branch 'trunk' into HDFS-7240

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16440b83
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16440b83
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16440b83

Branch: refs/heads/HDFS-7240
Commit: 16440b83626a1f11ace8f4f9b61e4de254ff164e
Parents: bbe9e8b 59a212b
Author: Anu Engineer 
Authored: Mon Feb 1 10:36:58 2016 -0800
Committer: Anu Engineer 
Committed: Mon Feb 1 10:36:58 2016 -0800

--
 BUILDING.txt|   16 +-
 dev-support/README.md   |   57 +
 dev-support/bin/releasedocmaker |   18 +
 dev-support/bin/shelldocs   |   18 +
 dev-support/bin/smart-apply-patch   |   18 +
 dev-support/bin/test-patch  |   18 +
 dev-support/bin/yetus-wrapper   |  176 ++
 dev-support/releasedocmaker.py  |  580 
 dev-support/shelldocs.py|  271 --
 dev-support/smart-apply-patch.sh|  187 --
 dev-support/test-patch.d/checkstyle.sh  |  205 --
 dev-support/test-patch.d/shellcheck.sh  |  178 --
 dev-support/test-patch.d/whitespace.sh  |   46 -
 dev-support/test-patch.sh   | 2814 --
 .../main/resources/checkstyle/checkstyle.xml|   12 +-
 .../server/AuthenticationFilter.java|   11 +-
 .../security/authentication/util/AuthToken.java |   16 +-
 .../authentication/util/KerberosUtil.java   |   87 +-
 .../server/TestAuthenticationFilter.java|  167 +-
 .../authentication/util/TestKerberosUtil.java   |   31 +-
 .../util/TestZKSignerSecretProvider.java|   56 +-
 hadoop-common-project/hadoop-common/CHANGES.txt |  286 +-
 hadoop-common-project/hadoop-common/pom.xml |   98 +-
 .../hadoop-common/src/CMakeLists.txt|3 +-
 .../src/main/bin/hadoop-config.cmd  |   13 +-
 .../src/main/conf/log4j.properties  |4 +-
 .../crypto/key/kms/KMSClientProvider.java   |   14 +-
 .../apache/hadoop/fs/AbstractFileSystem.java|2 +-
 .../hadoop/fs/CommonConfigurationKeys.java  |5 +
 .../fs/CommonConfigurationKeysPublic.java   |9 +
 .../org/apache/hadoop/fs/ContentSummary.java|  241 +-
 .../java/org/apache/hadoop/fs/FileContext.java  |6 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  111 +-
 .../org/apache/hadoop/fs/FilterFileSystem.java  |   11 +
 .../main/java/org/apache/hadoop/fs/FsShell.java |   12 +-
 .../java/org/apache/hadoop/fs/QuotaUsage.java   |  359 +++
 .../apache/hadoop/fs/RawLocalFileSystem.java|5 +
 .../main/java/org/apache/hadoop/fs/Trash.java   |   14 +-
 .../java/org/apache/hadoop/fs/TrashPolicy.java  |   56 +-
 .../apache/hadoop/fs/TrashPolicyDefault.java|  193 +-
 .../hadoop/fs/permission/FsPermission.java  |   25 +-
 .../apache/hadoop/fs/shell/CopyCommands.java|   40 +-
 .../java/org/apache/hadoop/fs/shell/Count.java  |   37 +-
 .../hadoop/fs/viewfs/ChRootedFileSystem.java|7 +-
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |   15 +-
 .../org/apache/hadoop/ha/SshFenceByTcpPort.java |4 +-
 .../org/apache/hadoop/http/HttpServer2.java |   25 +-
 .../apache/hadoop/io/AbstractMapWritable.java   |   54 +-
 .../main/java/org/apache/hadoop/io/IOUtils.java |5 +-
 .../org/apache/hadoop/io/SortedMapWritable.java |   47 +-
 .../coder/AbstractErasureDecoder.java   |2 +-
 .../coder/AbstractHHErasureCodingStep.java  |   49 +
 .../erasurecode/coder/HHXORErasureDecoder.java  |   95 +
 .../coder/HHXORErasureDecodingStep.java |  349 +++
 .../erasurecode/coder/HHXORErasureEncoder.java  |   92 +
 .../coder/HHXORErasureEncodingStep.java |  146 +
 .../io/erasurecode/coder/util/HHUtil.java   |  216 ++
 .../rawcoder/AbstractRawErasureDecoder.java |   55 +-
 .../rawcoder/AbstractRawErasureEncoder.java |   56 +-
 .../erasurecode/rawcoder/RawErasureDecoder.java |5 +-
 .../erasurecode/rawcoder/RawErasureEncoder.java |5 +-
 .../main/java/org/apache/hadoop/ipc/Client.java |5 +-
 .../apache/hadoop/ipc/DecayRpcScheduler.java|2 +-
 

[02/50] [abbrv] hadoop git commit: HADOOP-12731. Remove useless boxing/unboxing code. Contributed by Kousuke Saruta.

2016-02-01 Thread aengineer
HADOOP-12731. Remove useless boxing/unboxing code. Contributed by Kousuke 
Saruta.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/736eb17a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/736eb17a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/736eb17a

Branch: refs/heads/HDFS-7240
Commit: 736eb17a796a1c1ad5f4db2c6a64f6752db7bec3
Parents: 2b83329
Author: Akira Ajisaka 
Authored: Mon Jan 25 13:47:29 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Jan 25 13:47:29 2016 +0900

--
 .../util/TestZKSignerSecretProvider.java|  2 +-
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../java/org/apache/hadoop/fs/FileContext.java  |  4 +-
 .../org/apache/hadoop/ha/SshFenceByTcpPort.java |  4 +-
 .../apache/hadoop/io/AbstractMapWritable.java   | 54 +++-
 .../hadoop/security/ShellBasedIdMapping.java|  9 ++--
 .../java/org/apache/hadoop/util/bloom/Key.java  |  4 +-
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |  8 +--
 .../apache/hadoop/test/GenericTestUtils.java|  2 +-
 .../gridmix/DistributedCacheEmulator.java   |  4 +-
 .../hadoop/mapred/gridmix/CommonJobTest.java|  2 +-
 .../mapred/gridmix/TestPseudoLocalFs.java   |  2 +-
 .../apache/hadoop/streaming/DelayEchoApp.java   |  2 +-
 13 files changed, 43 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/736eb17a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
index 4f8b5ae..8211314 100644
--- 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
+++ 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
@@ -35,7 +35,7 @@ public class TestZKSignerSecretProvider {
 
   // rollover every 2 sec
   private final int timeout = 4000;
-  private final long rolloverFrequency = Long.valueOf(timeout / 2);
+  private final long rolloverFrequency = timeout / 2;
 
   @Before
   public void setup() throws Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/736eb17a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 36cac2f..3db68fb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1645,6 +1645,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12730. Hadoop streaming -mapper and -reducer options are wrongly
 documented as required. (Kengo Seki via aajisaka)
 
+HADOOP-12731. Remove useless boxing/unboxing code.
+(Kousuke Saruta via aajisaka)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/736eb17a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 2456154..d96abad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -761,7 +761,7 @@ public class FileContext {
   @Override
   public Boolean next(final AbstractFileSystem fs, final Path p) 
 throws IOException, UnresolvedLinkException {
-return Boolean.valueOf(fs.delete(p, recursive));
+return fs.delete(p, recursive);
   }
 }.resolve(this, absF);
   }
@@ -895,7 +895,7 @@ public class FileContext {
   @Override
   public Boolean next(final AbstractFileSystem fs, final Path p) 
 throws IOException, UnresolvedLinkException {
-return Boolean.valueOf(fs.setReplication(p, replication));
+return fs.setReplication(p, replication);
   }
 }.resolve(this, absF);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/736eb17a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
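
Every hunk in this patch is a variant of the same cleanup: with
autoboxing, wrapping a primitive in Boolean.valueOf()/Long.valueOf()
before returning it as the wrapper type is redundant, since the compiler
inserts the identical call. A compact before/after:

    public class BoxingSketch {
      // Before: explicit, redundant boxing.
      static Boolean deletedExplicit(boolean raw) {
        return Boolean.valueOf(raw);
      }

      // After: autoboxing emits the same Boolean.valueOf() under the
      // hood, so behavior (including Boolean caching) is unchanged.
      static Boolean deletedAutoboxed(boolean raw) {
        return raw;
      }
    }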

[36/50] [abbrv] hadoop git commit: YARN-4543. Fix random test failure in TestNodeStatusUpdater.testStopReentrant. (Akihiro Suda via rohithsharmaks)

2016-02-01 Thread aengineer
YARN-4543. Fix random test failure in TestNodeStatusUpdater.testStopReentrant. 
(Akihiro Suda via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac686668
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac686668
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac686668

Branch: refs/heads/HDFS-7240
Commit: ac686668031ee9837deed3f3566f09f33c437870
Parents: 09d831c
Author: Rohith Sharma K S 
Authored: Fri Jan 29 12:29:54 2016 +0530
Committer: Rohith Sharma K S 
Committed: Fri Jan 29 12:29:54 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../yarn/server/nodemanager/TestNodeStatusUpdater.java  | 9 -
 2 files changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac686668/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ee57e4b..dd61f2a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -173,6 +173,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4519. Potential deadlock of CapacityScheduler between decrease 
container
 and assign containers. (Meng Ding via jianhe)
 
+YARN-4543. Fix random test failure in 
TestNodeStatusUpdater.testStopReentrant
+(Akihiro Suda via rohithsharmaks)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac686668/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
index a8066c1..9e6868d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -1269,7 +1269,14 @@ public class TestNodeStatusUpdater {
 }
 
 Assert.assertEquals(STATE.STOPPED, nm.getServiceState());
-Assert.assertEquals(numCleanups.get(), 1);
+
+// It further takes a while after NM reached the STOPPED state.
+waitCount = 0;
+while (numCleanups.get() == 0 && waitCount++ != 20) {
+  LOG.info("Waiting for NM shutdown..");
+  Thread.sleep(1000);
+}
+Assert.assertEquals(1, numCleanups.get());
   }
 
   @Test

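The added loop is the standard remedy for asserting on state that a
service updates asynchronously after stop(): poll with a bounded timeout
rather than asserting immediately. Hadoop's test utilities offer
GenericTestUtils.waitFor for this; a hypothetical standalone equivalent:

    import java.util.concurrent.TimeoutException;
    import java.util.function.BooleanSupplier;

    public final class WaitSketch {
      // Poll condition every intervalMs until true or timeoutMs elapses.
      static void waitFor(BooleanSupplier condition, long intervalMs,
          long timeoutMs) throws InterruptedException, TimeoutException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!condition.getAsBoolean()) {
          if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException(
                "Condition not met within " + timeoutMs + " ms");
          }
          Thread.sleep(intervalMs);
        }
      }
    }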


[28/50] [abbrv] hadoop git commit: YARN-4633. Fix random test failure in TestRMRestart#testRMRestartAfterPreemption. (Bibin A Chundatt via rohithsharmaks)

2016-02-01 Thread aengineer
YARN-4633. Fix random test failure in 
TestRMRestart#testRMRestartAfterPreemption. (Bibin A Chundatt via 
rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef343be8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef343be8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef343be8

Branch: refs/heads/HDFS-7240
Commit: ef343be82b4268ebd52f6a11e1a1ce53a5d232a9
Parents: 86560a4
Author: Rohith Sharma K S 
Authored: Thu Jan 28 21:53:45 2016 +0530
Committer: Rohith Sharma K S 
Committed: Thu Jan 28 21:53:45 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java  | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef343be8/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2fae034..c8a8c06 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -167,6 +167,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4573. Fix test failure in TestRMAppTransitions#testAppRunningKill and
 testAppKilledKilled. (Takashi Ohnishi via rohithsharmaks)
 
+YARN-4633. Fix random test failure in 
TestRMRestart#testRMRestartAfterPreemption
+(Bibin A Chundatt via rohithsharmaks)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef343be8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index 3bab88a..e999e6b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -2359,6 +2359,7 @@ public class TestRMRestart extends 
ParameterizedSchedulerTestBase {
   // kill app0-attempt
   cs.killPreemptedContainer(schedulerAppAttempt.getRMContainer(
   app0.getCurrentAppAttempt().getMasterContainer().getId()));
+  am0.waitForState(RMAppAttemptState.FAILED);
 }
 am0 = MockRM.launchAM(app0, rm1, nm1);
 am0.registerAppAttempt();



[08/50] [abbrv] hadoop git commit: Release process for 2.7.2: Set the release date for 2.7.2

2016-02-01 Thread aengineer
Release process for 2.7.2: Set the release date for 2.7.2


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec4d2d9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec4d2d9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec4d2d9f

Branch: refs/heads/HDFS-7240
Commit: ec4d2d9f40c1cb52ca4561b3d010ffc046a73495
Parents: 992dd2f
Author: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Authored: Mon Jan 25 15:45:12 2016 -0800
Committer: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Committed: Mon Jan 25 15:45:57 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
 hadoop-mapreduce-project/CHANGES.txt| 2 +-
 hadoop-yarn-project/CHANGES.txt | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec4d2d9f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9606296..5121a83 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1693,7 +1693,7 @@ Release 2.7.3 - UNRELEASED
 HADOOP-12706. TestLocalFsFCStatistics#testStatisticsThreadLocalDataCleanUp
 times out occasionally (Sangjin Lee and Colin Patrick McCabe via jlowe)
 
-Release 2.7.2 - UNRELEASED
+Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec4d2d9f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e5285b6..f35ae3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2690,7 +2690,7 @@ Release 2.7.3 - UNRELEASED
 HDFS-9625. set replication for empty file failed when set storage policy
 (DENG FEI via vinayakumarb)
 
-Release 2.7.2 - UNRELEASED
+Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec4d2d9f/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ba392c3..8f35c6f 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -722,7 +722,7 @@ Release 2.7.3 - UNRELEASED
 MAPREDUCE-6554. MRAppMaster servicestart failing with NPE in
 MRAppMaster#parsePreviousJobHistory (Bibin A Chundatt via jlowe)
 
-Release 2.7.2 - UNRELEASED
+Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec4d2d9f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e5049d9..41802ae 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1406,7 +1406,7 @@ Release 2.7.3 - UNRELEASED
 YARN-4598. Invalid event: RESOURCE_FAILED at
 CONTAINER_CLEANEDUP_AFTER_KILL (tangshangwen via jlowe)
 
-Release 2.7.2 - UNRELEASED
+Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES
 



[35/50] [abbrv] hadoop git commit: YARN-4219. addendum patch to fix javadoc errors

2016-02-01 Thread aengineer
YARN-4219. addendum patch to fix javadoc errors


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09d831c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09d831c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09d831c9

Branch: refs/heads/HDFS-7240
Commit: 09d831c95ba18e2892cddd749f6e06f112dda7f5
Parents: f67149a
Author: Rohith Sharma K S 
Authored: Fri Jan 29 11:51:47 2016 +0530
Committer: Rohith Sharma K S 
Committed: Fri Jan 29 11:51:47 2016 +0530

--
 .../hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09d831c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
index 976241f..3ff5dd7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
@@ -46,11 +46,11 @@ import java.util.Map;
  * There are two partitions of the key space. One partition is to store a
  * entity id to start time mapping:
  *
- * i!ENTITY_ID!ENTITY_TYPE -> ENTITY_START_TIME
+ * i!ENTITY_ID!ENTITY_TYPE to ENTITY_START_TIME
  *
  * The other partition is to store the actual data:
  *
- * e!START_TIME!ENTITY_ID!ENTITY_TYPE -> ENTITY_BYTES
+ * e!START_TIME!ENTITY_ID!ENTITY_TYPE to ENTITY_BYTES
  *
  * This storage does not have any garbage collection mechanism, and is designed
  * mainly for caching usages.



[16/50] [abbrv] hadoop git commit: YARN-4612. Fix rumen and scheduler load simulator handle killed tasks properly. Contributed by Ming Ma.

2016-02-01 Thread aengineer
YARN-4612. Fix rumen and scheduler load simulator handle killed tasks
properly. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4efdf3a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4efdf3a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4efdf3a9

Branch: refs/heads/HDFS-7240
Commit: 4efdf3a979c361348612f817a3253be6d0de58f7
Parents: d323639
Author: Xuan 
Authored: Tue Jan 26 18:17:12 2016 -0800
Committer: Xuan 
Committed: Tue Jan 26 18:17:12 2016 -0800

--
 .../apache/hadoop/tools/rumen/JobBuilder.java   |  11 +-
 .../src/main/data/2jobs2min-rumen-jh.json   | 606 +++
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |   6 +
 .../apache/hadoop/yarn/sls/utils/SLSUtils.java  |   6 +
 hadoop-yarn-project/CHANGES.txt |   3 +
 5 files changed, 628 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4efdf3a9/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobBuilder.java
--
diff --git 
a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobBuilder.java
 
b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobBuilder.java
index c5ae2fc..890f388 100644
--- 
a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobBuilder.java
+++ 
b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobBuilder.java
@@ -473,9 +473,12 @@ public class JobBuilder {
 task.setTaskStatus(getPre21Value(event.getTaskStatus()));
 TaskFailed t = (TaskFailed)(event.getDatum());
 task.putDiagnosticInfo(t.error.toString());
-task.putFailedDueToAttemptId(t.failedDueToAttempt.toString());
+// A killed task wouldn't have a failed attempt.
+if (t.getFailedDueToAttempt() != null) {
+  task.putFailedDueToAttemptId(t.getFailedDueToAttempt().toString());
+}
 org.apache.hadoop.mapreduce.jobhistory.JhCounters counters =
-((TaskFailed) event.getDatum()).counters;
+((TaskFailed) event.getDatum()).getCounters();
 task.incorporateCounters(
 counters == null ? EMPTY_COUNTERS : counters);
   }
@@ -500,7 +503,7 @@ public class JobBuilder {
 
 attempt.setFinishTime(event.getFinishTime());
 org.apache.hadoop.mapreduce.jobhistory.JhCounters counters =
-((TaskAttemptUnsuccessfulCompletion) event.getDatum()).counters;
+((TaskAttemptUnsuccessfulCompletion) event.getDatum()).getCounters();
 attempt.incorporateCounters(
 counters == null ? EMPTY_COUNTERS : counters);
 attempt.arraySetClockSplits(event.getClockSplits());
@@ -509,7 +512,7 @@ public class JobBuilder {
 attempt.arraySetPhysMemKbytes(event.getPhysMemKbytes());
 TaskAttemptUnsuccessfulCompletion t =
 (TaskAttemptUnsuccessfulCompletion) (event.getDatum());
-attempt.putDiagnosticInfo(t.error.toString());
+attempt.putDiagnosticInfo(t.getError().toString());
   }
 
   private void processTaskAttemptStartedEvent(TaskAttemptStartedEvent event) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4efdf3a9/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
--
diff --git a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json 
b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
index 83629ed..9d90deb 100644
--- a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
+++ b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
@@ -10208,4 +10208,610 @@
   "clusterReduceMB" : -1,
   "jobMapMB" : 200,
   "jobReduceMB" : 200
+} {
+"priority" : "NORMAL",
+"jobID" : "job_1369942127770_1207",
+"user" : "jenkins",
+"jobName" : "TeraGen",
+"submitTime" : 1371223054499,
+"finishTime" : 1371223153874,
+"queue" : "sls_queue_1",
+"mapTasks" : [ {
+"startTime" : 1371223059053,
+"taskID" : "task_1369942127770_1207_m_00",
+"taskType" : "MAP",
+"finishTime" : 1371223078206,
+"attempts" : [ ],
+"preferredLocations" : [ ],
+"taskStatus" : "KILLED",
+"inputBytes" : -1,
+"inputRecords" : -1,
+"outputBytes" : -1,
+"outputRecords" : -1
+} ],
+"reduceTasks" : [ ],
+"launchTime" : 1371223058937,
+"totalMaps" : 1,
+"totalReduces" : 0,
+"otherTasks" : [ ],
+"jobProperties" : {
+"mapreduce.job.ubertask.enable" : "false",
+"yarn.resourcemanager.max-completed-applications" : "1",
+"yarn.resourcemanager.delayed.delegation-token.removal-interval-ms" : "3",
+"mapreduce.client.submit.file.replication" : "2",
+"yarn.nodemanager.container-manager.thread-count" : "20",
+"mapred.queue.default.acl-administer-jobs" : "*",
+"dfs.image.transfer.bandwidthPerSec" : "0",

[20/50] [abbrv] hadoop git commit: HADOOP-12735. core-default.xml misspells hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)

2016-02-01 Thread aengineer
HADOOP-12735. core-default.xml misspells 
hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e8ab3d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e8ab3d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e8ab3d4

Branch: refs/heads/HDFS-7240
Commit: 2e8ab3d46568162af6aa90b612ed61d487e7c7b0
Parents: 79d7949
Author: Colin Patrick Mccabe 
Authored: Wed Jan 27 11:39:55 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Wed Jan 27 11:39:55 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/main/resources/core-default.xml | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e8ab3d4/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3b8376f..4da20e0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1651,6 +1651,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12718. Incorrect error message by fs -put local dir without
 permission. (John Zhuge via Yongjun Zhang)
 
+HADOOP-12735. core-default.xml misspells
+hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e8ab3d4/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index c25f49e..ed3802f 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -428,7 +428,7 @@ for ldap providers in the same way as above does.
 
 
 
-  <name>hadoop.work.around.non.threadsafe.getpwuid</name>
+  <name>hadoop.workaround.non.threadsafe.getpwuid</name>
   <value>false</value>
   <description>Some operating systems or authentication modules are known to
   have broken implementations of getpwuid_r and getpwgid_r, such that these


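Why a one-character spelling difference matters: Configuration lookups are exact string matches, so the lookup in the native-IO code silently fell back to its default while the misspelled entry in core-default.xml was never read. A minimal sketch (only the key name is taken from the diff above; the rest is illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class GetpwuidKeyCheck {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // If the XML spells the key differently from this lookup, the value
        // configured there is invisible and the hard-coded default wins.
        boolean workaround =
            conf.getBoolean("hadoop.workaround.non.threadsafe.getpwuid", false);
        System.out.println("getpwuid_r workaround enabled: " + workaround);
      }
    }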

[07/50] [abbrv] hadoop git commit: YARN-4520. Finished app info is unnecessarily persisted in NM state-store if container is acquired but not launched on this node. Contributed by sandflee

2016-02-01 Thread aengineer
YARN-4520. Finished app info is unnecessarily persisted in NM state-store if 
container is acquired but not launched on this node. Contributed by sandflee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/992dd2f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/992dd2f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/992dd2f7

Branch: refs/heads/HDFS-7240
Commit: 992dd2f783fc051c32727d4a45a5c61c22bf5640
Parents: d62b4a4
Author: Jian He 
Authored: Mon Jan 25 15:35:51 2016 -0800
Committer: Jian He 
Committed: Mon Jan 25 15:36:14 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt| 3 +++
 .../nodemanager/containermanager/ContainerManagerImpl.java | 6 ++
 2 files changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/992dd2f7/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8ece214..e5049d9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1327,6 +1327,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-4592. Remove unused GetContainerStatus proto. (Chang Li via aajisaka)
 
+YARN-4520. Finished app info is unnecessarily persisted in NM state-store
+if container is acquired but not launched on this node. (sandflee via
jianhe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/992dd2f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index f44de59..d0663d5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -1310,6 +1310,12 @@ public class ContainerManagerImpl extends 
CompositeService implements
   CMgrCompletedAppsEvent appsFinishedEvent =
   (CMgrCompletedAppsEvent) event;
   for (ApplicationId appID : appsFinishedEvent.getAppsToCleanup()) {
+Application app = this.context.getApplications().get(appID);
+if (app == null) {
+  LOG.warn("couldn't find application " + appID + " while processing"
+  + " FINISH_APPS event");
+  continue;
+}
 String diagnostic = "";
 if (appsFinishedEvent.getReason() == 
CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN) {
   diagnostic = "Application killed on shutdown";



[23/50] [abbrv] hadoop git commit: Revert "HDFS-9677. Rename generationStampV1/generationStampV2 to legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu."

2016-02-01 Thread aengineer
Revert "HDFS-9677. Rename generationStampV1/generationStampV2 to 
legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu."

This reverts commit 8a91109d16394310f2568717f103e6fff7cbddb0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a957130
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a957130
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a957130

Branch: refs/heads/HDFS-7240
Commit: 3a9571308e99cc374681bbc451a517d41a150aa0
Parents: 8a91109
Author: Jing Zhao 
Authored: Wed Jan 27 16:31:19 2016 -0800
Committer: Jing Zhao 
Committed: Wed Jan 27 16:31:19 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 -
 .../server/blockmanagement/BlockIdManager.java  | 83 ++--
 .../server/blockmanagement/BlockManager.java|  8 +-
 .../OutOfLegacyGenerationStampsException.java   | 38 -
 .../OutOfV1GenerationStampsException.java   | 38 +
 .../hdfs/server/common/HdfsServerConstants.java |  3 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  4 +-
 .../hdfs/server/namenode/FSImageFormat.java | 12 +--
 .../server/namenode/FSImageFormatProtobuf.java  | 12 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  8 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java | 13 ++-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|  4 +-
 .../blockmanagement/TestSequentialBlockId.java  | 18 ++---
 .../hdfs/server/namenode/TestEditLog.java   |  6 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |  4 +-
 .../hdfs/server/namenode/TestSaveNamespace.java |  2 +-
 17 files changed, 134 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a957130/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7e75558..097c051 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -959,9 +959,6 @@ Release 2.9.0 - UNRELEASED
 HDFS-9541. Add hdfsStreamBuilder API to libhdfs to support 
defaultBlockSizes
 greater than 2 GB. (cmccabe via zhz)
 
-HDFS-9677. Rename generationStampV1/generationStampV2 to
-legacyGenerationStamp/generationStamp. (Mingliang Liu via jing9)
-
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a957130/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 3f21d9b..9c71287 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -36,11 +36,11 @@ public class BlockIdManager {
* The global generation stamp for legacy blocks with randomly
* generated block IDs.
*/
-  private final GenerationStamp legacyGenerationStamp = new GenerationStamp();
+  private final GenerationStamp generationStampV1 = new GenerationStamp();
   /**
* The global generation stamp for this file system.
*/
-  private final GenerationStamp generationStamp = new GenerationStamp();
+  private final GenerationStamp generationStampV2 = new GenerationStamp();
   /**
* The value of the generation stamp when the first switch to sequential
* block IDs was made. Blocks with generation stamps below this value
@@ -49,7 +49,7 @@ public class BlockIdManager {
* (or initialized as an offset from the V1 (legacy) generation stamp on
* upgrade).
*/
-  private long legacyGenerationStampLimit;
+  private long generationStampV1Limit;
   /**
* The global block ID space for this file system.
*/
@@ -57,8 +57,7 @@ public class BlockIdManager {
   private final SequentialBlockGroupIdGenerator blockGroupIdGenerator;
 
   public BlockIdManager(BlockManager blockManager) {
-this.legacyGenerationStampLimit =
-HdfsConstants.GRANDFATHER_GENERATION_STAMP;
+this.generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
 this.blockIdGenerator = new SequentialBlockIdGenerator(blockManager);
 this.blockGroupIdGenerator = new 
SequentialBlockGroupIdGenerator(blockManager);
   }
@@ -69,14 +68,14 @@ public class BlockIdManager {
* Should be invoked 
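
For readers following the naming debate, a one-method sketch of the distinction both names describe (illustrative, not the BlockIdManager API): stamps below the recorded limit belong to the V1/legacy space used while block IDs were still randomly generated.

    final class GenerationStampSketch {
      // Stamps below the recorded limit come from the V1 (legacy) space.
      static boolean usesLegacyGenerationStamp(long blockGenStamp,
          long generationStampV1Limit) {
        return blockGenStamp < generationStampV1Limit;
      }

      public static void main(String[] args) {
        long limit = 1000000L; // hypothetical limit recorded at upgrade time
        System.out.println(usesLegacyGenerationStamp(999999L, limit));  // true
        System.out.println(usesLegacyGenerationStamp(1000001L, limit)); // false
      }
    }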

[05/50] [abbrv] hadoop git commit: HADOOP-12715. TestValueQueue#testgetAtMostPolicyALL fails intermittently. Contributed by Xiao Chen.

2016-02-01 Thread aengineer
HADOOP-12715. TestValueQueue#testgetAtMostPolicyALL fails intermittently. 
Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6eacdea0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6eacdea0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6eacdea0

Branch: refs/heads/HDFS-7240
Commit: 6eacdea0e475b4fff91cedce5005a7c11749cf64
Parents: 56a0c17
Author: Walter Su 
Authored: Mon Jan 25 19:30:04 2016 +0800
Committer: Walter Su 
Committed: Mon Jan 25 19:30:04 2016 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../hadoop/crypto/key/TestValueQueue.java   | 65 +++-
 2 files changed, 52 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eacdea0/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2fc8ab4..9606296 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -2496,6 +2496,9 @@ Release 2.6.4 - UNRELEASED
 HADOOP-12736. TestTimedOutTestsListener#testThreadDumpAndDeadlocks
 sometimes times out. (Xiao Chen via aajisaka)
 
+HADOOP-12715. TestValueQueue#testgetAtMostPolicyALL fails intermittently.
+(Xiao Chen via waltersu4549)
+
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eacdea0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
index 8e3a093..5eae9a0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
@@ -19,18 +19,24 @@ package org.apache.hadoop.crypto.key;
 
 import java.io.IOException;
 import java.util.Queue;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.crypto.key.kms.ValueQueue;
 import org.apache.hadoop.crypto.key.kms.ValueQueue.QueueRefiller;
 import org.apache.hadoop.crypto.key.kms.ValueQueue.SyncGenerationPolicy;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
+import com.google.common.base.Supplier;
 import com.google.common.collect.Sets;
 
 public class TestValueQueue {
+  Logger LOG = LoggerFactory.getLogger(TestValueQueue.class);
 
   private static class FillInfo {
 final int num;
@@ -60,7 +66,7 @@ public class TestValueQueue {
   /**
* Verifies that Queue is initially filled to "numInitValues"
*/
-  @Test
+  @Test(timeout=3)
   public void testInitFill() throws Exception {
 MockFiller filler = new MockFiller();
 ValueQueue<String> vq =
@@ -74,7 +80,7 @@ public class TestValueQueue {
   /**
* Verifies that Queue is initialized (Warmed-up) for provided keys
*/
-  @Test
+  @Test(timeout=3)
   public void testWarmUp() throws Exception {
 MockFiller filler = new MockFiller();
 ValueQueue<String> vq =
@@ -97,7 +103,7 @@ public class TestValueQueue {
* Verifies that the refill task is executed after "checkInterval" if
* num values below "lowWatermark"
*/
-  @Test
+  @Test(timeout=3)
   public void testRefill() throws Exception {
 MockFiller filler = new MockFiller();
 ValueQueue<String> vq =
@@ -116,7 +122,7 @@ public class TestValueQueue {
* Verifies that the No refill Happens after "checkInterval" if
* num values above "lowWatermark"
*/
-  @Test
+  @Test(timeout=3)
   public void testNoRefill() throws Exception {
 MockFiller filler = new MockFiller();
 ValueQueue<String> vq =
@@ -131,29 +137,56 @@ public class TestValueQueue {
   /**
* Verify getAtMost when SyncGeneration Policy = ALL
*/
-  @Test
+  @Test(timeout=3)
   public void testgetAtMostPolicyALL() throws Exception {
 MockFiller filler = new MockFiller();
-ValueQueue<String> vq =
+final ValueQueue<String> vq =
 new ValueQueue<String>(10, 0.1f, 300, 1,
 SyncGenerationPolicy.ALL, filler);
 Assert.assertEquals("test", vq.getNext("k1"));
 Assert.assertEquals(1, filler.getTop().num);
-// Drain completely
-Assert.assertEquals(10, 
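
The new imports (GenericTestUtils, Supplier) show the deflaking strategy: poll for the asynchronous refill instead of asserting on a fixed schedule. A minimal sketch of that pattern, with an illustrative condition:

    import java.util.concurrent.atomic.AtomicInteger;
    import com.google.common.base.Supplier;
    import org.apache.hadoop.test.GenericTestUtils;

    class PollingAssertSketch {
      // Wait for a background task's effect instead of sleeping a fixed
      // interval and hoping it has already run.
      static void awaitValue(final AtomicInteger counter, final int expected)
          throws Exception {
        GenericTestUtils.waitFor(new Supplier<Boolean>() {
          @Override
          public Boolean get() {
            return counter.get() >= expected;
          }
        }, 100, 30000); // check every 100 ms, give up after 30 s
      }
    }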

[01/50] [abbrv] hadoop git commit: Added MAPREDUCE-6614 to 2.8.0 in CHANGES.txt.

2016-02-01 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 bbe9e8b2d -> 16440b836


Added MAPREDUCE-6614 to 2.8.0 in CHANGES.txt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b833297
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b833297
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b833297

Branch: refs/heads/HDFS-7240
Commit: 2b833297ceb523d39e683fcd34ed8ab9b5651bcf
Parents: 10a2bc0
Author: Akira Ajisaka 
Authored: Mon Jan 25 11:41:19 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Jan 25 11:41:19 2016 +0900

--
 hadoop-mapreduce-project/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b833297/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 5eb6984..ba392c3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -695,6 +695,9 @@ Release 2.8.0 - UNRELEASED
 mapreduce.reduce.skip.proc.count.autoincr in mapred-default.xml.
 (Kai Sasaki via aajisaka)
 
+MAPREDUCE-6614. Remove unnecessary code in TestMapreduceConfigFields.
+(Kai Sasaki via aajisaka)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[47/50] [abbrv] hadoop git commit: HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames (Mingliang Liu via stevel)

2016-02-01 Thread aengineer
HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames  (Mingliang 
Liu via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af2dccbc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af2dccbc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af2dccbc

Branch: refs/heads/HDFS-7240
Commit: af2dccbca50b25f84d9d6c88e1a237a42261ce02
Parents: 8f2622b
Author: Steve Loughran 
Authored: Mon Feb 1 16:01:22 2016 +
Committer: Steve Loughran 
Committed: Mon Feb 1 16:01:50 2016 +

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/namenode/FSNamesystem.java  |  3 +-
 .../hdfs/server/namenode/TestFSNamesystem.java  | 78 
 3 files changed, 83 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2dccbc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5a8b525..fdf69d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2661,6 +2661,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final
 DatanodeStorage.State state)' method (Daryn Sharp via vinayakumarb)
 
+HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames
+(Mingliang Liu via stevel)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2dccbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 5e2e975..0387c32 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -889,7 +889,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 
   private List<AuditLogger> initAuditLoggers(Configuration conf) {
 // Initialize the custom access loggers if configured.
-Collection<String> alClasses = conf.getStringCollection(DFS_NAMENODE_AUDIT_LOGGERS_KEY);
+Collection<String> alClasses =
+conf.getTrimmedStringCollection(DFS_NAMENODE_AUDIT_LOGGERS_KEY);
 List<AuditLogger> auditLoggers = Lists.newArrayList();
 if (alClasses != null && !alClasses.isEmpty()) {
   for (String className : alClasses) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2dccbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index 6308179..b9a2d15 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -20,14 +20,18 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.hamcrest.CoreMatchers.either;
+import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.InetAddress;
 import java.net.URI;
 import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -38,6 +42,7 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.log4j.Level;
@@ -47,6 +52,7 @@ import 
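
Why the one-line change is the whole fix: a value such as "default, org.example.MyAuditLogger" splits into a second entry with a leading space, and Class.forName(" org.example.MyAuditLogger") then fails. A small sketch of the difference (the logger class name is hypothetical):

    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;

    public class TrimmedCollectionDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("dfs.namenode.audit.loggers",
            "default, org.example.MyAuditLogger"); // note the space
        Collection<String> raw =
            conf.getStringCollection("dfs.namenode.audit.loggers");
        Collection<String> trimmed =
            conf.getTrimmedStringCollection("dfs.namenode.audit.loggers");
        System.out.println(raw);     // second entry keeps its leading space
        System.out.println(trimmed); // padding removed, safe for Class.forName()
      }
    }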

[42/50] [abbrv] hadoop git commit: YARN-4617. LeafQueue#pendingOrderingPolicy should always use fixed ordering policy instead of using same as active applications ordering policy. Contributed by Rohit

2016-02-01 Thread aengineer
YARN-4617. LeafQueue#pendingOrderingPolicy should always use fixed ordering 
policy instead of using same as active applications ordering policy. 
Contributed by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4a57d4a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4a57d4a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4a57d4a

Branch: refs/heads/HDFS-7240
Commit: f4a57d4a531e793373fe3118d644871a3b9ae0b1
Parents: eddd823
Author: Jian He 
Authored: Fri Jan 29 12:22:06 2016 -0800
Committer: Jian He 
Committed: Fri Jan 29 12:22:23 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt |  4 +
 .../dev-support/findbugs-exclude.xml|  4 +
 .../scheduler/SchedulerApplicationAttempt.java  |  3 +-
 .../scheduler/capacity/LeafQueue.java   | 82 --
 .../FifoOrderingPolicyForPendingApps.java   | 73 
 .../scheduler/policy/RecoveryComparator.java| 33 
 .../scheduler/policy/SchedulableEntity.java | 10 ++-
 .../scheduler/capacity/TestLeafQueue.java   |  2 -
 .../scheduler/policy/MockSchedulableEntity.java | 17 
 .../TestFifoOrderingPolicyForPendingApps.java   | 89 
 10 files changed, 244 insertions(+), 73 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4a57d4a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 76cad7f..ebbf0f4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1361,6 +1361,10 @@ Release 2.8.0 - UNRELEASED
 YARN-4643. Container recovery is broken with delegating container runtime
 (Sidharta Seethana via jlowe)
 
+YARN-4617. LeafQueue#pendingOrderingPolicy should always use fixed ordering
+policy instead of using same as active applications ordering policy.
+(Rohith Sharma K S via jianhe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4a57d4a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index c12377b..c640d9f 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -163,6 +163,10 @@
 
   
   
+
+
+  
+  
 
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4a57d4a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index ca05fe9..0cbb88d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -968,7 +968,8 @@ public class SchedulerApplicationAttempt implements 
SchedulableEntity {
 // queue's resource usage for specific partition
   }
 
-  public boolean isAttemptRecovering() {
+  @Override
+  public boolean isRecovering() {
 return isAttemptRecovering;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4a57d4a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 
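
The idea behind the new FifoOrderingPolicyForPendingApps and RecoveryComparator, sketched with stand-in types (illustrative, not the YARN interfaces): recovering attempts are activated ahead of newly submitted ones, with plain FIFO as the tie-breaker.

    import java.util.Comparator;

    // Stand-in for the scheduler's schedulable entity; illustrative only.
    interface PendingApp {
      boolean isRecovering();
      long getSubmissionOrder();
    }

    class RecoveryThenFifoComparator implements Comparator<PendingApp> {
      @Override
      public int compare(PendingApp a, PendingApp b) {
        if (a.isRecovering() != b.isRecovering()) {
          return a.isRecovering() ? -1 : 1; // recovering apps sort first
        }
        // then plain submission (FIFO) order
        return Long.compare(a.getSubmissionOrder(), b.getSubmissionOrder());
      }
    }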

[41/50] [abbrv] hadoop git commit: HDFS-9638. Improve DistCp Help and documentation. (Wei-Chiu Chuang via Yongjun Zhang)

2016-02-01 Thread aengineer
HDFS-9638. Improve DistCp Help and documentation. (Wei-Chiu Chuang via Yongjun 
Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eddd823c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eddd823c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eddd823c

Branch: refs/heads/HDFS-7240
Commit: eddd823cd6246ddc66218eb01009c44b0236eaaa
Parents: c9a09d6
Author: Yongjun Zhang 
Authored: Fri Jan 29 12:11:55 2016 -0800
Committer: Yongjun Zhang 
Committed: Fri Jan 29 12:11:55 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/tools/DistCpOptionSwitch.java | 2 +-
 hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm| 8 +---
 .../test/java/org/apache/hadoop/tools/TestOptionsParser.java | 3 ++-
 4 files changed, 11 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eddd823c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 570caa5..f7487fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1863,6 +1863,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9706. Log more details in debug logs in BlockReceiver's constructor.
 (Xiao Chen via Yongjun Zhang)
 
+HDFS-9638. Improve DistCp Help and documentation.
+(Wei-Chiu Chuang via Yongjun Zhang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eddd823c/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
index f16a5d2..9485584 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
@@ -82,7 +82,7 @@ public enum DistCpOptionSwitch {
*/
   SSL_CONF(DistCpConstants.CONF_LABEL_SSL_CONF,
   new Option("mapredSslConf", true, "Configuration for ssl config file" +
-  ", to use with hftps://")),
+  ", to use with hftps://. Must be in the classpath.")),
   /**
* Number of threads for building source file listing (before map-reduce
* phase, max one listStatus per thread at a time).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eddd823c/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
--
diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm 
b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
index aacf4c7..8f64ea2 100644
--- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
+++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
@@ -218,7 +218,7 @@ Command Line Options
 
 Flag  | Description  | Notes
 - |  | 
-`-p[rbugpcax]` | Preserve r: replication number b: block size u: user g: group 
p: permission c: checksum-type a: ACL x: XAttr | Modification times are not 
preserved. Also, when `-update` is specified, status updates will **not** be 
synchronized unless the file sizes also differ (i.e. unless the file is 
re-created). If -pa is specified, DistCp preserves the permissions also because 
ACLs are a super-set of permissions.
+`-p[rbugpcaxt]` | Preserve r: replication number b: block size u: user g: 
group p: permission c: checksum-type a: ACL x: XAttr t: timestamp | When 
`-update` is specified, status updates will **not** be synchronized unless the 
file sizes also differ (i.e. unless the file is re-created). If -pa is 
specified, DistCp preserves the permissions also because ACLs are a super-set 
of permissions.
 `-i` | Ignore failures | As explained in the Appendix, this option will keep 
more accurate statistics about the copy than the default case. It also 
preserves logs from failed copies, which can be valuable for debugging. 
Finally, a failing map will not cause the job to fail before all splits are 
attempted.
 `-log ` | Write logs to \ | DistCp keeps logs of each file it 
attempts to copy as map output. If a map fails, the log output will not be 
retained if it is re-executed.
 `-m ` | Maximum number of simultaneous copies | Specify the number 
of maps to copy data. Note 
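
A hedged usage sketch tying the documented flags together; the paths, the map count, and the assumption that a null DistCpOptions is acceptable to the public DistCp(Configuration, DistCpOptions) constructor (run() re-parses the argument list) are all illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tools.DistCp;
    import org.apache.hadoop.util.ToolRunner;

    public class DistCpUpdateExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // -prbugpt combines the per-attribute preserve flags from the table:
        // replication, block size, user, group, permission, timestamp.
        int rc = ToolRunner.run(conf, new DistCp(conf, null), new String[] {
            "-update", "-prbugpt", "-m", "20",
            "hdfs://nn1:8020/src", "hdfs://nn2:8020/dst"});
        System.exit(rc);
      }
    }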

[10/50] [abbrv] hadoop git commit: YARN-3542. Refactored existing CPU cgroups support to use the newer and integrated ResourceHandler mechanism, and also deprecated the old LCEResourceHandler interface

2016-02-01 Thread aengineer
YARN-3542. Refactored existing CPU cgroups support to use the newer and 
integrated ResourceHandler mechanism, and also deprecated the old 
LCEResourceHandler interface hierarchy. Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2085e60a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2085e60a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2085e60a

Branch: refs/heads/HDFS-7240
Commit: 2085e60a9655b59aa2ba8917acdc511ab71ff6e4
Parents: e8650fe
Author: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Authored: Mon Jan 25 16:19:03 2016 -0800
Committer: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Committed: Mon Jan 25 16:19:36 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt |   4 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  12 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   2 +
 .../nodemanager/LinuxContainerExecutor.java |  28 +-
 .../CGroupsCpuResourceHandlerImpl.java  | 235 +++
 .../linux/resources/CGroupsHandler.java |   4 +
 .../linux/resources/CpuResourceHandler.java |  32 ++
 .../linux/resources/ResourceHandlerModule.java  |  34 +++
 .../util/CgroupsLCEResourcesHandler.java|  68 +
 .../util/DefaultLCEResourcesHandler.java|   1 +
 .../nodemanager/util/LCEResourcesHandler.java   |   1 +
 .../TestCGroupsCpuResourceHandlerImpl.java  | 297 +++
 .../util/TestCgroupsLCEResourcesHandler.java|   1 +
 13 files changed, 661 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2085e60a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 41802ae..c2f16d5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -107,6 +107,10 @@ Release 2.9.0 - UNRELEASED
 YARN-4496. Improve HA ResourceManager Failover detection on the client.
 (Jian He via xgong)
 
+YARN-3542. Refactored existing CPU cgroups support to use the newer and
+integrated ResourceHandler mechanism, and also deprecated the old
+LCEResourceHandler interface hierarchy. (Varun Vasudev via vinodkv)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2085e60a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 23c2969..e214a86 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -938,6 +938,18 @@ public class YarnConfiguration extends Configuration {
   DEFAULT_NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE =
   90.0f;
 
+  @Private
+  public static final String NM_CPU_RESOURCE_PREFIX = NM_PREFIX
+  + "resource.cpu.";
+
+  /** Enable cpu isolation. */
+  @Private
+  public static final String NM_CPU_RESOURCE_ENABLED =
+  NM_CPU_RESOURCE_PREFIX + "enabled";
+
+  @Private
+  public static final boolean DEFAULT_NM_CPU_RESOURCE_ENABLED = false;
+
   /**
* Prefix for disk configurations. Work in progress: This configuration
* parameter may be changed/removed in the future.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2085e60a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 0e508ed..529d63b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -111,6 +111,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.NM_DISK_RESOURCE_ENABLED);
 configurationPrefixToSkipCompare
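
A short sketch of flipping the new switch programmatically, using only the constants added above (treating it like any other boolean NM resource flag is the assumption here):

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class CpuIsolationFlag {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // yarn.nodemanager.resource.cpu.enabled (off by default)
        conf.setBoolean(YarnConfiguration.NM_CPU_RESOURCE_ENABLED, true);
        System.out.println(conf.getBoolean(
            YarnConfiguration.NM_CPU_RESOURCE_ENABLED,
            YarnConfiguration.DEFAULT_NM_CPU_RESOURCE_ENABLED));
      }
    }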
 

[26/50] [abbrv] hadoop git commit: HDFS-9654. Code refactoring for HDFS-8578.

2016-02-01 Thread aengineer
HDFS-9654. Code refactoring for HDFS-8578.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/662e17b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/662e17b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/662e17b4

Branch: refs/heads/HDFS-7240
Commit: 662e17b46a0f41ade6a304e12925b70b5d09fc2f
Parents: dca0dc8
Author: Tsz-Wo Nicholas Sze 
Authored: Thu Jan 28 10:56:01 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Thu Jan 28 10:58:03 2016 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hadoop/hdfs/server/common/Storage.java  |   3 +-
 .../server/datanode/BlockPoolSliceStorage.java  | 131 +
 .../hdfs/server/datanode/DataStorage.java   | 282 ++-
 .../hdfs/server/datanode/StorageLocation.java   |  15 +
 .../org/apache/hadoop/hdfs/TestReplication.java |   3 +-
 .../apache/hadoop/hdfs/UpgradeUtilities.java|   2 +-
 .../server/datanode/SimulatedFSDataset.java |   2 +-
 .../datanode/TestDataNodeHotSwapVolumes.java|  48 +++-
 .../hdfs/server/datanode/TestDataStorage.java   |   7 +-
 .../fsdataset/impl/TestFsDatasetImpl.java   |   2 +-
 11 files changed, 297 insertions(+), 200 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/662e17b4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7e75558..a51dc15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2665,6 +2665,8 @@ Release 2.7.3 - UNRELEASED
 HDFS-9634. webhdfs client side exceptions don't provide enough details
 (Eric Payne via kihwal)
 
+HDFS-9654. Code refactoring for HDFS-8578.  (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/662e17b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 7b4b571..41719b9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -640,7 +640,8 @@ public abstract class Storage extends StorageInfo {
 rename(getLastCheckpointTmp(), curDir);
 return;
   default:
-throw new IOException("Unexpected FS state: " + curState);
+throw new IOException("Unexpected FS state: " + curState
++ " for storage directory: " + rootPath);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/662e17b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 1bb..acf10f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -18,10 +18,21 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -34,18 +45,9 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.util.Daemon;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import 

[15/50] [abbrv] hadoop git commit: HADOOP-12743. Fix git environment check during test-patch (aw)

2016-02-01 Thread aengineer
HADOOP-12743. Fix git environment check during test-patch (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3236396
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3236396
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3236396

Branch: refs/heads/HDFS-7240
Commit: d323639686eab28f1510031e52e4390f82d78989
Parents: cf8af7b
Author: Allen Wittenauer 
Authored: Tue Jan 26 15:46:57 2016 -0800
Committer: Allen Wittenauer 
Committed: Tue Jan 26 15:47:07 2016 -0800

--
 dev-support/bin/yetus-wrapper | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3236396/dev-support/bin/yetus-wrapper
--
diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper
index 37082d8..ac3e121 100755
--- a/dev-support/bin/yetus-wrapper
+++ b/dev-support/bin/yetus-wrapper
@@ -165,6 +165,7 @@ if [[ $? != 0 ]]; then
 fi
 
 if [[ -x "${HADOOP_PATCHPROCESS}/yetus-${HADOOP_YETUS_VERSION}/bin/${WANTED}" 
]]; then
+  popd >/dev/null
   exec "${HADOOP_PATCHPROCESS}/yetus-${HADOOP_YETUS_VERSION}/bin/${WANTED}" 
"${ARGV[@]}"
 fi
 



[21/50] [abbrv] hadoop git commit: YARN-4462. FairScheduler: Disallow preemption from a queue. (Tao Jie via kasha)

2016-02-01 Thread aengineer
YARN-4462. FairScheduler: Disallow preemption from a queue. (Tao Jie via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb238d7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb238d7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb238d7e

Branch: refs/heads/HDFS-7240
Commit: fb238d7e5dcd96466c8938b13ca7f13cedecb08a
Parents: 2e8ab3d
Author: Karthik Kambatla 
Authored: Wed Jan 27 11:47:29 2016 -0800
Committer: Karthik Kambatla 
Committed: Wed Jan 27 12:29:06 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../scheduler/fair/AllocationConfiguration.java |  11 +-
 .../fair/AllocationFileLoaderService.java   |  16 +-
 .../scheduler/fair/FSParentQueue.java   |   8 +
 .../resourcemanager/scheduler/fair/FSQueue.java |  11 +-
 .../webapp/FairSchedulerPage.java   |   1 +
 .../webapp/dao/FairSchedulerQueueInfo.java  |   7 +
 .../scheduler/fair/TestFairScheduler.java   | 327 +++
 8 files changed, 377 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb238d7e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2fbecdb..2fae034 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -67,6 +67,8 @@ Release 2.9.0 - UNRELEASED
 YARN-1856. Added cgroups based memory monitoring for containers as another
 alternative to custom memory-monitoring. (Varun Vasudev via vinodkv)
 
+YARN-4462. FairScheduler: Disallow preemption from a queue. (Tao Jie via 
kasha)
+
   IMPROVEMENTS
 
 YARN-4072. ApplicationHistoryServer, WebAppProxyServer, NodeManager and

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb238d7e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index bf4eae8..180ae49 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -98,6 +98,8 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
   // Reservation system configuration
   private ReservationQueueConfiguration globalReservationQueueConfig;
 
+  private final Set<String> nonPreemptableQueues;
+
   public AllocationConfiguration(Map<String, Resource> minQueueResources,
   Map<String, Resource> maxQueueResources,
   Map<String, Integer> queueMaxApps, Map<String, Integer> userMaxApps,
@@ -114,7 +116,8 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
   QueuePlacementPolicy placementPolicy,
   Map<FSQueueType, Set<String>> configuredQueues,
   ReservationQueueConfiguration globalReservationQueueConfig,
-  Set<String> reservableQueues) {
+  Set<String> reservableQueues,
+  Set<String> nonPreemptableQueues) {
 this.minQueueResources = minQueueResources;
 this.maxQueueResources = maxQueueResources;
 this.queueMaxApps = queueMaxApps;
@@ -135,6 +138,7 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
 this.globalReservationQueueConfig = globalReservationQueueConfig;
 this.placementPolicy = placementPolicy;
 this.configuredQueues = configuredQueues;
+this.nonPreemptableQueues = nonPreemptableQueues;
   }
   
   public AllocationConfiguration(Configuration conf) {
@@ -161,6 +165,7 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
 }
 placementPolicy = QueuePlacementPolicy.fromConfiguration(conf,
 configuredQueues);
+nonPreemptableQueues = new HashSet<String>();
   }
   
   /**
@@ -210,6 +215,10 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
 -1f : fairSharePreemptionThreshold;
   }
 
+  public boolean isPreemptable(String queueName) {
+return 
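
How the new accessor is meant to be consumed, sketched with stand-in types (illustrative; only isPreemptable(String) mirrors the API added above, and the real selection logic lives in the scheduler):

    import java.util.ArrayList;
    import java.util.List;

    interface AllocationConfigView { boolean isPreemptable(String queueName); }
    interface QueueView { String getName(); }

    class PreemptionCandidateFilter {
      // Queues marked non-preemptable in the allocation file are skipped
      // entirely when collecting containers to preempt.
      static List<QueueView> preemptableOnly(List<QueueView> queues,
          AllocationConfigView conf) {
        List<QueueView> out = new ArrayList<QueueView>();
        for (QueueView q : queues) {
          if (conf.isPreemptable(q.getName())) {
            out.add(q);
          }
        }
        return out;
      }
    }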

[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2016-02-01 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/16440b83/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
index 3147767,000..c85a554
mode 100644,00..100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
@@@ -1,323 -1,0 +1,323 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.hadoop.storagecontainer;
 +
 +import com.google.protobuf.BlockingService;
 +import org.apache.hadoop.ha.HAServiceProtocol;
 +import org.apache.hadoop.hdfs.DFSUtil;
 +import org.apache.hadoop.hdfs.DFSUtilClient;
 +import org.apache.hadoop.hdfs.protocol.*;
 +import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos;
 +import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB;
 +import 
org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB;
 +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 +import org.apache.hadoop.hdfs.server.blockmanagement.BlocksMap;
 +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 +import org.apache.hadoop.hdfs.server.namenode.NameNode;
 +import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 +import org.apache.hadoop.hdfs.server.protocol.*;
 +import org.apache.hadoop.ipc.ProtobufRpcEngine;
 +import org.apache.hadoop.ipc.RPC;
 +import org.apache.hadoop.ipc.WritableRpcEngine;
 +import org.apache.hadoop.net.NetUtils;
 +import org.apache.hadoop.ozone.OzoneConfiguration;
 +import org.apache.hadoop.storagecontainer.protocol.ContainerLocationProtocol;
 +import org.apache.hadoop.util.LightWeightGSet;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import java.io.IOException;
 +import java.net.InetSocketAddress;
 +import java.util.List;
 +
 +import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 +
 +/**
 + * Service that allocates storage containers and tracks their
 + * location.
 + */
 +public class StorageContainerManager
 +implements DatanodeProtocol, ContainerLocationProtocol {
 +
 +  public static final Logger LOG =
 +  LoggerFactory.getLogger(StorageContainerManager.class);
 +
 +  private final Namesystem ns = new StorageContainerNameService();
 +  private final BlockManager blockManager;
 +
 +  private long txnId = 234;
 +
 +  /** The RPC server that listens to requests from DataNodes. */
 +  private final RPC.Server serviceRpcServer;
 +  private final InetSocketAddress serviceRPCAddress;
 +
 +  /** The RPC server that listens to requests from clients. */
 +  private final RPC.Server clientRpcServer;
 +  private final InetSocketAddress clientRpcAddress;
 +
 +  public StorageContainerManager(OzoneConfiguration conf)
 +  throws IOException {
 +BlocksMap containerMap = new BlocksMap(
 +LightWeightGSet.computeCapacity(2.0, "BlocksMap"),
 +new StorageContainerMap());
- this.blockManager = new BlockManager(ns, conf, containerMap);
++this.blockManager = new BlockManager(ns, false, conf, containerMap);
 +
 +int handlerCount =
 +conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY,
 +DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
 +
 +RPC.setProtocolEngine(conf, DatanodeProtocolPB.class,
 +ProtobufRpcEngine.class);
 +
 +DatanodeProtocolServerSideTranslatorPB dnProtoPbTranslator =
 +new DatanodeProtocolServerSideTranslatorPB(this);
 +BlockingService dnProtoPbService =
 +DatanodeProtocolProtos.DatanodeProtocolService
 +.newReflectiveBlockingService(dnProtoPbTranslator);
 +
 +WritableRpcEngine.ensureInitialized();
 +
 +InetSocketAddress serviceRpcAddr = NameNode.getServiceAddress(conf, 
false);
 +if (serviceRpcAddr != null) {
 +  String bindHost =
 +  conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
 +  if (bindHost == null || bindHost.isEmpty()) {
 +

[30/50] [abbrv] hadoop git commit: YARN-4219. New levelDB cache storage for timeline v1.5. Contributed by Li Lu

2016-02-01 Thread aengineer
YARN-4219. New levelDB cache storage for timeline v1.5. Contributed by
Li Lu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fab22b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fab22b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fab22b3

Branch: refs/heads/HDFS-7240
Commit: 9fab22b36673e7f1a0bb629d2c07966ac2482e99
Parents: 61382ff
Author: Xuan 
Authored: Thu Jan 28 14:24:22 2016 -0800
Committer: Xuan 
Committed: Thu Jan 28 14:24:22 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   7 +
 .../src/main/resources/yarn-default.xml |   8 +
 .../timeline/KeyValueBasedTimelineStore.java| 574 +++
 .../server/timeline/MemoryTimelineStore.java| 491 ++--
 .../timeline/TimelineStoreMapAdapter.java   |  60 ++
 .../yarn/server/timeline/util/LeveldbUtils.java |   7 +
 .../pom.xml |   4 +
 .../yarn/server/timeline/EntityCacheItem.java   |   3 +-
 .../timeline/LevelDBCacheTimelineStore.java | 316 ++
 .../server/timeline/PluginStoreTestUtils.java   |   2 +-
 .../timeline/TestLevelDBCacheTimelineStore.java |  94 +++
 12 files changed, 1114 insertions(+), 454 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fab22b3/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1b57a3d..8eaed42 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -384,6 +384,8 @@ Release 2.8.0 - UNRELEASED
 YARN-4265. Provide new timeline plugin storage to support fine-grained 
entity
 caching. (Li Lu and Jason Lowe via junping_du)
 
+YARN-4219. New levelDB cache storage for timeline v1.5. (Li Lu via xgong)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fab22b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index e214a86..d84c155 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1702,6 +1702,13 @@ public class YarnConfiguration extends Configuration {
   DEFAULT_TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETRY_POLICY_SPEC =
   "2000, 500";
 
+  public static final String TIMELINE_SERVICE_LEVELDB_CACHE_READ_CACHE_SIZE =
+  TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX
+  + "leveldb-cache-read-cache-size";
+
+  public static final long
+  DEFAULT_TIMELINE_SERVICE_LEVELDB_CACHE_READ_CACHE_SIZE = 10 * 1024 * 
1024;
+
   public static final String TIMELINE_SERVICE_CLIENT_FD_FLUSH_INTERVAL_SECS =
   TIMELINE_SERVICE_CLIENT_PREFIX + "fd-flush-interval-secs";
   public static final long

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fab22b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 6508a2a..e33d23e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2036,6 +2036,14 @@
     <value>604800</value>
   </property>
 
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.leveldb-cache-read-cache-size</name>
+    <description>
+      Read cache size for the leveldb cache storage in ATS v1.5 plugin storage.
+    </description>
+    <value>10485760</value>
+  </property>
+
   <property>
 
   

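Taken together with the two YarnConfiguration constants added earlier in this patch, the property above is read like any other long-valued knob. A minimal, hedged sketch (the constant names come from the diff above; the wrapper class and printout are illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class LevelDbCacheSizeSketch {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // Falls back to the 10 * 1024 * 1024 byte default when unset.
    long readCacheSize = conf.getLong(
        YarnConfiguration.TIMELINE_SERVICE_LEVELDB_CACHE_READ_CACHE_SIZE,
        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_CACHE_READ_CACHE_SIZE);
    System.out.println("leveldb read cache bytes: " + readCacheSize);
  }
}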
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fab22b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java
--
diff --git 

[48/50] [abbrv] hadoop git commit: MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread. Contributed by Xuan Gong

2016-02-01 Thread aengineer
MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread. 
Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59a212b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59a212b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59a212b6

Branch: refs/heads/HDFS-7240
Commit: 59a212b6e1265adfa9b55c71b65a22157dfccf77
Parents: af2dccb
Author: Jason Lowe 
Authored: Mon Feb 1 16:05:06 2016 +
Committer: Jason Lowe 
Committed: Mon Feb 1 16:05:06 2016 +

--
 hadoop-mapreduce-project/CHANGES.txt|  6 +
 .../org/apache/hadoop/mapred/ClientCache.java   | 24 +++-
 .../hadoop/mapred/ClientServiceDelegate.java| 16 +
 .../org/apache/hadoop/mapred/YARNRunner.java| 11 +
 .../mapred/YarnClientProtocolProvider.java  |  5 ++--
 .../TestYarnClientProtocolProvider.java |  6 +++--
 6 files changed, 63 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a212b6/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 8261b34..55284da 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -736,6 +736,9 @@ Release 2.7.3 - UNRELEASED
 MAPREDUCE-6554. MRAppMaster servicestart failing with NPE in
 MRAppMaster#parsePreviousJobHistory (Bibin A Chundatt via jlowe)
 
+MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread.
+(Xuan Gong via jlowe)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES
@@ -1041,6 +1044,9 @@ Release 2.6.4 - UNRELEASED
 MAPREDUCE-6554. MRAppMaster servicestart failing with NPE in
 MRAppMaster#parsePreviousJobHistory (Bibin A Chundatt via jlowe)
 
+MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread.
+(Xuan Gong via jlowe)
+
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a212b6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
index 4335c82..93ea5c4 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
@@ -22,11 +22,11 @@ import java.io.IOException;
 import java.security.PrivilegedAction;
 import java.util.HashMap;
 import java.util.Map;
-
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
@@ -97,4 +97,26 @@ public class ClientCache {
   }
 });
   }
+
+  public void close() throws IOException {
+if (rm != null) {
+  rm.close();
+}
+
+if (hsProxy != null) {
+  RPC.stopProxy(hsProxy);
+  hsProxy = null;
+}
+
+if (cache != null && !cache.isEmpty()) {
+  for (ClientServiceDelegate delegate : cache.values()) {
+if (delegate != null) {
+  delegate.close();
+  delegate = null;
+}
+  }
+  cache.clear();
+  cache = null;
+}
+  }
 }
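The interesting part of the fix is the shutdown idiom in close() above: release each held resource, null the field so a repeated close() is harmless, and clear the cache. Per the diffstat, YARNRunner and YarnClientProtocolProvider gained matching hooks that presumably funnel into it; their bodies are not shown in this excerpt. The same idiom in a self-contained toy form (not the real MR/YARN types):

import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

class CacheCloseSketch implements Closeable {
  private Map<String, Closeable> cache = new HashMap<>();

  @Override
  public void close() throws IOException {
    if (cache != null) {
      for (Closeable delegate : cache.values()) {
        if (delegate != null) {
          delegate.close(); // release per-job delegates, as ClientCache does
        }
      }
      cache.clear();
      cache = null;         // idempotent: a second close() falls through
    }
  }
}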

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a212b6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index 8517c19..eac8dbc 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java

[22/50] [abbrv] hadoop git commit: HDFS-9677. Rename generationStampV1/generationStampV2 to legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu.

2016-02-01 Thread aengineer
HDFS-9677. Rename generationStampV1/generationStampV2 to 
legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a91109d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a91109d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a91109d

Branch: refs/heads/HDFS-7240
Commit: 8a91109d16394310f2568717f103e6fff7cbddb0
Parents: fb238d7
Author: Jing Zhao 
Authored: Wed Jan 27 15:48:47 2016 -0800
Committer: Jing Zhao 
Committed: Wed Jan 27 15:48:47 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/blockmanagement/BlockIdManager.java  | 83 ++--
 .../server/blockmanagement/BlockManager.java|  8 +-
 .../OutOfLegacyGenerationStampsException.java   | 38 +
 .../OutOfV1GenerationStampsException.java   | 38 -
 .../hdfs/server/common/HdfsServerConstants.java |  3 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  4 +-
 .../hdfs/server/namenode/FSImageFormat.java | 12 +--
 .../server/namenode/FSImageFormatProtobuf.java  | 12 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  8 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java | 13 +--
 .../hadoop-hdfs/src/main/proto/fsimage.proto|  4 +-
 .../blockmanagement/TestSequentialBlockId.java  | 18 ++---
 .../hdfs/server/namenode/TestEditLog.java   |  6 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |  4 +-
 .../hdfs/server/namenode/TestSaveNamespace.java |  2 +-
 17 files changed, 126 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a91109d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 097c051..7e75558 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -959,6 +959,9 @@ Release 2.9.0 - UNRELEASED
HDFS-9541. Add hdfsStreamBuilder API to libhdfs to support defaultBlockSizes
 greater than 2 GB. (cmccabe via zhz)
 
+HDFS-9677. Rename generationStampV1/generationStampV2 to
+legacyGenerationStamp/generationStamp. (Mingliang Liu via jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a91109d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 9c71287..3f21d9b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -36,11 +36,11 @@ public class BlockIdManager {
* The global generation stamp for legacy blocks with randomly
* generated block IDs.
*/
-  private final GenerationStamp generationStampV1 = new GenerationStamp();
+  private final GenerationStamp legacyGenerationStamp = new GenerationStamp();
   /**
* The global generation stamp for this file system.
*/
-  private final GenerationStamp generationStampV2 = new GenerationStamp();
+  private final GenerationStamp generationStamp = new GenerationStamp();
   /**
* The value of the generation stamp when the first switch to sequential
* block IDs was made. Blocks with generation stamps below this value
@@ -49,7 +49,7 @@ public class BlockIdManager {
* (or initialized as an offset from the V1 (legacy) generation stamp on
* upgrade).
*/
-  private long generationStampV1Limit;
+  private long legacyGenerationStampLimit;
   /**
* The global block ID space for this file system.
*/
@@ -57,7 +57,8 @@ public class BlockIdManager {
   private final SequentialBlockGroupIdGenerator blockGroupIdGenerator;
 
   public BlockIdManager(BlockManager blockManager) {
-this.generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
+this.legacyGenerationStampLimit =
+HdfsConstants.GRANDFATHER_GENERATION_STAMP;
 this.blockIdGenerator = new SequentialBlockIdGenerator(blockManager);
 this.blockGroupIdGenerator = new SequentialBlockGroupIdGenerator(blockManager);
   }
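The renamed legacyGenerationStampLimit is the boundary between the two stamp spaces described in the javadoc above: stamps below it belong to blocks from the random-ID era. A one-method sketch of that test (the real BlockIdManager exposes an equivalent check; this exact signature is an assumption):

class LegacyStampSketch {
  /** Blocks stamped before the switch to sequential IDs need legacy lookup rules. */
  static boolean isLegacyBlock(long generationStamp, long legacyGenerationStampLimit) {
    return generationStamp < legacyGenerationStampLimit;
  }
}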
@@ -68,14 +69,14 @@ public class BlockIdManager {
* Should be invoked only during the first upgrade to
* sequential block IDs.
*/
-  

[40/50] [abbrv] hadoop git commit: YARN-4647. Make RegisterNodeManagerRequestPBImpl thread-safe. (kasha)

2016-02-01 Thread aengineer
YARN-4647. Make RegisterNodeManagerRequestPBImpl thread-safe. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9a09d69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9a09d69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9a09d69

Branch: refs/heads/HDFS-7240
Commit: c9a09d6926b258e205a4ff7998ce5a86bf5dbe3b
Parents: a277bdc
Author: Karthik Kambatla 
Authored: Fri Jan 29 08:12:54 2016 -0800
Committer: Karthik Kambatla 
Committed: Fri Jan 29 08:12:54 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt |  2 +
 .../pb/RegisterNodeManagerRequestPBImpl.java| 75 +++-
 2 files changed, 42 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9a09d69/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 138e581..76cad7f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -113,6 +113,8 @@ Release 2.9.0 - UNRELEASED
 integrated ResourceHandler mechanism, and also deprecated the old
LCEResourceHandler interface hierarchy. (Varun Vasudev via vinodkv)
 
+YARN-4647. Make RegisterNodeManagerRequestPBImpl thread-safe. (kasha)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9a09d69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
index 5b0e0a1..2a1a268 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
@@ -65,14 +65,14 @@ public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest
 viaProto = true;
   }
   
-  public RegisterNodeManagerRequestProto getProto() {
-  mergeLocalToProto();
+  public synchronized RegisterNodeManagerRequestProto getProto() {
+mergeLocalToProto();
 proto = viaProto ? proto : builder.build();
 viaProto = true;
 return proto;
   }
 
-  private void mergeLocalToBuilder() {
+  private synchronized void mergeLocalToBuilder() {
 if (this.containerStatuses != null) {
   addNMContainerStatusesToProto();
 }
@@ -107,15 +107,16 @@ public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest
   }
 
 
-  private void mergeLocalToProto() {
-if (viaProto) 
+  private synchronized void mergeLocalToProto() {
+if (viaProto) {
   maybeInitBuilder();
+}
 mergeLocalToBuilder();
 proto = builder.build();
 viaProto = true;
   }
 
-  private void maybeInitBuilder() {
+  private synchronized void maybeInitBuilder() {
 if (viaProto || builder == null) {
   builder = RegisterNodeManagerRequestProto.newBuilder(proto);
 }
@@ -124,7 +125,7 @@ public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest
 
   
   @Override
-  public Resource getResource() {
+  public synchronized Resource getResource() {
 RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
 if (this.resource != null) {
   return this.resource;
@@ -137,7 +138,7 @@ public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest
   }
 
   @Override
-  public void setResource(Resource resource) {
+  public synchronized void setResource(Resource resource) {
 maybeInitBuilder();
 if (resource == null) 
   builder.clearResource();
@@ -145,7 +146,7 @@ public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest
   }
 
   @Override
-  public NodeId getNodeId() {
+  public synchronized NodeId getNodeId() {
 RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
 if (this.nodeId != null) {
   return this.nodeId;
@@ -158,15 +159,16 @@ public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest
   }
 
   @Override
-  
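A note on why every accessor went synchronized: a PBImpl keeps three pieces of shared state (the built proto, the mutable builder, and the viaProto flag), and getProto() performs a check-then-act across all of them. A toy, self-contained sketch of the hazard the locks close (plain Java stand-ins, not the generated protobuf types):

class PbImplRaceSketch {
  private String proto = "";                // stands in for the immutable proto
  private StringBuilder builder = new StringBuilder();
  private boolean viaProto = false;

  // Without 'synchronized', two threads can interleave between the viaProto
  // check and the build/publish below, so one of them can return a snapshot
  // that is missing the other's pending builder state.
  synchronized String getProto() {
    if (!viaProto) {
      proto = builder.toString();           // "build" the snapshot
      viaProto = true;
    }
    return proto;
  }

  synchronized void setField(String value) {
    if (viaProto) {                         // maybeInitBuilder() equivalent
      builder = new StringBuilder(proto);
      viaProto = false;
    }
    builder.append(value);
  }
}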

[19/50] [abbrv] hadoop git commit: HADOOP-12492. maven install triggers bats test (aw)

2016-02-01 Thread aengineer
HADOOP-12492. maven install triggers bats test (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79d7949f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79d7949f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79d7949f

Branch: refs/heads/HDFS-7240
Commit: 79d7949fbb19928e0cae4f6b5dd9f1af82242f53
Parents: 97056c3
Author: Allen Wittenauer 
Authored: Wed Jan 27 11:27:27 2016 -0800
Committer: Allen Wittenauer 
Committed: Wed Jan 27 11:27:27 2016 -0800

--
 hadoop-common-project/hadoop-common/pom.xml | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79d7949f/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 68ad350..7e4d090 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -1028,7 +1028,9 @@
     <profile>
       <id>shelltest</id>
       <activation>
-        <activeByDefault>true</activeByDefault>
+        <property>
+          <name>!skipTests</name>
+        </property>
       </activation>
       <build>
 
@@ -1037,7 +1039,7 @@
             <executions>
               <execution>
                 <id>common-test-bats-driver</id>
-                <phase>process-test-classes</phase>
+                <phase>test</phase>
                 <goals>
                   <goal>run</goal>
                 </goals>

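Net effect of the two hunks: the bats driver now runs in the test phase rather than process-test-classes, and the shelltest profile activates only while the skipTests property is undefined. So a plain "mvn install" still exercises the shell tests, while "mvn install -DskipTests" skips them along with the rest of the test run, which is the behavior the bug asked for.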


[44/50] [abbrv] hadoop git commit: YARN-4615. Fix random test failure in TestAbstractYarnScheduler#testResourceRequestRecoveryToTheRightAppAttempt. (Sunil G via rohithsharmaks)

2016-02-01 Thread aengineer
YARN-4615. Fix random test failure in 
TestAbstractYarnScheduler#testResourceRequestRecoveryToTheRightAppAttempt. 
(Sunil G via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2673cbaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2673cbaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2673cbaf

Branch: refs/heads/HDFS-7240
Commit: 2673cbaf556eb4d0e44519cdbb8c6f0f02412a21
Parents: 772ea7b
Author: Rohith Sharma K S 
Authored: Mon Feb 1 10:43:56 2016 +0530
Committer: Rohith Sharma K S 
Committed: Mon Feb 1 10:43:56 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../yarn/server/resourcemanager/MockRM.java | 32 +++-
 .../scheduler/TestAbstractYarnScheduler.java|  5 ++-
 3 files changed, 30 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2673cbaf/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d82a9be..fd9f5cf 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -178,6 +178,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4543. Fix random test failure in TestNodeStatusUpdater.testStopReentrant
 (Akihiro Suda via rohithsharmaks)
 
+YARN-4615. Fix random test failure in TestAbstractYarnScheduler#testResource
+RequestRecoveryToTheRightAppAttempt. (Sunil G via rohithsharmaks)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2673cbaf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index a5d14c3..f6b1f43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -202,15 +202,33 @@ public class MockRM extends ResourceManager {
 
   public void waitForContainerState(ContainerId containerId,
   RMContainerState state) throws Exception {
-int timeoutSecs = 0;
+// This method will assert if state is not expected after timeout.
+Assert.assertTrue(waitForContainerState(containerId, state, 8 * 1000));
+  }
+
+  public boolean waitForContainerState(ContainerId containerId,
+  RMContainerState containerState, int timeoutMillisecs) throws Exception {
 RMContainer container = getResourceScheduler().getRMContainer(containerId);
-while ((container == null || container.getState() != state)
-&& timeoutSecs++ < 40) {
-  System.out.println(
-  "Waiting for" + containerId + " state to be:" + state.name());
-  Thread.sleep(200);
+int timeoutSecs = 0;
+while (((container == null) || !containerState.equals(container.getState()))
+&& timeoutSecs++ < timeoutMillisecs / 100) {
+  if(container == null){
+container = getResourceScheduler().getRMContainer(containerId);
+  }
+  System.out.println("Container : " + containerId +
+  " Waiting for state : " + containerState);
+
+  Thread.sleep(100);
+
+  if (timeoutMillisecs <= timeoutSecs * 100) {
+return false;
+  }
 }
-Assert.assertTrue(container.getState() == state);
+
+System.out.println("Container State is : " + container.getState());
+Assert.assertEquals("Container state is not correct (timedout)",
+containerState, container.getState());
+return true;
   }
 
   public void waitForContainerAllocated(MockNM nm, ContainerId containerId)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2673cbaf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
 

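The rewritten wait above is the standard bounded-poll pattern: re-check on a 100 ms cadence and report false once the budget runs out, leaving hard assertions to callers that want them. The same pattern in a generic, self-contained form (not the MockRM API itself):

import java.util.function.BooleanSupplier;

final class WaitUtilSketch {
  static boolean waitFor(BooleanSupplier condition, long timeoutMillis)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() >= deadline) {
        return false;           // budget exhausted, report failure to the caller
      }
      Thread.sleep(100);        // same 100 ms poll interval as the patch
    }
    return true;
  }
}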
[29/50] [abbrv] hadoop git commit: YARN-4643. Container recovery is broken with delegating container runtime. Contributed by Sidharta Seethana

2016-02-01 Thread aengineer
YARN-4643. Container recovery is broken with delegating container runtime. 
Contributed by Sidharta Seethana


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61382ff8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61382ff8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61382ff8

Branch: refs/heads/HDFS-7240
Commit: 61382ff8fabc76b3a51f227646573cdf367fea1a
Parents: ef343be
Author: Jason Lowe 
Authored: Thu Jan 28 18:59:35 2016 +
Committer: Jason Lowe 
Committed: Thu Jan 28 18:59:35 2016 +

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../containermanager/launcher/RecoveredContainerLaunch.java | 7 ---
 .../yarn/server/nodemanager/TestLinuxContainerExecutor.java | 9 +
 3 files changed, 16 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61382ff8/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c8a8c06..1b57a3d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1345,6 +1345,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4520. Finished app info is unnecessarily persisted in NM state-store
 if container is acquired but not launched on this node. (sandflee via jianhe)
 
+YARN-4643. Container recovery is broken with delegating container runtime
+(Sidharta Seethana via jlowe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61382ff8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
index d7b9ae2..66f5a2a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
@@ -83,9 +83,10 @@ public class RecoveredContainerLaunch extends ContainerLaunch {
 exec.activateContainer(containerId, pidFilePath);
 retCode = exec.reacquireContainer(
 new ContainerReacquisitionContext.Builder()
-.setUser(container.getUser())
-.setContainerId(containerId)
-.build());
+.setContainer(container)
+.setUser(container.getUser())
+.setContainerId(containerId)
+.build());
   } else {
 LOG.warn("Unable to locate pid file for container " + containerIdStr);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61382ff8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
index 58debc9..88ebf8d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
@@ -629,7 +629,16 @@ public class TestLinuxContainerExecutor {
 } catch (IOException e) {
   // expected if LCE isn't setup right, but not necessary for this test
 }
+
+Container container = mock(Container.class);
+ContainerLaunchContext context = mock(ContainerLaunchContext.class);
+HashMap<String, String> env = new HashMap<>();
+
+
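Background for the setContainer(...) line above: a delegating executor picks the concrete runtime by inspecting the container's launch context, so reacquisition without the Container could never route to, say, the Docker runtime. A minimal sketch of that dispatch decision (the environment key and the fallback are assumptions, not the exact YARN constants):

import java.util.HashMap;
import java.util.Map;

class RuntimeDispatchSketch {
  static String pickRuntime(Map<String, String> containerEnv) {
    // A recovered container whose env is unavailable would always fall
    // through to the default runtime, which is the breakage fixed here.
    String type = containerEnv.get("YARN_CONTAINER_RUNTIME_TYPE"); // assumed key
    return (type == null) ? "default" : type;
  }

  public static void main(String[] args) {
    Map<String, String> env = new HashMap<>();
    env.put("YARN_CONTAINER_RUNTIME_TYPE", "docker");
    System.out.println(pickRuntime(env)); // docker
  }
}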

[39/50] [abbrv] hadoop git commit: YARN-4411. RMAppAttemptImpl#createApplicationAttemptReport throws IllegalArgumentException. Contributed by Bibin A Chundatt and yarntime.

2016-02-01 Thread aengineer
YARN-4411. RMAppAttemptImpl#createApplicationAttemptReport throws
IllegalArgumentException. Contributed by Bibin A Chundatt and yarntime.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a277bdc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a277bdc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a277bdc9

Branch: refs/heads/HDFS-7240
Commit: a277bdc9edc66bef419fcd063b832073e512f234
Parents: df99ea8
Author: Devaraj K 
Authored: Fri Jan 29 13:51:37 2016 +0530
Committer: Devaraj K 
Committed: Fri Jan 29 13:51:37 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt|  3 +++
 .../rmapp/attempt/RMAppAttemptImpl.java| 10 +-
 .../rmapp/attempt/TestRMAppAttemptTransitions.java | 13 +
 3 files changed, 21 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a277bdc9/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index dd61f2a..138e581 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -392,6 +392,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-4219. New levelDB cache storage for timeline v1.5. (Li Lu via xgong)
 
+YARN-4411. RMAppAttemptImpl#createApplicationAttemptReport throws
+IllegalArgumentException. (Bibin A Chundatt, yarntime via devaraj)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a277bdc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 3f45cb4..99f1b21 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -2067,11 +2067,11 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
   // am container.
   ContainerId amId =
   masterContainer == null ? null : masterContainer.getId();
-  attemptReport = ApplicationAttemptReport.newInstance(this
-  .getAppAttemptId(), this.getHost(), this.getRpcPort(), this
-  .getTrackingUrl(), this.getOriginalTrackingUrl(), this.getDiagnostics(),
-  YarnApplicationAttemptState.valueOf(this.getState().toString()),
-  amId, this.startTime, this.finishTime);
+  attemptReport = ApplicationAttemptReport.newInstance(
+  this.getAppAttemptId(), this.getHost(), this.getRpcPort(),
+  this.getTrackingUrl(), this.getOriginalTrackingUrl(),
+  this.getDiagnostics(), createApplicationAttemptState(), amId,
+  this.startTime, this.finishTime);
 } finally {
   this.readLock.unlock();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a277bdc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
index 7f9610f..e7985fa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
@@ 
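The crash being fixed is a plain Enum.valueOf pitfall: the internal attempt-state enum has states with no YarnApplicationAttemptState counterpart (FINAL_SAVING is the usual example, an assumption here since the failing state is not shown in this excerpt), so valueOf threw; createApplicationAttemptState() maps such states explicitly. A compact illustration:

enum InternalState { RUNNING, FINAL_SAVING }
enum PublicState { RUNNING, FINISHED }

class ValueOfPitfallSketch {
  public static void main(String[] args) {
    // Shared names map cleanly:
    System.out.println(PublicState.valueOf(InternalState.RUNNING.toString()));
    // Internal-only states do not, which was the reported crash:
    try {
      PublicState.valueOf(InternalState.FINAL_SAVING.toString());
    } catch (IllegalArgumentException e) {
      System.out.println("unmappable state: " + e.getMessage());
    }
  }
}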

[34/50] [abbrv] hadoop git commit: HDFS-7764. DirectoryScanner shouldn't abort the scan if one directory had an error (Rakesh R via cmccabe)

2016-02-01 Thread aengineer
HDFS-7764. DirectoryScanner shouldn't abort the scan if one directory had an 
error (Rakesh R via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f67149ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f67149ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f67149ab

Branch: refs/heads/HDFS-7240
Commit: f67149ab08bb49381def6c535ab4c4610e0a4221
Parents: ee005e0
Author: Colin Patrick Mccabe 
Authored: Thu Jan 28 19:54:50 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Thu Jan 28 19:54:50 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/datanode/DirectoryScanner.java  | 72 +---
 .../server/datanode/TestDirectoryScanner.java   | 50 ++
 3 files changed, 100 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f67149ab/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a51dc15..9b80aa1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -962,6 +962,9 @@ Release 2.9.0 - UNRELEASED
 HDFS-9677. Rename generationStampV1/generationStampV2 to
 legacyGenerationStamp/generationStamp. (Mingliang Liu via jing9)
 
+HDFS-7764. DirectoryScanner shouldn't abort the scan if one directory had
+an error (Rakesh R via cmccabe)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f67149ab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 392c121..083ca31 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -19,8 +19,10 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import com.google.common.annotations.VisibleForTesting;
 import java.io.File;
+import java.io.FilenameFilter;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -42,12 +44,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.Time;
@@ -727,18 +729,20 @@ public class DirectoryScanner implements Runnable {
 
   for (Entry<Integer, Future<ScanInfoPerBlockPool>> report :
   compilersInProgress.entrySet()) {
+Integer index = report.getKey();
 try {
-  dirReports[report.getKey()] = report.getValue().get();
+  dirReports[index] = report.getValue().get();
 
   // If our compiler threads were interrupted, give up on this run
-  if (dirReports[report.getKey()] == null) {
+  if (dirReports[index] == null) {
 dirReports = null;
 break;
   }
 } catch (Exception ex) {
-  LOG.error("Error compiling report", ex);
-  // Propagate ex to DataBlockScanner to deal with
-  throw new RuntimeException(ex);
+  FsVolumeSpi fsVolumeSpi = volumes.get(index);
+  LOG.error("Error compiling report for the volume, StorageId: "
+  + fsVolumeSpi.getStorageID(), ex);
+  // Continue scanning the other volumes
 }
   }
 } catch (IOException e) {
@@ -747,7 +751,9 @@ public class DirectoryScanner implements Runnable {
 if (dirReports != null) {
   // Compile consolidated report for all the volumes
   for (ScanInfoPerBlockPool report : dirReports) {
-list.addAll(report);
+if(report != null){
+  list.addAll(report);
+}
   }
 }
 return list.toSortedArrays();
@@ 
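The heart of the change is in the first hunk: an exception while harvesting one volume's Future is now logged and skipped instead of rethrown, and the consolidation loop tolerates the resulting null report. The same log-and-continue harvest, reduced to a self-contained sketch:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

class PerVolumeHarvestSketch {
  static List<String> harvest(Map<Integer, Future<String>> inProgress) {
    List<String> reports = new ArrayList<>();
    for (Map.Entry<Integer, Future<String>> e : inProgress.entrySet()) {
      try {
        reports.add(e.getValue().get());
      } catch (InterruptedException | ExecutionException ex) {
        // Log-and-continue: one failed volume no longer aborts the scan.
        System.err.println("report for volume " + e.getKey() + " failed: " + ex);
      }
    }
    return reports;
  }
}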

[31/50] [abbrv] hadoop git commit: MAPREDUCE-6563. Streaming documentation contains a stray % character. Contributed by Chris Nauroth.

2016-02-01 Thread aengineer
MAPREDUCE-6563. Streaming documentation contains a stray % character. 
Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41da9a07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41da9a07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41da9a07

Branch: refs/heads/HDFS-7240
Commit: 41da9a07738c6d019e2467f139750b7e2f50a914
Parents: 9fab22b
Author: cnauroth 
Authored: Thu Jan 28 14:26:52 2016 -0800
Committer: cnauroth 
Committed: Thu Jan 28 14:46:01 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41da9a07/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index a8abdb4..08cd1d3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -706,6 +706,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6610. JobHistoryEventHandler should not swallow timeline response
 (Li Lu via jianhe)
 
+MAPREDUCE-6563. Streaming documentation contains a stray '%' character.
+(cnauroth)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41da9a07/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
--
diff --git 
a/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm 
b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
index 2223352..cc8ed69 100644
--- a/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
+++ b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
@@ -1,4 +1,4 @@

[33/50] [abbrv] hadoop git commit: HADOOP-12702. Add an HDFS metrics sink. (Daniel Templeton via kasha)

2016-02-01 Thread aengineer
HADOOP-12702. Add an HDFS metrics sink. (Daniel Templeton via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee005e01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee005e01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee005e01

Branch: refs/heads/HDFS-7240
Commit: ee005e010cff3f97a5daa8000ac2cd151e2631ca
Parents: 7f46636
Author: Karthik Kambatla 
Authored: Thu Jan 28 17:43:17 2016 -0800
Committer: Karthik Kambatla 
Committed: Thu Jan 28 17:43:17 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 .../metrics2/sink/RollingFileSystemSink.java| 420 +++
 .../sink/RollingFileSystemSinkTestBase.java | 506 +++
 .../sink/TestRollingFileSystemSink.java | 156 ++
 4 files changed, 1084 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee005e01/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4da20e0..4d01857 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -650,6 +650,8 @@ Release 2.9.0 - UNRELEASED
 
   NEW FEATURES
 
+HADOOP-12702. Add an HDFS metrics sink. (Daniel Templeton via kasha)
+
   IMPROVEMENTS
 
 HADOOP-12321. Make JvmPauseMonitor an AbstractService.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee005e01/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java
new file mode 100644
index 000..8271362
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java
@@ -0,0 +1,420 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.sink;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.InetAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.charset.StandardCharsets;
+import java.util.Date;
+import java.util.TimeZone;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.commons.lang.time.FastDateFormat;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsException;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.hadoop.metrics2.MetricsTag;
+
+/**
+ * This class is a metrics sink that uses
+ * {@link org.apache.hadoop.fs.FileSystem} to write the metrics logs.  Every
+ * hour a new directory will be created under the path specified by the
+ * basepath property. All metrics will be logged to a file in the
+ * current hour's directory in a file named <hostname>.log, where
+ * <hostname> is the name of the host on which the metrics logging
+ * process is running. The base path is set by the
+ * <prefix>.sink.<instance>.basepath property.  The
+ * time zone used to create the current hour's directory name is GMT.  If the
+ * basepath property isn't specified, it will default to
+ * /tmp, which is the temp directory on whatever default file
+ * system is configured for the cluster.
+ *
+ * The <prefix>.sink.<instance>.ignore-error property
+ * controls whether an 

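Putting the javadoc above together, a record lands at <basepath>/<current-hour-dir>/<hostname>.log with the hour computed in GMT. A sketch that reproduces the path scheme (the exact hour-directory date pattern is an assumption; the javadoc only promises an hourly GMT directory):

import java.net.InetAddress;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

class SinkPathSketch {
  public static void main(String[] args) throws Exception {
    SimpleDateFormat hourDir = new SimpleDateFormat("yyyyMMddHH00"); // assumed pattern
    hourDir.setTimeZone(TimeZone.getTimeZone("GMT"));                // javadoc: GMT
    String basepath = "/tmp";                                        // javadoc default
    String logPath = basepath + "/" + hourDir.format(new Date()) + "/"
        + InetAddress.getLocalHost().getHostName() + ".log";
    System.out.println(logPath);
  }
}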
[45/50] [abbrv] hadoop git commit: HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final DatanodeStorage.State state)' method (Contributed by Daryn Sharp)

2016-02-01 Thread aengineer
HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final 
DatanodeStorage.State state)' method (Contributed by Daryn Sharp)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e418bd1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e418bd1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e418bd1f

Branch: refs/heads/HDFS-7240
Commit: e418bd1fb0568ce7ae22f588fea2dd9c95567383
Parents: 2673cba
Author: Vinayakumar B 
Authored: Mon Feb 1 13:24:05 2016 +0530
Committer: Vinayakumar B 
Committed: Mon Feb 1 13:24:05 2016 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../server/blockmanagement/BlockManager.java| 16 
 .../hdfs/server/blockmanagement/BlocksMap.java  | 20 
 3 files changed, 15 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e418bd1f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f7487fe..432e686 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2655,6 +2655,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9682. Fix a typo "aplication" in HttpFS document.
 (Weiwei Yang via aajisaka)
 
+HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final
+DatanodeStorage.State state)' method (Daryn Sharp via vinayakumarb)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e418bd1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a76429e..587e6b6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1212,8 +1212,10 @@ public class BlockManager implements BlockStatsMXBean {
   return;
 }
 StringBuilder datanodes = new StringBuilder();
-for(DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock,
-State.NORMAL)) {
+for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
+  if (storage.getState() != State.NORMAL) {
+continue;
+  }
   final DatanodeDescriptor node = storage.getDatanodeDescriptor();
   final Block b = getBlockOnStorage(storedBlock, storage);
   if (b != null) {
@@ -3164,7 +3166,10 @@ public class BlockManager implements BlockStatsMXBean {
 Collection<DatanodeStorageInfo> nonExcess = new ArrayList<>();
 Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
 .getNodes(block);
-for(DatanodeStorageInfo storage : blocksMap.getStorages(block, State.NORMAL)) {
+for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
+  if (storage.getState() != State.NORMAL) {
+continue;
+  }
   final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
   if (storage.areBlockContentsStale()) {
 LOG.trace("BLOCK* processOverReplicatedBlock: Postponing {}"
@@ -3665,7 +3670,10 @@ public class BlockManager implements BlockStatsMXBean {
 // else proceed with fast case
 int live = 0;
 Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
-for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
+for (DatanodeStorageInfo storage : blocksMap.getStorages(b)) {
+  if (storage.getState() != State.NORMAL) {
+continue;
+  }
   final DatanodeDescriptor node = storage.getDatanodeDescriptor();
   if ((nodesCorrupt == null) || (!nodesCorrupt.contains(node)))
 live++;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e418bd1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index ed05e3a..47a21fe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ 

[38/50] [abbrv] hadoop git commit: MAPREDUCE-6616. Fail to create jobhistory file if there are some multibyte characters in the job name. Contributed by Kousuke Saruta.

2016-02-01 Thread aengineer
MAPREDUCE-6616. Fail to create jobhistory file if there are some multibyte 
characters in the job name. Contributed by Kousuke Saruta.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df99ea8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df99ea8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df99ea8a

Branch: refs/heads/HDFS-7240
Commit: df99ea8a92d600e669606d41d3887bd004e7a3cc
Parents: 8ee0603
Author: Akira Ajisaka 
Authored: Fri Jan 29 16:19:28 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Jan 29 16:20:29 2016 +0900

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../v2/jobhistory/FileNameIndexUtils.java   | 171 +++-
 .../v2/jobhistory/TestFileNameIndexUtils.java   | 199 ---
 3 files changed, 296 insertions(+), 77 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df99ea8a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 08cd1d3..8261b34 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -709,6 +709,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6563. Streaming documentation contains a stray '%' character.
 (cnauroth)
 
+MAPREDUCE-6616. Fail to create jobhistory file if there are some multibyte
+characters in the job name. (Kousuke Saruta via aajisaka)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df99ea8a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/FileNameIndexUtils.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/FileNameIndexUtils.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/FileNameIndexUtils.java
index eb0c54c..284fe80 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/FileNameIndexUtils.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/FileNameIndexUtils.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.net.URLDecoder;
 import java.net.URLEncoder;
+import static java.nio.charset.StandardCharsets.UTF_8;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -35,7 +36,7 @@ public class FileNameIndexUtils {
   // Sanitize job history file for predictable parsing
   static final String DELIMITER = "-";
   static final String DELIMITER_ESCAPE = "%2D";
-  
+
   private static final Log LOG = LogFactory.getLog(FileNameIndexUtils.class);
 
   // Job history file names need to be backwards compatible
@@ -57,7 +58,8 @@ public class FileNameIndexUtils {
* @param indexInfo the index info.
* @return the done job history filename.
*/
-  public static String getDoneFileName(JobIndexInfo indexInfo) throws IOException {
+  public static String getDoneFileName(JobIndexInfo indexInfo)
+  throws IOException {
 return getDoneFileName(indexInfo,
 JHAdminConfig.DEFAULT_MR_HS_JOBNAME_LIMIT);
   }
@@ -66,49 +68,58 @@ public class FileNameIndexUtils {
   int jobNameLimit) throws IOException {
 StringBuilder sb = new StringBuilder();
 //JobId
-sb.append(escapeDelimiters(TypeConverter.fromYarn(indexInfo.getJobId()).toString()));
+sb.append(encodeJobHistoryFileName(escapeDelimiters(
+TypeConverter.fromYarn(indexInfo.getJobId()).toString())));
 sb.append(DELIMITER);
-
+
 //SubmitTime
-sb.append(indexInfo.getSubmitTime());
+sb.append(encodeJobHistoryFileName(String.valueOf(
+indexInfo.getSubmitTime())));
 sb.append(DELIMITER);
-
+
 //UserName
-sb.append(escapeDelimiters(getUserName(indexInfo)));
+sb.append(encodeJobHistoryFileName(escapeDelimiters(
+getUserName(indexInfo))));
 sb.append(DELIMITER);
-
+
 //JobName
-sb.append(escapeDelimiters(trimJobName(
-getJobName(indexInfo), jobNameLimit)));
+sb.append(trimURLEncodedString(encodeJobHistoryFileName(escapeDelimiters(
+getJobName(indexInfo))), jobNameLimit));
 sb.append(DELIMITER);
-
+
 //FinishTime
-sb.append(indexInfo.getFinishTime());
+
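The reason every field now passes through encodeJobHistoryFileName before trimming: cutting a raw multibyte job name at a length budget can split a character, while cutting the percent-encoded form only needs to respect escape boundaries. A sketch of that ordering (trimURLEncodedString in the patch is more involved; the boundary check below is illustrative only):

import java.net.URLEncoder;

class EncodedTrimSketch {
  public static void main(String[] args) throws Exception {
    String jobName = "ジョブ-name";                        // multibyte job name
    String encoded = URLEncoder.encode(jobName, "UTF-8");  // %E3%82%B8%E3%83%A7%E3%83%96-name
    int limit = 10;
    int cut = Math.min(limit, encoded.length());
    if (cut < encoded.length()) {
      // Never end inside a %XY escape: back off one or two characters.
      if (cut > 0 && encoded.charAt(cut - 1) == '%') {
        cut -= 1;
      } else if (cut >= 2 && encoded.charAt(cut - 2) == '%') {
        cut -= 2;
      }
    }
    System.out.println(encoded.substring(0, cut));         // %E3%82%B8 -- still valid encoding
  }
}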

[03/50] [abbrv] hadoop git commit: HADOOP-12736. TestTimedOutTestsListener#testThreadDumpAndDeadlocks sometimes times out. Contributed by Xiao Chen.

2016-02-01 Thread aengineer
HADOOP-12736. TestTimedOutTestsListener#testThreadDumpAndDeadlocks sometimes 
times out. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64322792
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64322792
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64322792

Branch: refs/heads/HDFS-7240
Commit: 643227927a7d7974655627d7e97aae42600692ae
Parents: 736eb17
Author: Akira Ajisaka 
Authored: Mon Jan 25 13:59:25 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Jan 25 13:59:25 2016 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/test/TestTimedOutTestsListener.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64322792/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3db68fb..2fc8ab4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -2493,6 +2493,9 @@ Release 2.6.4 - UNRELEASED
 HADOOP-12706. TestLocalFsFCStatistics#testStatisticsThreadLocalDataCleanUp
 times out occasionally (Sangjin Lee and Colin Patrick McCabe via jlowe)
 
+HADOOP-12736. TestTimedOutTestsListener#testThreadDumpAndDeadlocks
+sometimes times out. (Xiao Chen via aajisaka)
+
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64322792/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java
index 62748b4..1334f1c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java
@@ -144,7 +144,7 @@ public class TestTimedOutTestsListener {
   
   }
 
-  @Test(timeout=500)
+  @Test(timeout=3)
   public void testThreadDumpAndDeadlocks() throws Exception {
 new Deadlock();
 String s = null;



[46/50] [abbrv] hadoop git commit: HDFS-9659. EditLogTailerThread to Active Namenode RPC should timeout (Contributed by surendra singh lilhore)

2016-02-01 Thread aengineer
HDFS-9659. EditLogTailerThread to Active Namenode RPC should timeout 
(Contributed by surendra singh lilhore)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f2622b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f2622b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f2622b6

Branch: refs/heads/HDFS-7240
Commit: 8f2622b6a0603f92e8b5784879da28d3d5797fc1
Parents: e418bd1
Author: Vinayakumar B 
Authored: Mon Feb 1 14:10:55 2016 +0530
Committer: Vinayakumar B 
Committed: Mon Feb 1 14:10:55 2016 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java   | 6 +-
 2 files changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f2622b6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 432e686..5a8b525 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -903,6 +903,9 @@ Trunk (Unreleased)
   HDFS-9575. Use byte array for internal block indices in a striped block.
   (jing9 via szetszwo)
 
+  HDFS-9659. EditLogTailerThread to Active Namenode RPC should timeout
+  (surendra singh lilhore via vinayakumarb)
+
 Release 2.9.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f2622b6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index 6e60dba..405bf4f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -450,8 +450,12 @@ public class EditLogTailer {
 
   currentNN = nnLookup.next();
   try {
+int rpcTimeout = conf.getInt(
+DFSConfigKeys.DFS_HA_LOGROLL_RPC_TIMEOUT_KEY,
+DFSConfigKeys.DFS_HA_LOGROLL_RPC_TIMEOUT_DEFAULT);
 NamenodeProtocolPB proxy = RPC.waitForProxy(NamenodeProtocolPB.class,
-RPC.getProtocolVersion(NamenodeProtocolPB.class), currentNN.getIpcAddress(), conf);
+RPC.getProtocolVersion(NamenodeProtocolPB.class), currentNN.getIpcAddress(), conf,
+rpcTimeout, Long.MAX_VALUE);
 cachedActiveProxy = new NamenodeProtocolTranslatorPB(proxy);
 break;
   } catch (IOException e) {
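The shape of the call matters: RPC.waitForProxy keeps its effectively infinite connection deadline (Long.MAX_VALUE) while each RPC is now bounded by the new log-roll timeout, so a wedged active NN can no longer hang the standby's tailer thread indefinitely. Reading the knob, as a sketch (the key and default constants come from this diff; the default's numeric value is not visible in this excerpt):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

class LogRollTimeoutSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    int rpcTimeout = conf.getInt(
        DFSConfigKeys.DFS_HA_LOGROLL_RPC_TIMEOUT_KEY,
        DFSConfigKeys.DFS_HA_LOGROLL_RPC_TIMEOUT_DEFAULT);
    System.out.println("log-roll RPC timeout (ms): " + rpcTimeout);
  }
}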



[06/50] [abbrv] hadoop git commit: HDFS-9094. Add command line option to ask NameNode reload configuration. (Contributed by Xiaobing Zhou)

2016-02-01 Thread aengineer
HDFS-9094. Add command line option to ask NameNode reload configuration. 
(Contributed by Xiaobing Zhou)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d62b4a4d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d62b4a4d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d62b4a4d

Branch: refs/heads/HDFS-7240
Commit: d62b4a4de75edb840df6634f49cb4beb74e3fb07
Parents: 6eacdea
Author: Arpit Agarwal 
Authored: Mon Jan 25 12:17:05 2016 -0800
Committer: Arpit Agarwal 
Committed: Mon Jan 25 12:17:05 2016 -0800

--
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |   8 +
 .../hdfs/protocol/ReconfigurationProtocol.java  |   4 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../ReconfigurationProtocolServerSideUtils.java |   4 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  35 +++
 .../hdfs/server/protocol/NamenodeProtocols.java |   2 +
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 254 +--
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 162 
 8 files changed, 350 insertions(+), 122 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b4a4d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 62c5d81..8f6ed14 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -38,9 +38,11 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.ReconfigurationProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
@@ -496,6 +498,12 @@ public class DFSUtilClient {
 return new ClientDatanodeProtocolTranslatorPB(addr, ticket, conf, factory);
   }
 
+  public static ReconfigurationProtocol createReconfigurationProtocolProxy(
+  InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+  SocketFactory factory) throws IOException {
+return new ReconfigurationProtocolTranslatorPB(addr, ticket, conf, 
factory);
+  }
+
   /**
* Creates a new KeyProvider from the given Configuration.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b4a4d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
index 75dc877..8370438 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
@@ -25,6 +25,7 @@ import java.util.List;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.ReconfigurationTaskStatus;
+import org.apache.hadoop.io.retry.Idempotent;
 
 /**
  * ReconfigurationProtocol is used by HDFS admin to reload configuration
@@ -39,16 +40,19 @@ public interface ReconfigurationProtocol {
   /**
* Asynchronously reload configuration on disk and apply changes.
*/
+  @Idempotent
   void startReconfiguration() throws IOException;
 
   /**
* Get the status of the previously issued reconfig task.
* @see {@link org.apache.hadoop.conf.ReconfigurationTaskStatus}.
*/
+  @Idempotent
   ReconfigurationTaskStatus getReconfigurationStatus() throws IOException;
 
   /**
* Get a list of allowed properties for reconfiguration.
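The @Idempotent annotations added above are what allow Hadoop's RPC retry policies to transparently re-invoke these calls after a connection failure: only methods marked safe to execute more than once may be retried. A toy interface (using the same annotation, but not the real ReconfigurationProtocol) illustrates the contract:

import java.io.IOException;

import org.apache.hadoop.conf.ReconfigurationTaskStatus;
import org.apache.hadoop.io.retry.Idempotent;

interface ToyReconfigProtocol {
  // Starting the same background reload twice converges to the same
  // end state, so a retried call is harmless.
  @Idempotent
  void startReconfiguration() throws IOException;

  // Pure read: trivially safe to retry.
  @Idempotent
  ReconfigurationTaskStatus getReconfigurationStatus() throws IOException;
}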

[11/50] [abbrv] hadoop git commit: HDFS-8999. Allow a file to be closed with COMMITTED but not yet COMPLETE blocks.

2016-02-01 Thread aengineer
HDFS-8999. Allow a file to be closed with COMMITTED but not yet COMPLETE blocks.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd909ed9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd909ed9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd909ed9

Branch: refs/heads/HDFS-7240
Commit: bd909ed9f2d853f614f04a50e2230a7932732776
Parents: 2085e60
Author: Tsz-Wo Nicholas Sze 
Authored: Tue Jan 26 10:32:51 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Tue Jan 26 10:32:51 2016 +0800

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 42 ---
 .../org/apache/hadoop/hdfs/DataStreamer.java|  3 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++
 .../server/blockmanagement/BlockManager.java| 38 --
 .../hdfs/server/namenode/FSDirAppendOp.java | 15 +++-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  8 +--
 .../hdfs/server/namenode/FSNamesystem.java  | 74 +++-
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 56 +++
 .../hdfs/server/namenode/LeaseManager.java  | 17 ++---
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 56 ++-
 .../server/namenode/TestFSEditLogLoader.java| 23 +++---
 .../hdfs/server/namenode/TestFSImage.java   |  4 +-
 .../hdfs/server/namenode/TestINodeFile.java |  6 +-
 .../hdfs/server/namenode/TestLeaseManager.java  |  4 +-
 16 files changed, 255 insertions(+), 102 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd909ed9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 02ef47e..9c00ea7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -119,6 +119,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -160,10 +161,10 @@ import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.RpcNoSuchMethodException;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
@@ -178,16 +179,15 @@ import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
 import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.net.InetAddresses;
-import org.apache.htrace.core.Tracer;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /
  * DFSClient can connect to a Hadoop Filesystem and
@@ -1291,17 +1291,43 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  /**
+   * Invoke namenode append RPC.
+   * It retries in case of {@link BlockNotYetCompleteException}.
+   */
+  private LastBlockWithStatus callAppend(String src,
+  EnumSetWritable<CreateFlag> flag) throws IOException {
+final long startTime = Time.monotonicNow();
+for(;;) {
+  try {
+return namenode.append(src, clientName, flag);
+  } catch(RemoteException re) {
+if (Time.monotonicNow() - startTime > 5000
+|| !RetriableException.class.getName().equals(
+re.getClassName())) {
+  throw re;
+}
+
+try { // sleep and retry
+  
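The loop above follows a general shape worth naming: retry an RPC only while the server reports a RetriableException, and only within a fixed time budget. A self-contained sketch of that shape, with invented names (BoundedRetry, budgetMs) and a hard-coded backoff standing in for the sleep elided above:

import java.util.concurrent.Callable;

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.util.Time;

final class BoundedRetry {
  static <T> T call(Callable<T> rpc, long budgetMs) throws Exception {
    final long start = Time.monotonicNow();
    for (;;) {
      try {
        return rpc.call();
      } catch (RemoteException re) {
        boolean retriable = RetriableException.class.getName()
            .equals(re.getClassName());
        if (!retriable || Time.monotonicNow() - start > budgetMs) {
          throw re;          // different failure, or out of budget
        }
        Thread.sleep(100);   // brief backoff before the next attempt
      }
    }
  }
}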

[32/50] [abbrv] hadoop git commit: YARN-4519. Potential deadlock of CapacityScheduler between decrease container and assign containers. Contributed by Meng Ding

2016-02-01 Thread aengineer
YARN-4519. Potential deadlock of CapacityScheduler between decrease container 
and assign containers. Contributed by Meng Ding


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f466364
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f466364
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f466364

Branch: refs/heads/HDFS-7240
Commit: 7f46636495e23693d588b0915f464fa7afd9102e
Parents: 41da9a0
Author: Jian He 
Authored: Wed Jan 27 15:38:32 2016 -0800
Committer: Jian He 
Committed: Thu Jan 28 14:51:00 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../server/resourcemanager/RMServerUtils.java   |  76 ++--
 .../scheduler/AbstractYarnScheduler.java|  64 --
 .../scheduler/AppSchedulingInfo.java|  30 +++--
 .../scheduler/SchedContainerChangeRequest.java  |  33 +++--
 .../scheduler/capacity/CSQueue.java |   3 +-
 .../scheduler/capacity/CapacityScheduler.java   | 119 ++-
 .../scheduler/capacity/LeafQueue.java   |  83 ++---
 .../scheduler/capacity/ParentQueue.java |   4 +-
 .../capacity/TestContainerResizing.java |  87 ++
 10 files changed, 322 insertions(+), 180 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f466364/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8eaed42..ee57e4b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -170,6 +170,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4633. Fix random test failure in 
TestRMRestart#testRMRestartAfterPreemption
 (Bibin A Chundatt via rohithsharmaks)
 
+YARN-4519. Potential deadlock of CapacityScheduler between decrease 
container
+and assign containers. (Meng Ding via jianhe)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f466364/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index cc30593..e19d55e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -53,9 +53,10 @@ import 
org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
-import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.SchedContainerChangeRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
@@ -114,43 +115,25 @@ public class RMServerUtils {
   queueName, scheduler, rmContext, queueInfo);
 }
   }
-  
+
   /**
-   * Normalize container increase/decrease request, it will normalize and 
update
-   * ContainerResourceChangeRequest.targetResource
+   * Validate increase/decrease request. This function must be called under
+   * the queue lock to make sure that the access to container resource is
+   * atomic. Refer to LeafQueue.decreaseContainer() and
+   * CapacityScheduler.updateIncreaseRequests()
+   *
* 
* 
* - Throw exception when any other error happens
* 
*/
-  public static void checkAndNormalizeContainerChangeRequest(
-  RMContext rmContext, ContainerResourceChangeRequest request,
-  boolean increase) throws InvalidResourceRequestException {
+  public static void checkSchedContainerChangeRequest(
+  
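The deadlock this patch removes is the classic lock-ordering hazard: one code path took the scheduler lock and then a queue lock, while another took them in the opposite order. A toy demonstration of the hazard (not YARN code; both lock names are invented); the fix, as the new javadoc says, is to run the validation under a single consistent lock:

final class LockOrderDemo {
  private final Object schedulerLock = new Object();
  private final Object queueLock = new Object();

  void decreaseContainer() {           // path 1: scheduler -> queue
    synchronized (schedulerLock) {
      synchronized (queueLock) { /* update container resource */ }
    }
  }

  void assignContainers() {            // path 2: queue -> scheduler
    synchronized (queueLock) {
      synchronized (schedulerLock) { /* allocate */ }  // deadlock risk
    }
  }
}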

[13/50] [abbrv] hadoop git commit: MAPREDUCE-6610. JobHistoryEventHandler should not swallow timeline response. Contributed by Li Lu

2016-02-01 Thread aengineer
MAPREDUCE-6610. JobHistoryEventHandler should not swallow timeline response. 
Contributed by Li Lu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0d7c221
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0d7c221
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0d7c221

Branch: refs/heads/HDFS-7240
Commit: d0d7c221682a88ac6e11e9b7c07513e369104b10
Parents: 45c763a
Author: Jian He 
Authored: Mon Jan 25 23:01:03 2016 -0800
Committer: Jian He 
Committed: Mon Jan 25 23:01:03 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt  |  3 +++
 .../jobhistory/JobHistoryEventHandler.java| 18 +-
 2 files changed, 20 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0d7c221/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 8f35c6f..68564b6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -698,6 +698,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6614. Remove unnecessary code in TestMapreduceConfigFields.
 (Kai Sasaki via aajisaka)
 
+MAPREDUCE-6610. JobHistoryEventHandler should not swallow timeline response
+(Li Lu via jianhe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0d7c221/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index dd0de2a..63e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -23,6 +23,7 @@ import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.Timer;
 import java.util.TimerTask;
@@ -63,6 +64,7 @@ import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
@@ -1012,7 +1014,21 @@ public class JobHistoryEventHandler extends 
AbstractService
 }
 
 try {
-  timelineClient.putEntities(tEntity);
+  TimelinePutResponse response = timelineClient.putEntities(tEntity);
+  List<TimelinePutResponse.TimelinePutError> errors = response.getErrors();
+  if (errors.size() == 0) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Timeline entities are successfully put in event " + event
+  .getEventType());
+}
+  } else {
+for (TimelinePutResponse.TimelinePutError error : errors) {
+  LOG.error(
+  "Error when publishing entity [" + error.getEntityType() + ","
+  + error.getEntityId() + "], server side error code: "
+  + error.getErrorCode());
+}
+  }
 } catch (IOException ex) {
   LOG.error("Error putting entity " + tEntity.getEntityId() + " to 
Timeline"
   + "Server", ex);
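The point of the change is that putEntities can succeed as an RPC yet still fail for individual entities, so the response must be inspected rather than discarded. A minimal sketch of that check, assuming the caller already holds a started TimelineClient (client construction and error-handling policy are simplified here):

import java.io.IOException;

import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.exceptions.YarnException;

final class TimelinePutCheck {
  static void putAndCheck(TimelineClient client, TimelineEntity entity)
      throws IOException, YarnException {
    TimelinePutResponse response = client.putEntities(entity);
    // A successful call can still carry per-entity errors.
    for (TimelinePutResponse.TimelinePutError err : response.getErrors()) {
      System.err.println("entity [" + err.getEntityType() + ","
          + err.getEntityId() + "] failed with code " + err.getErrorCode());
    }
  }
}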



[27/50] [abbrv] hadoop git commit: MAPREDUCE-6595. Fix findbugs warnings in OutputCommitter and FileOutputCommitter. Contributed by Akira AJISAKA.

2016-02-01 Thread aengineer
MAPREDUCE-6595. Fix findbugs warnings in OutputCommitter and 
FileOutputCommitter. Contributed by Akira AJISAKA.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86560a49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86560a49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86560a49

Branch: refs/heads/HDFS-7240
Commit: 86560a4978c498bd5e892c509a466807a812b8a7
Parents: 662e17b
Author: Junping Du 
Authored: Thu Jan 28 06:14:21 2016 -0800
Committer: Junping Du 
Committed: Thu Jan 28 06:14:21 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 hadoop-mapreduce-project/dev-support/findbugs-exclude.xml | 2 ++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86560a49/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 3f85a9b..a8abdb4 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -462,6 +462,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6584. Remove trailing whitespaces from mapred-default.xml.
 (aajisaka)
 
+MAPREDUCE-6595. Fix findbugs warnings in OutputCommitter and 
+FileOutputCommitter. (Akira AJISAKA via junping_du)
+
   OPTIMIZATIONS
 
 MAPREDUCE-6376. Add avro binary support for jhist files (Ray Chiang via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86560a49/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml 
b/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml
index f1ef2b8..9b4d8c9 100644
--- a/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml
+++ b/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml
@@ -159,6 +159,7 @@



+   


  
@@ -170,6 +171,7 @@



+   


  



[09/50] [abbrv] hadoop git commit: HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently. Contributed by Mingliang Liu.

2016-02-01 Thread aengineer
HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently. Contributed by 
Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8650fea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8650fea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8650fea

Branch: refs/heads/HDFS-7240
Commit: e8650fea1f0837026cbb36ae8bf51c6133259809
Parents: ec4d2d9
Author: Jitendra Pandey 
Authored: Mon Jan 25 15:42:25 2016 -0800
Committer: Jitendra Pandey 
Committed: Mon Jan 25 16:08:46 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../apache/hadoop/hdfs/TestLeaseRecovery2.java  | 48 ++--
 2 files changed, 37 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8650fea/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f35ae3d..68d5de6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2690,6 +2690,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9625. set replication for empty file failed when set storage policy
 (DENG FEI via vinayakumarb)
 
+HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently (Mingliang 
Liu
+via jitendra)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8650fea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index 13e8644..e8cd476 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -21,11 +21,14 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -49,10 +53,11 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestLeaseRecovery2 {
   
@@ -85,12 +90,15 @@ public class TestLeaseRecovery2 {
* 
* @throws IOException
*/
-  @BeforeClass
-  public static void startUp() throws IOException {
+  @Before
+  public void startUp() throws IOException {
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
+cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(5)
+.checkExitOnShutdown(false)
+.build();
 cluster.waitActive();
 dfs = cluster.getFileSystem();
   }
@@ -99,8 +107,8 @@ public class TestLeaseRecovery2 {
* stop the cluster
* @throws IOException
*/
-  @AfterClass
-  public static void tearDown() throws IOException {
+  @After
+  public void tearDown() throws IOException {
 if (cluster != null) {
   IOUtils.closeStream(dfs);
   cluster.shutdown();
@@ -419,17 +427,17 @@ public class TestLeaseRecovery2 {
* 
* @throws Exception
*/
-  @Test
+  @Test(timeout = 3)
   public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception {
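The switch from @BeforeClass/@AfterClass to @Before/@After gives every test its own MiniDFSCluster, so a test that deliberately crashes the cluster (note the checkExitOnShutdown(false) above) cannot poison the tests that run after it. The JUnit 4 lifecycle involved, shown with a throwaway stand-in resource rather than a real cluster:

import static org.junit.Assert.assertEquals;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class PerTestFixtureExample {
  private StringBuilder cluster;      // stand-in for MiniDFSCluster

  @Before
  public void startUp() {             // runs before every test method
    cluster = new StringBuilder("fresh");
  }

  @After
  public void tearDown() {            // runs after every test method
    cluster = null;
  }

  @Test
  public void testSeesFreshState() {
    assertEquals("fresh", cluster.toString());
  }
}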
 

[25/50] [abbrv] hadoop git commit: MAPREDUCE-6431. JobClient should be an AutoClosable (haibochen via rkanter)

2016-02-01 Thread aengineer
MAPREDUCE-6431. JobClient should be an AutoClosable (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dca0dc8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dca0dc8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dca0dc8a

Branch: refs/heads/HDFS-7240
Commit: dca0dc8ac28e843acd8b79c9560245638a539fde
Parents: ec25c7f
Author: Robert Kanter 
Authored: Wed Jan 27 17:11:07 2016 -0800
Committer: Robert Kanter 
Committed: Wed Jan 27 17:11:07 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt  |  2 ++
 .../test/java/org/apache/hadoop/mapred/TestJobClient.java | 10 ++
 .../src/main/java/org/apache/hadoop/mapred/JobClient.java |  3 ++-
 3 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca0dc8a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 68564b6..3f85a9b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -311,6 +311,8 @@ Release 2.9.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6431. JobClient should be an AutoClosable (haibochen via rkanter)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca0dc8a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
index b18b531..bf37b03 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
@@ -87,4 +87,14 @@ public class TestJobClient {
 client.getClusterHandle().getStagingAreaDir().toString()
 .equals(client.getStagingAreaDir().toString()));
   }
+
+  /**
+   * Asks the compiler to check if JobClient is AutoClosable.
+   */
+  @Test(timeout = 1)
+  public void testAutoClosable() throws IOException {
+Configuration conf = new Configuration();
+try (JobClient jobClient = new JobClient(conf)) {
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca0dc8a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
index cf123c7..baa6221 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
@@ -137,7 +137,7 @@ import org.apache.hadoop.util.ToolRunner;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class JobClient extends CLI {
+public class JobClient extends CLI implements AutoCloseable {
 
   @InterfaceAudience.Private
   public static final String MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_KEY =
@@ -499,6 +499,7 @@ public class JobClient extends CLI {
   /**
* Close the JobClient.
*/
+  @Override
   public synchronized void close() throws IOException {
 cluster.close();
   }
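With close() now overriding AutoCloseable.close(), callers can put a JobClient in try-with-resources and have the underlying cluster connection released even when an exception escapes. A small usage sketch (the status lookup is illustrative only):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobClient;

class JobClientUsage {
  static void printTaskTrackerCount() throws IOException {
    Configuration conf = new Configuration();
    try (JobClient jobClient = new JobClient(conf)) {
      System.out.println(jobClient.getClusterStatus().getTaskTrackers());
    } // close() runs here automatically
  }
}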



[24/50] [abbrv] hadoop git commit: HDFS-9677. Rename generationStampV1/generationStampV2 to legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu.

2016-02-01 Thread aengineer
HDFS-9677. Rename generationStampV1/generationStampV2 to 
legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec25c7f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec25c7f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec25c7f9

Branch: refs/heads/HDFS-7240
Commit: ec25c7f9c7e60c077d8c4143253c20445fcdaecf
Parents: 3a95713
Author: Jing Zhao 
Authored: Wed Jan 27 16:34:40 2016 -0800
Committer: Jing Zhao 
Committed: Wed Jan 27 16:34:40 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/blockmanagement/BlockIdManager.java  | 83 ++--
 .../OutOfLegacyGenerationStampsException.java   | 38 +
 .../OutOfV1GenerationStampsException.java   | 38 -
 .../hdfs/server/common/HdfsServerConstants.java |  3 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  4 +-
 .../hdfs/server/namenode/FSImageFormat.java | 12 +--
 .../server/namenode/FSImageFormatProtobuf.java  | 12 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  4 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|  4 +-
 .../blockmanagement/TestSequentialBlockId.java  | 18 ++---
 .../hdfs/server/namenode/TestEditLog.java   |  6 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |  4 +-
 .../hdfs/server/namenode/TestSaveNamespace.java |  2 +-
 15 files changed, 119 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec25c7f9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 097c051..7e75558 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -959,6 +959,9 @@ Release 2.9.0 - UNRELEASED
 HDFS-9541. Add hdfsStreamBuilder API to libhdfs to support 
defaultBlockSizes
 greater than 2 GB. (cmccabe via zhz)
 
+HDFS-9677. Rename generationStampV1/generationStampV2 to
+legacyGenerationStamp/generationStamp. (Mingliang Liu via jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec25c7f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 9c71287..3f21d9b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -36,11 +36,11 @@ public class BlockIdManager {
* The global generation stamp for legacy blocks with randomly
* generated block IDs.
*/
-  private final GenerationStamp generationStampV1 = new GenerationStamp();
+  private final GenerationStamp legacyGenerationStamp = new GenerationStamp();
   /**
* The global generation stamp for this file system.
*/
-  private final GenerationStamp generationStampV2 = new GenerationStamp();
+  private final GenerationStamp generationStamp = new GenerationStamp();
   /**
* The value of the generation stamp when the first switch to sequential
* block IDs was made. Blocks with generation stamps below this value
@@ -49,7 +49,7 @@ public class BlockIdManager {
* (or initialized as an offset from the V1 (legacy) generation stamp on
* upgrade).
*/
-  private long generationStampV1Limit;
+  private long legacyGenerationStampLimit;
   /**
* The global block ID space for this file system.
*/
@@ -57,7 +57,8 @@ public class BlockIdManager {
   private final SequentialBlockGroupIdGenerator blockGroupIdGenerator;
 
   public BlockIdManager(BlockManager blockManager) {
-this.generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
+this.legacyGenerationStampLimit =
+HdfsConstants.GRANDFATHER_GENERATION_STAMP;
 this.blockIdGenerator = new SequentialBlockIdGenerator(blockManager);
 this.blockGroupIdGenerator = new 
SequentialBlockGroupIdGenerator(blockManager);
   }
@@ -68,14 +69,14 @@ public class BlockIdManager {
* Should be invoked only during the first upgrade to
* sequential block IDs.
*/
-  public long upgradeGenerationStampToV2() {
-Preconditions.checkState(generationStampV2.getCurrentValue() ==
+  

[17/50] [abbrv] hadoop git commit: YARN-4573. Fix test failure in TestRMAppTransitions#testAppRunningKill and testAppKilledKilled. (Takashi Ohnishi via rohithsharmaks)

2016-02-01 Thread aengineer
YARN-4573. Fix test failure in TestRMAppTransitions#testAppRunningKill and 
testAppKilledKilled. (Takashi Ohnishi via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c01bee01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c01bee01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c01bee01

Branch: refs/heads/HDFS-7240
Commit: c01bee010832ca31d8e60e5461181cdf05140602
Parents: 4efdf3a
Author: Rohith Sharma K S 
Authored: Wed Jan 27 08:23:02 2016 +0530
Committer: Rohith Sharma K S 
Committed: Wed Jan 27 08:23:02 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../server/resourcemanager/rmapp/TestRMAppTransitions.java  | 9 +
 2 files changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c01bee01/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 435eb68..2fbecdb 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -162,6 +162,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4612. Fix rumen and scheduler load simulator handle killed tasks 
properly.
 (Ming Ma via xgong)
 
+YARN-4573. Fix test failure in TestRMAppTransitions#testAppRunningKill and
+testAppKilledKilled. (Takashi Ohnishi via rohithsharmaks)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c01bee01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
index f2f09de..293c0b6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
@@ -275,7 +275,7 @@ public class TestRMAppTransitions {
   // Test expected newly created app state
   private static void testAppStartState(ApplicationId applicationId, 
   String user, String name, String queue, RMApp application) {
-Assert.assertTrue("application start time is not greater then 0", 
+Assert.assertTrue("application start time is not greater than 0",
 application.getStartTime() > 0);
 Assert.assertTrue("application start time is before currentTime", 
 application.getStartTime() <= System.currentTimeMillis());
@@ -300,7 +300,7 @@ public class TestRMAppTransitions {
 
   // test to make sure times are set when app finishes
   private static void assertStartTimeSet(RMApp application) {
-Assert.assertTrue("application start time is not greater then 0", 
+Assert.assertTrue("application start time is not greater than 0",
 application.getStartTime() > 0);
 Assert.assertTrue("application start time is before currentTime", 
 application.getStartTime() <= System.currentTimeMillis());
@@ -319,9 +319,9 @@ public class TestRMAppTransitions {
   // test to make sure times are set when app finishes
   private void assertTimesAtFinish(RMApp application) {
 assertStartTimeSet(application);
-Assert.assertTrue("application finish time is not greater then 0",
+Assert.assertTrue("application finish time is not greater than 0",
 (application.getFinishTime() > 0));
-Assert.assertTrue("application finish time is not >= then start time",
+Assert.assertTrue("application finish time is not >= than start time",
 (application.getFinishTime() >= application.getStartTime()));
   }
 
@@ -364,6 +364,7 @@ public class TestRMAppTransitions {
 application.getCurrentAppAttempt().handle(
 new 
RMAppAttemptEvent(application.getCurrentAppAttempt().getAppAttemptId(),
 RMAppAttemptEventType.ATTEMPT_UPDATE_SAVED));
+rmDispatcher.await();
   }
 
   protected RMApp testCreateAppNewSaving(
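The one-line rmDispatcher.await() closes a race: RM app events are handled on a separate dispatcher thread, so assertions that run before the queue drains can see stale state. The test dispatcher's await() blocks until every queued event has been processed; a schematic sketch (event wiring elided):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.DrainDispatcher;

class DispatcherAwaitExample {
  static void demo() {
    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(new Configuration());
    dispatcher.start();
    // ... dispatcher.getEventHandler().handle(someEvent); ...
    dispatcher.await();  // block until all queued events are handled
    // state assertions are safe from here on
    dispatcher.stop();
  }
}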



[37/50] [abbrv] hadoop git commit: HDFS-9706. Log more details in debug logs in BlockReceiver's constructor. (Xiao Chen via Yongjun Zhang)

2016-02-01 Thread aengineer
HDFS-9706. Log more details in debug logs in BlockReceiver's constructor. (Xiao 
Chen via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ee06031
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ee06031
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ee06031

Branch: refs/heads/HDFS-7240
Commit: 8ee060311c89b7faa71dd039481a97ba15e2413d
Parents: ac68666
Author: Yongjun Zhang 
Authored: Thu Jan 28 22:53:26 2016 -0800
Committer: Yongjun Zhang 
Committed: Thu Jan 28 23:04:03 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hdfs/server/datanode/BlockReceiver.java   | 18 --
 2 files changed, 15 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ee06031/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9b80aa1..570caa5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1860,6 +1860,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8898. Create API and command-line argument to get quota and quota
 usage without detailed content summary. (Ming Ma via kihwal)
 
+HDFS-9706. Log more details in debug logs in BlockReceiver's constructor.
+(Xiao Chen via Yongjun Zhang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ee06031/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index e7908a5..0dc8cab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -187,12 +187,18 @@ class BlockReceiver implements Closeable {
   this.maxSendIdleTime = (long) (readTimeout * 0.9);
   if (LOG.isDebugEnabled()) {
 LOG.debug(getClass().getSimpleName() + ": " + block
-+ "\n  isClient  =" + isClient + ", clientname=" + clientname
-+ "\n  isDatanode=" + isDatanode + ", srcDataNode=" + srcDataNode
-+ "\n  inAddr=" + inAddr + ", myAddr=" + myAddr
-+ "\n  cachingStrategy = " + cachingStrategy
-+ "\n  pinning=" + pinning
-);
++ "\n storageType=" + storageType + ", inAddr=" + inAddr
++ ", myAddr=" + myAddr + "\n stage=" + stage + ", newGs=" + newGs
++ ", minBytesRcvd=" + minBytesRcvd
++ ", maxBytesRcvd=" + maxBytesRcvd + "\n clientname=" + clientname
++ ", srcDataNode=" + srcDataNode
++ ", datanode=" + datanode.getDisplayName()
++ "\n requestedChecksum=" + requestedChecksum
++ "\n cachingStrategy=" + cachingStrategy
++ "\n allowLazyPersist=" + allowLazyPersist + ", pinning=" + 
pinning
++ ", isClient=" + isClient + ", isDatanode=" + isDatanode
++ ", responseInterval=" + responseInterval
+);
   }
 
   //
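The isDebugEnabled() guard above exists so the long concatenated message is never built when DEBUG is off. With an SLF4J logger the same effect falls out of placeholder syntax; the snippet below is an alternative idiom, not the committed code (message formatting is deferred until the level check passes):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class GuardedDebugLogging {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedDebugLogging.class);

  void logReceiverState(String block, String inAddr, String myAddr) {
    // No message string is assembled unless DEBUG is enabled.
    LOG.debug("BlockReceiver: {} inAddr={}, myAddr={}",
        block, inAddr, myAddr);
  }
}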



hadoop git commit: HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames (Mingliang Liu via stevel)

2016-02-01 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 6eae76f7a -> dcf2c8b3c


HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames  (Mingliang 
Liu via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcf2c8b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcf2c8b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcf2c8b3

Branch: refs/heads/branch-2.8
Commit: dcf2c8b3c88408303511e51ff93e8ce2b68750ba
Parents: 6eae76f
Author: Steve Loughran 
Authored: Mon Feb 1 16:01:22 2016 +
Committer: Steve Loughran 
Committed: Mon Feb 1 16:01:22 2016 +

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/namenode/FSNamesystem.java  |  3 +-
 .../hdfs/server/namenode/TestFSNamesystem.java  | 78 
 3 files changed, 83 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcf2c8b3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a1c5794..89c9e96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1687,6 +1687,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final
 DatanodeStorage.State state)' method (Daryn Sharp via vinayakumarb)
 
+HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames
+(Mingliang Liu via stevel)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcf2c8b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index e00a989..1ecb286 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -900,7 +900,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 
   private List<AuditLogger> initAuditLoggers(Configuration conf) {
 // Initialize the custom access loggers if configured.
-Collection<String> alClasses = 
conf.getStringCollection(DFS_NAMENODE_AUDIT_LOGGERS_KEY);
+Collection<String> alClasses =
+conf.getTrimmedStringCollection(DFS_NAMENODE_AUDIT_LOGGERS_KEY);
 List<AuditLogger> auditLoggers = Lists.newArrayList();
 if (alClasses != null && !alClasses.isEmpty()) {
   for (String className : alClasses) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcf2c8b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index 15fc40e..124225b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -20,14 +20,18 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.hamcrest.CoreMatchers.either;
+import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.InetAddress;
 import java.net.URI;
 import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -39,6 +43,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
 import org.apache.hadoop.test.GenericTestUtils;
 import 

hadoop git commit: HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames (Mingliang Liu via stevel)

2016-02-01 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8f2622b6a -> af2dccbca


HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames  (Mingliang 
Liu via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af2dccbc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af2dccbc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af2dccbc

Branch: refs/heads/trunk
Commit: af2dccbca50b25f84d9d6c88e1a237a42261ce02
Parents: 8f2622b
Author: Steve Loughran 
Authored: Mon Feb 1 16:01:22 2016 +
Committer: Steve Loughran 
Committed: Mon Feb 1 16:01:50 2016 +

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/namenode/FSNamesystem.java  |  3 +-
 .../hdfs/server/namenode/TestFSNamesystem.java  | 78 
 3 files changed, 83 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2dccbc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5a8b525..fdf69d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2661,6 +2661,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final
 DatanodeStorage.State state)' method (Daryn Sharp via vinayakumarb)
 
+HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames
+(Mingliang Liu via stevel)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2dccbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 5e2e975..0387c32 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -889,7 +889,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 
   private List<AuditLogger> initAuditLoggers(Configuration conf) {
 // Initialize the custom access loggers if configured.
-Collection<String> alClasses = 
conf.getStringCollection(DFS_NAMENODE_AUDIT_LOGGERS_KEY);
+Collection<String> alClasses =
+conf.getTrimmedStringCollection(DFS_NAMENODE_AUDIT_LOGGERS_KEY);
 List<AuditLogger> auditLoggers = Lists.newArrayList();
 if (alClasses != null && !alClasses.isEmpty()) {
   for (String className : alClasses) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af2dccbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index 6308179..b9a2d15 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -20,14 +20,18 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.hamcrest.CoreMatchers.either;
+import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.InetAddress;
 import java.net.URI;
 import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -38,6 +42,7 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
 import org.apache.hadoop.test.GenericTestUtils;
 import 

hadoop git commit: HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames (Mingliang Liu via stevel)

2016-02-01 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 71374cca9 -> 7786e97df


HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames  (Mingliang 
Liu via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7786e97d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7786e97d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7786e97d

Branch: refs/heads/branch-2
Commit: 7786e97df2ecec1d4ae6d4888fdf492f6d991f5c
Parents: 71374cc
Author: Steve Loughran 
Authored: Mon Feb 1 16:01:22 2016 +
Committer: Steve Loughran 
Committed: Mon Feb 1 16:01:32 2016 +

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/namenode/FSNamesystem.java  |  3 +-
 .../hdfs/server/namenode/TestFSNamesystem.java  | 78 
 3 files changed, 83 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7786e97d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6b80233..dfd7e18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1769,6 +1769,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final
 DatanodeStorage.State state)' method (Daryn Sharp via vinayakumarb)
 
+HDFS-9708. FSNamesystem.initAuditLoggers() doesn't trim classnames
+(Mingliang Liu via stevel)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7786e97d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9fcfd8a..6674ec3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -875,7 +875,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 
   private List<AuditLogger> initAuditLoggers(Configuration conf) {
 // Initialize the custom access loggers if configured.
-Collection<String> alClasses = 
conf.getStringCollection(DFS_NAMENODE_AUDIT_LOGGERS_KEY);
+Collection<String> alClasses =
+conf.getTrimmedStringCollection(DFS_NAMENODE_AUDIT_LOGGERS_KEY);
 List<AuditLogger> auditLoggers = Lists.newArrayList();
 if (alClasses != null && !alClasses.isEmpty()) {
   for (String className : alClasses) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7786e97d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index 6308179..b9a2d15 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -20,14 +20,18 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.hamcrest.CoreMatchers.either;
+import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.InetAddress;
 import java.net.URI;
 import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -38,6 +42,7 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
 import org.apache.hadoop.test.GenericTestUtils;
 import 

hadoop git commit: MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread. Contributed by Xuan Gong

2016-02-01 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk af2dccbca -> 59a212b6e


MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread. 
Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59a212b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59a212b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59a212b6

Branch: refs/heads/trunk
Commit: 59a212b6e1265adfa9b55c71b65a22157dfccf77
Parents: af2dccb
Author: Jason Lowe 
Authored: Mon Feb 1 16:05:06 2016 +
Committer: Jason Lowe 
Committed: Mon Feb 1 16:05:06 2016 +

--
 hadoop-mapreduce-project/CHANGES.txt|  6 +
 .../org/apache/hadoop/mapred/ClientCache.java   | 24 +++-
 .../hadoop/mapred/ClientServiceDelegate.java| 16 +
 .../org/apache/hadoop/mapred/YARNRunner.java| 11 +
 .../mapred/YarnClientProtocolProvider.java  |  5 ++--
 .../TestYarnClientProtocolProvider.java |  6 +++--
 6 files changed, 63 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a212b6/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 8261b34..55284da 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -736,6 +736,9 @@ Release 2.7.3 - UNRELEASED
 MAPREDUCE-6554. MRAppMaster servicestart failing with NPE in
 MRAppMaster#parsePreviousJobHistory (Bibin A Chundatt via jlowe)
 
+MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread.
+(Xuan Gong via jlowe)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES
@@ -1041,6 +1044,9 @@ Release 2.6.4 - UNRELEASED
 MAPREDUCE-6554. MRAppMaster servicestart failing with NPE in
 MRAppMaster#parsePreviousJobHistory (Bibin A Chundatt via jlowe)
 
+MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread.
+(Xuan Gong via jlowe)
+
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a212b6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
index 4335c82..93ea5c4 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
@@ -22,11 +22,11 @@ import java.io.IOException;
 import java.security.PrivilegedAction;
 import java.util.HashMap;
 import java.util.Map;
-
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
@@ -97,4 +97,26 @@ public class ClientCache {
   }
 });
   }
+
+  public void close() throws IOException {
+    if (rm != null) {
+      rm.close();
+    }
+
+    if (hsProxy != null) {
+      RPC.stopProxy(hsProxy);
+      hsProxy = null;
+    }
+
+    if (cache != null && !cache.isEmpty()) {
+      for (ClientServiceDelegate delegate : cache.values()) {
+        if (delegate != null) {
+          delegate.close();
+          delegate = null;
+        }
+      }
+      cache.clear();
+      cache = null;
+    }
+  }
 }
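
[Editor's note, not part of the email: the close() added above stops the ResourceMgrDelegate, whose embedded YarnClient owns the leaked thread, drops the history-server RPC proxy via RPC.stopProxy, and closes every cached ClientServiceDelegate. The YARNRunner and YarnClientProtocolProvider hunks are not reproduced in this digest; the following is a purely illustrative sketch of the plumbing the diffstat implies, in which every name except ClientCache#close is an assumption.]

import java.io.IOException;
import org.apache.hadoop.mapred.ClientCache;

// Illustrative only: how a YARNRunner-like owner would propagate shutdown
// into the new ClientCache#close(). The committed hunks are omitted above.
public class RunnerCloseSketch {
  private final ClientCache clientCache; // assumed field, mirroring YARNRunner

  public RunnerCloseSketch(ClientCache clientCache) {
    this.clientCache = clientCache;
  }

  // Before MAPREDUCE-6618 no such hook existed, so the YarnClient service
  // thread inside ResourceMgrDelegate outlived the job client.
  public void close() throws IOException {
    clientCache.close();
  }
}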

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a212b6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index 8517c19..eac8dbc 100644
--- 
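
[Editor's note: this hunk is truncated in the digest; per the diffstat, ClientServiceDelegate.java gains 16 lines. Below is a hypothetical sketch of the kind of cleanup such a close() performs, modeled on the RPC.stopProxy pattern used in ClientCache#close above. It assumes the delegate caches its MRClientProtocol proxy in a field named realProxy; that field name is an assumption, not confirmed by the visible diff.]

import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;

// Hypothetical sketch of the truncated hunk; names are assumptions apart
// from the RPC.stopProxy call itself.
public class DelegateCloseSketch {
  private MRClientProtocol realProxy; // assumed cached AM / history proxy

  public void close() {
    if (realProxy != null) {
      RPC.stopProxy(realProxy); // release the cached RPC connection
      realProxy = null;
    }
  }
}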

hadoop git commit: MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread. Contributed by Xuan Gong (cherry picked from commit 59a212b6e1265adfa9b55c71b65a22157dfccf77)

2016-02-01 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 dcf2c8b3c -> 7bb48ed16


MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread. 
Contributed by Xuan Gong
(cherry picked from commit 59a212b6e1265adfa9b55c71b65a22157dfccf77)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7bb48ed1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7bb48ed1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7bb48ed1

Branch: refs/heads/branch-2.8
Commit: 7bb48ed162b5216caff643df4586c01230c43c53
Parents: dcf2c8b
Author: Jason Lowe 
Authored: Mon Feb 1 16:05:06 2016 +
Committer: Jason Lowe 
Committed: Mon Feb 1 16:11:20 2016 +

--
 hadoop-mapreduce-project/CHANGES.txt|  6 +
 .../org/apache/hadoop/mapred/ClientCache.java   | 24 +++-
 .../hadoop/mapred/ClientServiceDelegate.java| 16 +
 .../org/apache/hadoop/mapred/YARNRunner.java| 11 +
 .../mapred/YarnClientProtocolProvider.java  |  5 ++--
 .../TestYarnClientProtocolProvider.java |  6 +++--
 6 files changed, 63 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bb48ed1/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index a82f872..a745bed 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -433,6 +433,9 @@ Release 2.7.3 - UNRELEASED
 MAPREDUCE-6554. MRAppMaster servicestart failing with NPE in
 MRAppMaster#parsePreviousJobHistory (Bibin A Chundatt via jlowe)
 
+MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread.
+(Xuan Gong via jlowe)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES
@@ -735,6 +738,9 @@ Release 2.6.4 - UNRELEASED
 MAPREDUCE-6554. MRAppMaster servicestart failing with NPE in
 MRAppMaster#parsePreviousJobHistory (Bibin A Chundatt via jlowe)
 
+MAPREDUCE-6618. YarnClientProtocolProvider leaking the YarnClient thread.
+(Xuan Gong via jlowe)
+
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bb48ed1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
index 4335c82..93ea5c4 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
@@ -22,11 +22,11 @@ import java.io.IOException;
 import java.security.PrivilegedAction;
 import java.util.HashMap;
 import java.util.Map;
-
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
@@ -97,4 +97,26 @@ public class ClientCache {
   }
 });
   }
+
+  public void close() throws IOException {
+    if (rm != null) {
+      rm.close();
+    }
+
+    if (hsProxy != null) {
+      RPC.stopProxy(hsProxy);
+      hsProxy = null;
+    }
+
+    if (cache != null && !cache.isEmpty()) {
+      for (ClientServiceDelegate delegate : cache.values()) {
+        if (delegate != null) {
+          delegate.close();
+          delegate = null;
+        }
+      }
+      cache.clear();
+      cache = null;
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bb48ed1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index 8517c19..eac8dbc 100644
---