hadoop git commit: Revert "HDFS-9820. Improve distcp to support efficient restore to an earlier snapshot. Contributed by Yongjun Zhang."

2016-10-17 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/trunk b61fb267b -> 0bc6d37f3


Revert "HDFS-9820. Improve distcp to support efficient restore to an earlier 
snapshot. Contributed by Yongjun Zhang."

This reverts commit 412c4c9a342b73bf1c1a7f43ea91245cbf94d02d.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bc6d37f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bc6d37f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bc6d37f

Branch: refs/heads/trunk
Commit: 0bc6d37f3c1e7c2a8682dffa95461a884bd6ba17
Parents: b61fb26
Author: Yongjun Zhang 
Authored: Mon Oct 17 22:47:37 2016 -0700
Committer: Yongjun Zhang 
Committed: Mon Oct 17 22:47:37 2016 -0700

--
 .../java/org/apache/hadoop/tools/DiffInfo.java  |  47 +-
 .../java/org/apache/hadoop/tools/DistCp.java|  34 +-
 .../apache/hadoop/tools/DistCpConstants.java|   1 -
 .../apache/hadoop/tools/DistCpOptionSwitch.java |   5 -
 .../org/apache/hadoop/tools/DistCpOptions.java  |  79 +-
 .../org/apache/hadoop/tools/DistCpSync.java | 256 ++
 .../org/apache/hadoop/tools/OptionsParser.java  |  27 +-
 .../apache/hadoop/tools/SimpleCopyListing.java  |  17 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java |   4 +-
 .../hadoop/tools/TestDistCpSyncReverseBase.java | 868 ---
 .../tools/TestDistCpSyncReverseFromSource.java  |  36 -
 .../tools/TestDistCpSyncReverseFromTarget.java  |  36 -
 .../apache/hadoop/tools/TestOptionsParser.java  |  85 +-
 13 files changed, 155 insertions(+), 1340 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bc6d37f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
index 7e56301..79bb7fe 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
@@ -44,49 +44,28 @@ class DiffInfo {
   };
 
   /** The source file/dir of the rename or deletion op */
-  private Path source;
-  /** The target file/dir of the rename op. Null means the op is deletion. */
-  private Path target;
-
-  private SnapshotDiffReport.DiffType type;
+  final Path source;
   /**
* The intermediate file/dir for the op. For a rename or a delete op,
* we first rename the source to this tmp file/dir.
*/
   private Path tmp;
+  /** The target file/dir of the rename op. Null means the op is deletion. */
+  Path target;
 
-  DiffInfo(final Path source, final Path target,
-  SnapshotDiffReport.DiffType type) {
-assert source != null;
-this.source = source;
-this.target= target;
-this.type = type;
-  }
-
-  void setSource(final Path source) {
-this.source = source;
-  }
-
-  Path getSource() {
-return source;
-  }
-
-  void setTarget(final Path target) {
-this.target = target;
-  }
+  private final SnapshotDiffReport.DiffType type;
 
-  Path getTarget() {
-return target;
+  public SnapshotDiffReport.DiffType getType(){
+return this.type;
   }
 
-  public void setType(final SnapshotDiffReport.DiffType type){
+  DiffInfo(Path source, Path target, SnapshotDiffReport.DiffType type) {
+assert source != null;
+this.source = source;
+this.target= target;
 this.type = type;
   }
 
-  public SnapshotDiffReport.DiffType getType(){
-return type;
-  }
-
   void setTmp(Path tmp) {
 this.tmp = tmp;
   }
@@ -94,10 +73,4 @@ class DiffInfo {
   Path getTmp() {
 return tmp;
   }
-
-  @Override
-  public String toString() {
-return type + ": src=" + String.valueOf(source) + " tgt="
-+ String.valueOf(target) + " tmp=" + String.valueOf(tmp);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bc6d37f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
index e9decd2..be58f13 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
@@ -77,21 +77,6 @@ public class DistCp extends Configured implements Tool {
   private boolean submitted;
   private FileSystem jobFS;
 
-  private void prepareFileListing(Job job) throws Exception {
-if (inputOptions.shouldUseSnapshotDiff()) {
-  try {
-DistCpSync distCpSync = new 

hadoop git commit: HDFS-10712. TestDataNodeVolumeFailure should pass not null BlockReportContext. Contributed by Vinitha Gankidi.

2016-10-17 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 f8b2d7720 -> ae972dd08


HDFS-10712. TestDataNodeVolumeFailure should pass not null BlockReportContext. 
Contributed by Vinitha Gankidi.

(cherry picked from commit 278eeb105611444bee0261ce7a0e6ea983a5c615)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae972dd0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae972dd0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae972dd0

Branch: refs/heads/branch-2.8
Commit: ae972dd08006a76a08e2a34dab73b8a211246cec
Parents: f8b2d77
Author: Vinitha Reddy Gankidi 
Authored: Mon Oct 17 18:55:26 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Oct 17 19:02:29 2016 -0700

--
 .../hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java   | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae972dd0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 379e9e6..aa1e5e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -67,6 +67,7 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -192,7 +193,8 @@ public class TestDataNodeVolumeFailure {
 new StorageBlockReport(dnStorage, blockList);
 }
 
-cluster.getNameNodeRpc().blockReport(dnR, bpid, reports, null);
+cluster.getNameNodeRpc().blockReport(dnR, bpid, reports,
+new BlockReportContext(1, 0, System.nanoTime(), 0));
 
 // verify number of blocks and files...
 verify(filename, filesize);
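
A minimal sketch of what the fix does, for readers following along (hedged: it reuses the test fixtures dnR, bpid, and reports from TestDataNodeVolumeFailure above; on branch-2.7 the constructor lacks the trailing lease id, see the follow-up commit below):

    // Build a non-null context for a single-RPC full block report.
    BlockReportContext context = new BlockReportContext(
        1,                  // totalRpcs: the whole report fits in one RPC
        0,                  // curRpc: index of this RPC within the report
        System.nanoTime(),  // reportId: unique id for this full block report
        0);                 // leaseId: 0 means no block report lease is held
    cluster.getNameNodeRpc().blockReport(dnR, bpid, reports, context);

Passing a real context instead of null exercises the same NameNode code path that production DataNode block reports take.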





hadoop git commit: HDFS-10712. TestDataNodeVolumeFailure should pass not null BlockReportContext. Contributed by Vinitha Gankidi.

2016-10-17 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 1fcaba9b1 -> 86f291f05


HDFS-10712. TestDataNodeVolumeFailure should pass not null BlockReportContext. 
Contributed by Vinitha Gankidi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86f291f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86f291f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86f291f0

Branch: refs/heads/branch-2.7
Commit: 86f291f057d3479d06cbfc1f77ed38d36154fcf9
Parents: 1fcaba9
Author: Vinitha Reddy Gankidi 
Authored: Mon Oct 17 18:55:26 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Oct 17 19:08:52 2016 -0700

--
 .../hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86f291f0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 41e8d7b..c265679 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -186,7 +186,8 @@ public class TestDataNodeVolumeFailure {
 new StorageBlockReport(dnStorage, blockList);
 }
 
-cluster.getNameNodeRpc().blockReport(dnR, bpid, reports, null);
+cluster.getNameNodeRpc().blockReport(dnR, bpid, reports,
+new BlockReportContext(1, 0, System.nanoTime()));
 
 // verify number of blocks and files...
 verify(filename, filesize);





hadoop git commit: HDFS-10712. TestDataNodeVolumeFailure should pass not null BlockReportContext. Contributed by Vinitha Gankidi.

2016-10-17 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d55a7f893 -> 278eeb105


HDFS-10712. TestDataNodeVolumeFailure should pass not null BlockReportContext. 
Contributed by Vinitha Gankidi.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/278eeb10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/278eeb10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/278eeb10

Branch: refs/heads/branch-2
Commit: 278eeb105611444bee0261ce7a0e6ea983a5c615
Parents: d55a7f8
Author: Vinitha Reddy Gankidi 
Authored: Mon Oct 17 18:55:26 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Oct 17 18:55:26 2016 -0700

--
 .../hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java   | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/278eeb10/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 379e9e6..aa1e5e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -67,6 +67,7 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -192,7 +193,8 @@ public class TestDataNodeVolumeFailure {
 new StorageBlockReport(dnStorage, blockList);
 }
 
-cluster.getNameNodeRpc().blockReport(dnR, bpid, reports, null);
+cluster.getNameNodeRpc().blockReport(dnR, bpid, reports,
+new BlockReportContext(1, 0, System.nanoTime(), 0));
 
 // verify number of blocks and files...
 verify(filename, filesize);





hadoop git commit: HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for interleaving block reports. Contributed by Vinitha Gankidi.

2016-10-17 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 8eb0b6f39 -> 1fcaba9b1


HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for 
interleaving block reports. Contributed by Vinitha Gankidi.

(cherry picked from commit 391ce535a739dc92cb90017d759217265a4fd969)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fcaba9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fcaba9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fcaba9b

Branch: refs/heads/branch-2.7
Commit: 1fcaba9b14aa932e91f9cd18d4d98adb744e
Parents: 8eb0b6f
Author: Vinitha Reddy Gankidi 
Authored: Mon Oct 17 18:37:44 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Oct 17 18:37:44 2016 -0700

--
 .../server/blockmanagement/BlockManager.java| 55 ++---
 .../blockmanagement/DatanodeDescriptor.java | 49 ---
 .../blockmanagement/DatanodeStorageInfo.java| 11 
 .../hdfs/server/namenode/NameNodeRpcServer.java |  2 +-
 .../blockmanagement/TestBlockManager.java   | 10 +--
 .../TestNameNodePrunesMissingStorages.java  | 64 +++-
 .../server/datanode/BlockReportTestBase.java| 50 +++
 7 files changed, 122 insertions(+), 119 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fcaba9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 81a0d22..01fd66c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1107,6 +1107,8 @@ public class BlockManager {
   invalidateBlocks.remove(node, block);
 }
 namesystem.checkSafeMode();
+LOG.info("Removed blocks associated with storage {} from DataNode {}",
+storageInfo, node);
   }
 
   /**
@@ -1827,8 +1829,8 @@ public class BlockManager {
*/
   public boolean processReport(final DatanodeID nodeID,
   final DatanodeStorage storage,
-  final BlockListAsLongs newReport, BlockReportContext context,
-  boolean lastStorageInRpc) throws IOException {
+  final BlockListAsLongs newReport,
+  BlockReportContext context) throws IOException {
 namesystem.writeLock();
 final long startTime = Time.monotonicNow(); //after acquiring write lock
 final long endTime;
@@ -1870,29 +1872,6 @@ public class BlockManager {
   }
   
   storageInfo.receivedBlockReport();
-  if (context != null) {
-storageInfo.setLastBlockReportId(context.getReportId());
-if (lastStorageInRpc) {
-  int rpcsSeen = node.updateBlockReportContext(context);
-  if (rpcsSeen >= context.getTotalRpcs()) {
-List zombies = node.removeZombieStorages();
-if (zombies.isEmpty()) {
-  LOG.debug("processReport 0x{}: no zombie storages found.",
-  Long.toHexString(context.getReportId()));
-} else {
-  for (DatanodeStorageInfo zombie : zombies) {
-removeZombieReplicas(context, zombie);
-  }
-}
-node.clearBlockReportContext();
-  } else {
-LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
-"report.", Long.toHexString(context.getReportId()),
-(context.getTotalRpcs() - rpcsSeen)
-);
-  }
-}
-  }
 } finally {
   endTime = Time.monotonicNow();
   namesystem.writeUnlock();
@@ -1919,32 +1898,6 @@ public class BlockManager {
 return !node.hasStaleStorages();
   }
 
-  private void removeZombieReplicas(BlockReportContext context,
-  DatanodeStorageInfo zombie) {
-LOG.warn("processReport 0x{}: removing zombie storage {}, which no " +
- "longer exists on the DataNode.",
-  Long.toHexString(context.getReportId()), zombie.getStorageID());
-assert(namesystem.hasWriteLock());
-Iterator iter = zombie.getBlockIterator();
-int prevBlocks = zombie.numBlocks();
-while (iter.hasNext()) {
-  BlockInfoContiguous block = iter.next();
-  // We assume that a block can be on only one storage in a DataNode.
-  // That's why we pass in the DatanodeDescriptor rather than the
-  // DatanodeStorageInfo.
-  // TODO: remove this assumption 
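
(The hunk above is truncated in the archive.) Conceptually, after this change each storage's report is handled on its own. A hedged sketch of the simplified flow, with hypothetical local names; only the processReport signature is taken from the diff:

    // Per-storage processing, with no cross-RPC zombie-storage bookkeeping:
    // BlockManager#processReport no longer takes a lastStorageInRpc flag.
    for (StorageBlockReport r : reports) {
      blockManager.processReport(nodeID, r.getStorage(), r.getBlocks(),
          context);
    }

Since no per-report tracking state survives between RPCs, interleaved full block reports can no longer trigger the false "zombie storage" detection this issue fixes.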

[1/2] hadoop git commit: HDFS-9390. Block management for maintenance states.

2016-10-17 Thread mingma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a5a56c356 -> d55a7f893


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d55a7f89/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
index 63617ad..c125f45 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
@@ -18,13 +18,19 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -32,6 +38,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
@@ -40,13 +48,23 @@ import org.junit.Test;
  * This class tests node maintenance.
  */
 public class TestMaintenanceState extends AdminStatesBaseTest {
-  public static final Log LOG = LogFactory.getLog(TestMaintenanceState.class);
-  static private final long EXPIRATION_IN_MS = 500;
+  public static final Logger LOG =
+  LoggerFactory.getLogger(TestMaintenanceState.class);
+  static private final long EXPIRATION_IN_MS = 50;
+  private int minMaintenanceR =
+  DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_DEFAULT;
 
   public TestMaintenanceState() {
 setUseCombinedHostFileManager();
   }
 
+  void setMinMaintenanceR(int minMaintenanceR) {
+this.minMaintenanceR = minMaintenanceR;
+getConf().setInt(
+DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY,
+minMaintenanceR);
+  }
+
   /**
* Verify a node can transition from AdminStates.ENTERING_MAINTENANCE to
* AdminStates.NORMAL.
@@ -55,21 +73,25 @@ public class TestMaintenanceState extends 
AdminStatesBaseTest {
   public void testTakeNodeOutOfEnteringMaintenance() throws Exception {
 LOG.info("Starting testTakeNodeOutOfEnteringMaintenance");
 final int replicas = 1;
-final int numNamenodes = 1;
-final int numDatanodes = 1;
-final Path file1 = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
+final Path file = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
 
-startCluster(numNamenodes, numDatanodes);
+startCluster(1, 1);
 
-FileSystem fileSys = getCluster().getFileSystem(0);
-writeFile(fileSys, file1, replicas, 1);
+final FileSystem fileSys = getCluster().getFileSystem(0);
+final FSNamesystem ns = getCluster().getNamesystem(0);
+writeFile(fileSys, file, replicas, 1);
 
-DatanodeInfo nodeOutofService = takeNodeOutofService(0,
+final DatanodeInfo nodeOutofService = takeNodeOutofService(0,
 null, Long.MAX_VALUE, null, AdminStates.ENTERING_MAINTENANCE);
 
+// When node is in ENTERING_MAINTENANCE state, it can still serve read
+// requests
+assertNull(checkWithRetry(ns, fileSys, file, replicas, null,
+nodeOutofService));
+
 putNodeInService(0, nodeOutofService.getDatanodeUuid());
 
-cleanupFile(fileSys, file1);
+cleanupFile(fileSys, file);
   }
 
   /**
@@ -80,23 +102,21 @@ public class TestMaintenanceState extends 
AdminStatesBaseTest {
   public void testEnteringMaintenanceExpiration() throws Exception {
 LOG.info("Starting testEnteringMaintenanceExpiration");
 final int replicas = 1;
-final int numNamenodes = 1;
-final int numDatanodes = 1;
-final Path file1 = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
+final Path file = new Path("/testEnteringMaintenanceExpiration.dat");
 
-startCluster(numNamenodes, numDatanodes);
+startCluster(1, 1);
 
-FileSystem fileSys = getCluster().getFileSystem(0);
-writeFile(fileSys, file1, replicas, 1);
+final FileSystem fileSys = getCluster().getFileSystem(0);
+writeFile(fileSys, file, 

[1/2] hadoop git commit: HDFS-9390. Block management for maintenance states.

2016-10-17 Thread mingma
Repository: hadoop
Updated Branches:
  refs/heads/trunk f5d923591 -> b61fb267b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b61fb267/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
index 63617ad..c125f45 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
@@ -18,13 +18,19 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -32,6 +38,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
@@ -40,13 +48,23 @@ import org.junit.Test;
  * This class tests node maintenance.
  */
 public class TestMaintenanceState extends AdminStatesBaseTest {
-  public static final Log LOG = LogFactory.getLog(TestMaintenanceState.class);
-  static private final long EXPIRATION_IN_MS = 500;
+  public static final Logger LOG =
+  LoggerFactory.getLogger(TestMaintenanceState.class);
+  static private final long EXPIRATION_IN_MS = 50;
+  private int minMaintenanceR =
+  DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_DEFAULT;
 
   public TestMaintenanceState() {
 setUseCombinedHostFileManager();
   }
 
+  void setMinMaintenanceR(int minMaintenanceR) {
+this.minMaintenanceR = minMaintenanceR;
+getConf().setInt(
+DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY,
+minMaintenanceR);
+  }
+
   /**
* Verify a node can transition from AdminStates.ENTERING_MAINTENANCE to
* AdminStates.NORMAL.
@@ -55,21 +73,25 @@ public class TestMaintenanceState extends 
AdminStatesBaseTest {
   public void testTakeNodeOutOfEnteringMaintenance() throws Exception {
 LOG.info("Starting testTakeNodeOutOfEnteringMaintenance");
 final int replicas = 1;
-final int numNamenodes = 1;
-final int numDatanodes = 1;
-final Path file1 = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
+final Path file = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
 
-startCluster(numNamenodes, numDatanodes);
+startCluster(1, 1);
 
-FileSystem fileSys = getCluster().getFileSystem(0);
-writeFile(fileSys, file1, replicas, 1);
+final FileSystem fileSys = getCluster().getFileSystem(0);
+final FSNamesystem ns = getCluster().getNamesystem(0);
+writeFile(fileSys, file, replicas, 1);
 
-DatanodeInfo nodeOutofService = takeNodeOutofService(0,
+final DatanodeInfo nodeOutofService = takeNodeOutofService(0,
 null, Long.MAX_VALUE, null, AdminStates.ENTERING_MAINTENANCE);
 
+// When node is in ENTERING_MAINTENANCE state, it can still serve read
+// requests
+assertNull(checkWithRetry(ns, fileSys, file, replicas, null,
+nodeOutofService));
+
 putNodeInService(0, nodeOutofService.getDatanodeUuid());
 
-cleanupFile(fileSys, file1);
+cleanupFile(fileSys, file);
   }
 
   /**
@@ -80,23 +102,21 @@ public class TestMaintenanceState extends 
AdminStatesBaseTest {
   public void testEnteringMaintenanceExpiration() throws Exception {
 LOG.info("Starting testEnteringMaintenanceExpiration");
 final int replicas = 1;
-final int numNamenodes = 1;
-final int numDatanodes = 1;
-final Path file1 = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
+final Path file = new Path("/testEnteringMaintenanceExpiration.dat");
 
-startCluster(numNamenodes, numDatanodes);
+startCluster(1, 1);
 
-FileSystem fileSys = getCluster().getFileSystem(0);
-writeFile(fileSys, file1, replicas, 1);
+final FileSystem fileSys = getCluster().getFileSystem(0);
+writeFile(fileSys, file, replicas, 
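
(Diff truncated here.) A hedged sketch of how a test would use the setMinMaintenanceR helper added above; the values are illustrative:

    // Require 2 live replicas for blocks on a node under maintenance,
    // then bring up 1 NameNode and 4 DataNodes.
    setMinMaintenanceR(2);
    startCluster(1, 4);

The setter writes dfs.namenode.maintenance.replication.min into getConf(), so it has to run before startCluster(), when the NameNode reads its configuration.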

[2/2] hadoop git commit: HDFS-9390. Block management for maintenance states.

2016-10-17 Thread mingma
HDFS-9390. Block management for maintenance states.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d55a7f89
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d55a7f89
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d55a7f89

Branch: refs/heads/branch-2
Commit: d55a7f893584acee0c3bfd89e89f8002310dcc3f
Parents: a5a56c3
Author: Ming Ma 
Authored: Mon Oct 17 17:46:29 2016 -0700
Committer: Ming Ma 
Committed: Mon Oct 17 17:46:29 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   4 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  54 +-
 .../hadoop/hdfs/server/balancer/Dispatcher.java |  11 +-
 .../server/blockmanagement/BlockManager.java| 260 +--
 .../BlockPlacementPolicyDefault.java|   4 +-
 .../CacheReplicationMonitor.java|   2 +-
 .../blockmanagement/DatanodeDescriptor.java |  35 +-
 .../server/blockmanagement/DatanodeManager.java |  47 +-
 .../blockmanagement/DecommissionManager.java| 145 ++--
 .../blockmanagement/HeartbeatManager.java   |  23 +-
 .../server/blockmanagement/NumberReplicas.java  |  39 +-
 .../blockmanagement/StorageTypeStats.java   |   8 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   9 +-
 .../src/main/resources/hdfs-default.xml |   7 +
 .../apache/hadoop/hdfs/AdminStatesBaseTest.java |  20 +-
 .../apache/hadoop/hdfs/TestDecommission.java|   2 +-
 .../hadoop/hdfs/TestMaintenanceState.java   | 775 +--
 .../blockmanagement/TestBlockManager.java   |   4 +-
 .../namenode/TestDecommissioningStatus.java |  48 +-
 .../namenode/TestNamenodeCapacityReport.java|  78 +-
 .../hadoop/hdfs/util/HostsFileWriter.java   |   1 +
 21 files changed, 1219 insertions(+), 357 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d55a7f89/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index ca2fb3e..6b6a4e0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -213,6 +213,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String  DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY 
=
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY;
   public static final int 
DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
+  public static final String  DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY =
+  "dfs.namenode.maintenance.replication.min";
+  public static final int DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_DEFAULT
+  = 1;
   public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY;
   public static final int DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d55a7f89/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index a2d3d5d..e0a4e18 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -124,49 +124,59 @@ public class DFSUtil {
 return array;
   }
 
+
   /**
-   * Compartor for sorting DataNodeInfo[] based on decommissioned states.
-   * Decommissioned nodes are moved to the end of the array on sorting with
-   * this compartor.
+   * Comparator for sorting DataNodeInfo[] based on
+   * decommissioned and entering_maintenance states.
*/
-  public static final Comparator DECOM_COMPARATOR = 
-new Comparator() {
-  @Override
-  public int compare(DatanodeInfo a, DatanodeInfo b) {
-return a.isDecommissioned() == b.isDecommissioned() ? 0 : 
-  a.isDecommissioned() ? 1 : -1;
+  public static class ServiceComparator implements Comparator {
+@Override
+public int compare(DatanodeInfo a, DatanodeInfo b) {
+  // Decommissioned nodes will still be moved to the end of the list
+  if (a.isDecommissioned()) {
+return b.isDecommissioned() ? 0 : 1;
+  } 
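
(Diff truncated here.) A hedged configuration sketch for the new key introduced in DFSConfigKeys above; the value 2 is illustrative:

    // Keep at least 2 replicas live for blocks whose other replicas sit on
    // nodes in maintenance (the shipped default is 1).
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY, 2);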

[2/2] hadoop git commit: HDFS-9390. Block management for maintenance states.

2016-10-17 Thread mingma
HDFS-9390. Block management for maintenance states.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b61fb267
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b61fb267
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b61fb267

Branch: refs/heads/trunk
Commit: b61fb267b92b2736920b4bd0c673d31e7632ebb9
Parents: f5d9235
Author: Ming Ma 
Authored: Mon Oct 17 17:45:41 2016 -0700
Committer: Ming Ma 
Committed: Mon Oct 17 17:45:41 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   5 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  53 +-
 .../hadoop/hdfs/server/balancer/Dispatcher.java |  11 +-
 .../server/blockmanagement/BlockManager.java| 249 --
 .../BlockPlacementPolicyDefault.java|   4 +-
 .../CacheReplicationMonitor.java|   2 +-
 .../blockmanagement/DatanodeDescriptor.java |  35 +-
 .../server/blockmanagement/DatanodeManager.java |  47 +-
 .../blockmanagement/DecommissionManager.java| 142 +++-
 .../blockmanagement/ErasureCodingWork.java  |  16 +-
 .../blockmanagement/HeartbeatManager.java   |  23 +-
 .../blockmanagement/LowRedundancyBlocks.java|  47 +-
 .../server/blockmanagement/NumberReplicas.java  |  30 +-
 .../blockmanagement/StorageTypeStats.java   |   8 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   9 +-
 .../src/main/resources/hdfs-default.xml |   7 +
 .../apache/hadoop/hdfs/AdminStatesBaseTest.java |  20 +-
 .../apache/hadoop/hdfs/TestDecommission.java|   2 +-
 .../hadoop/hdfs/TestMaintenanceState.java   | 775 +--
 .../blockmanagement/TestBlockManager.java   |   8 +-
 .../namenode/TestDecommissioningStatus.java |  57 +-
 .../namenode/TestNamenodeCapacityReport.java|  78 +-
 .../hadoop/hdfs/util/HostsFileWriter.java   |   1 +
 23 files changed, 1240 insertions(+), 389 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b61fb267/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 10c0ad6..d54c109 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -220,6 +220,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   "dfs.namenode.reconstruction.pending.timeout-sec";
   public static final int 
DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
 
+  public static final String  DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY =
+  "dfs.namenode.maintenance.replication.min";
+  public static final int DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_DEFAULT
+  = 1;
+
   public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY;
   public static final int DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b61fb267/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 83870cf..23166e2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -124,48 +124,57 @@ public class DFSUtil {
   }
 
   /**
-   * Compartor for sorting DataNodeInfo[] based on decommissioned states.
-   * Decommissioned nodes are moved to the end of the array on sorting with
-   * this compartor.
+   * Comparator for sorting DataNodeInfo[] based on
+   * decommissioned and entering_maintenance states.
*/
-  public static final Comparator DECOM_COMPARATOR = 
-new Comparator() {
-  @Override
-  public int compare(DatanodeInfo a, DatanodeInfo b) {
-return a.isDecommissioned() == b.isDecommissioned() ? 0 : 
-  a.isDecommissioned() ? 1 : -1;
+  public static class ServiceComparator implements Comparator {
+@Override
+public int compare(DatanodeInfo a, DatanodeInfo b) {
+  // Decommissioned nodes will still be moved to the end of the list
+  if (a.isDecommissioned()) {
+return b.isDecommissioned() ? 0 : 1;
+  } else if 
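
(The comparator body is cut off in the archive.) A hedged usage sketch; the class name DFSUtil.ServiceComparator comes from the diff, while the sort site is illustrative:

    // Sort replica locations so decommissioned and entering-maintenance
    // nodes land at the end of the array and are tried last by readers.
    DatanodeInfo[] locs = locatedBlock.getLocations();
    Arrays.sort(locs, new DFSUtil.ServiceComparator());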

hadoop git commit: HADOOP-13034. Log message about input options in distcp lacks some items (Takashi Ohnishi via aw)

2016-10-17 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e02c756f1 -> a5a56c356


HADOOP-13034. Log message about input options in distcp lacks some items 
(Takashi Ohnishi via aw)

(cherry picked from commit 422c73a8657d8699920f7db13d4be200e16c4272)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5a56c35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5a56c35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5a56c35

Branch: refs/heads/branch-2
Commit: a5a56c3564d486f8df258e64ad5c243395def12a
Parents: e02c756
Author: Allen Wittenauer 
Authored: Tue Jun 28 07:21:04 2016 -0700
Committer: Yongjun Zhang 
Committed: Mon Oct 17 16:35:32 2016 -0700

--
 .../src/main/java/org/apache/hadoop/tools/DistCpOptions.java | 4 
 .../src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java | 1 +
 .../src/test/java/org/apache/hadoop/tools/TestOptionsParser.java | 1 +
 3 files changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5a56c35/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
index b81bc03..a3af917 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
@@ -680,6 +680,10 @@ public class DistCpOptions {
 ", deleteMissing=" + deleteMissing +
 ", ignoreFailures=" + ignoreFailures +
 ", overwrite=" + overwrite +
+", append=" + append +
+", useDiff=" + useDiff +
+", fromSnapshot=" + fromSnapshot +
+", toSnapshot=" + toSnapshot +
 ", skipCRC=" + skipCRC +
 ", blocking=" + blocking +
 ", numListstatusThreads=" + numListstatusThreads +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5a56c35/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java
index 5a56cc3..7f15a60 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java
@@ -305,6 +305,7 @@ public class TestDistCpOptions {
 DistCpOptions option = new DistCpOptions(new Path("abc"), new Path("xyz"));
 final String val = "DistCpOptions{atomicCommit=false, syncFolder=false, "
 + "deleteMissing=false, ignoreFailures=false, overwrite=false, "
++ "append=false, useDiff=false, fromSnapshot=null, toSnapshot=null, "
 + "skipCRC=false, blocking=true, numListstatusThreads=0, maxMaps=20, "
 + "mapBandwidth=100, sslConfigurationFile='null', "
 + "copyStrategy='uniformsize', preserveStatus=[], "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5a56c35/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
index 4dd05de..907fc24 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
@@ -400,6 +400,7 @@ public class TestOptionsParser {
 DistCpOptions option = new DistCpOptions(new Path("abc"), new Path("xyz"));
 String val = "DistCpOptions{atomicCommit=false, syncFolder=false, "
 + "deleteMissing=false, ignoreFailures=false, overwrite=false, "
++ "append=false, useDiff=false, fromSnapshot=null, toSnapshot=null, "
 + "skipCRC=false, blocking=true, numListstatusThreads=0, maxMaps=20, "
 + "mapBandwidth=100, sslConfigurationFile='null', "
 + "copyStrategy='uniformsize', preserveStatus=[], "





[32/50] hadoop git commit: HDFS-10827. When there are unrecoverable ec block groups, Namenode Web UI doesn't show the block names. Contributed by Takanobu Asanuma.

2016-10-17 Thread umamahesh
HDFS-10827. When there are unrecoverable ec block groups, Namenode Web UI 
doesn't show the block names. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/adb96e10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/adb96e10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/adb96e10

Branch: refs/heads/HDFS-10285
Commit: adb96e109f1ab4a2c3d469e716c084d0a891b951
Parents: 0007360
Author: Jing Zhao 
Authored: Fri Oct 14 13:21:53 2016 -0700
Committer: Jing Zhao 
Committed: Fri Oct 14 13:21:53 2016 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../server/namenode/TestNameNodeMXBean.java | 105 +++
 2 files changed, 106 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/adb96e10/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 8c59186..563682f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4999,7 +4999,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 BlockInfo blk = blkIterator.next();
 final INodeFile inode = getBlockCollection(blk);
 skip++;
-if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) 
{
+if (inode != null) {
   String src = inode.getFullPathName();
   if (src.startsWith(path)){
 corruptFiles.add(new CorruptFileBlockInfo(src, blk));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/adb96e10/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index ac97a36..47f1c85 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -17,35 +17,48 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.common.base.Supplier;
 import com.google.common.util.concurrent.Uninterruptibles;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
 import org.apache.hadoop.net.ServerSocketUtil;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.VersionInfo;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mortbay.util.ajax.JSON;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 import java.io.File;
+import java.io.IOException;
 import 
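
(Imports truncated in the archive.) A hedged sketch of how the UI-facing data the new test exercises can be inspected; the MXBean object name follows HDFS convention:

    // "CorruptFiles" is a JSON list of corrupt files/blocks; with this fix
    // it also names unrecoverable EC block groups that still have some live
    // internal blocks.
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    String corruptFiles = (String) mbs.getAttribute(mxbeanName, "CorruptFiles");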

[03/50] hadoop git commit: HADOOP-13684. Snappy may complain Hadoop is built without snappy if libhadoop is not found. Contributed by Wei-Chiu Chuang.

2016-10-17 Thread umamahesh
HADOOP-13684. Snappy may complain Hadoop is built without snappy if libhadoop 
is not found. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b32b142
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b32b142
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b32b142

Branch: refs/heads/HDFS-10285
Commit: 4b32b1420d98ea23460d05ae94f2698109b3d6f7
Parents: 2fb392a
Author: Wei-Chiu Chuang 
Authored: Tue Oct 11 13:21:33 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Oct 11 13:21:33 2016 -0700

--
 .../apache/hadoop/io/compress/SnappyCodec.java  | 30 +++-
 1 file changed, 16 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b32b142/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
index 2a9c5d0..20a4cd6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
@@ -60,20 +60,22 @@ public class SnappyCodec implements Configurable, 
CompressionCodec, DirectDecomp
* Are the native snappy libraries loaded & initialized?
*/
   public static void checkNativeCodeLoaded() {
-  if (!NativeCodeLoader.isNativeCodeLoaded() ||
-  !NativeCodeLoader.buildSupportsSnappy()) {
-throw new RuntimeException("native snappy library not available: " +
-"this version of libhadoop was built without " +
-"snappy support.");
-  }
-  if (!SnappyCompressor.isNativeCodeLoaded()) {
-throw new RuntimeException("native snappy library not available: " +
-"SnappyCompressor has not been loaded.");
-  }
-  if (!SnappyDecompressor.isNativeCodeLoaded()) {
-throw new RuntimeException("native snappy library not available: " +
-"SnappyDecompressor has not been loaded.");
-  }
+if (!NativeCodeLoader.buildSupportsSnappy()) {
+  throw new RuntimeException("native snappy library not available: " +
+  "this version of libhadoop was built without " +
+  "snappy support.");
+}
+if (!NativeCodeLoader.isNativeCodeLoaded()) {
+  throw new RuntimeException("Failed to load libhadoop.");
+}
+if (!SnappyCompressor.isNativeCodeLoaded()) {
+  throw new RuntimeException("native snappy library not available: " +
+  "SnappyCompressor has not been loaded.");
+}
+if (!SnappyDecompressor.isNativeCodeLoaded()) {
+  throw new RuntimeException("native snappy library not available: " +
+  "SnappyDecompressor has not been loaded.");
+}
   }
   
   public static boolean isNativeCodeLoaded() {
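
A brief usage note, as a sketch with the codec setup assumed: callers can fail fast before wiring the codec into a stream, and with the reordered checks the exception now distinguishes a libhadoop load failure from a genuinely snappy-less build:

    SnappyCodec codec = new SnappyCodec();
    codec.setConf(new Configuration());
    // Throws RuntimeException with a cause-specific message when native
    // snappy support is unavailable; returns normally otherwise.
    SnappyCodec.checkNativeCodeLoaded();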





[11/50] hadoop git commit: YARN-5677. RM should transition to standby when connection is lost for an extended period. (Daniel Templeton via kasha)

2016-10-17 Thread umamahesh
YARN-5677. RM should transition to standby when connection is lost for an 
extended period. (Daniel Templeton via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6476934a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6476934a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6476934a

Branch: refs/heads/HDFS-10285
Commit: 6476934ae5de1be7988ab198b673d82fe0f006e3
Parents: 6378845
Author: Karthik Kambatla 
Authored: Tue Oct 11 22:07:10 2016 -0700
Committer: Karthik Kambatla 
Committed: Tue Oct 11 22:07:10 2016 -0700

--
 .../resourcemanager/EmbeddedElectorService.java |  59 +-
 .../resourcemanager/TestRMEmbeddedElector.java  | 191 +++
 2 files changed, 244 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6476934a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
index 72327e8..88d2e10 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/EmbeddedElectorService.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,6 +40,8 @@ import org.apache.zookeeper.data.ACL;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Timer;
+import java.util.TimerTask;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -54,6 +57,10 @@ public class EmbeddedElectorService extends AbstractService
 
   private byte[] localActiveNodeInfo;
   private ActiveStandbyElector elector;
+  private long zkSessionTimeout;
+  private Timer zkDisconnectTimer;
+  @VisibleForTesting
+  final Object zkDisconnectLock = new Object();
 
   EmbeddedElectorService(RMContext rmContext) {
 super(EmbeddedElectorService.class.getName());
@@ -80,7 +87,7 @@ public class EmbeddedElectorService extends AbstractService
 YarnConfiguration.DEFAULT_AUTO_FAILOVER_ZK_BASE_PATH);
 String electionZNode = zkBasePath + "/" + clusterId;
 
-long zkSessionTimeout = conf.getLong(YarnConfiguration.RM_ZK_TIMEOUT_MS,
+zkSessionTimeout = conf.getLong(YarnConfiguration.RM_ZK_TIMEOUT_MS,
 YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS);
 
 List zkAcls = RMZKUtils.getZKAcls(conf);
@@ -123,6 +130,8 @@ public class EmbeddedElectorService extends AbstractService
 
   @Override
   public void becomeActive() throws ServiceFailedException {
+cancelDisconnectTimer();
+
 try {
   rmContext.getRMAdminService().transitionToActive(req);
 } catch (Exception e) {
@@ -132,6 +141,8 @@ public class EmbeddedElectorService extends AbstractService
 
   @Override
   public void becomeStandby() {
+cancelDisconnectTimer();
+
 try {
   rmContext.getRMAdminService().transitionToStandby(req);
 } catch (Exception e) {
@@ -139,13 +150,49 @@ public class EmbeddedElectorService extends 
AbstractService
 }
   }
 
+  /**
+   * Stop the disconnect timer.  Any running tasks will be allowed to complete.
+   */
+  private void cancelDisconnectTimer() {
+synchronized (zkDisconnectLock) {
+  if (zkDisconnectTimer != null) {
+zkDisconnectTimer.cancel();
+zkDisconnectTimer = null;
+  }
+}
+  }
+
+  /**
+   * When the ZK client loses contact with ZK, this method will be called to
+   * allow the RM to react. Because the loss of connection can be noticed
+   * before the session timeout happens, it is undesirable to transition
+   * immediately. Instead the method starts a timer that will wait
+   * {@link YarnConfiguration#RM_ZK_TIMEOUT_MS} milliseconds before
+   * initiating the transition into standby state.
+   */
   @Override
   public void enterNeutralMode() {
-/**
- * Possibly due to transient connection issues. Do nothing.
- * TODO: Might want to keep track of how long in 
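
(The hunk is truncated here.) A hedged sketch of the pattern this patch introduces; the field and lock names come from the diff above, the task body is illustrative:

    // On enterNeutralMode(): arm a one-shot timer instead of transitioning
    // immediately, so a transient ZK hiccup does not bounce the RM.
    synchronized (zkDisconnectLock) {
      if (zkDisconnectTimer == null) {
        zkDisconnectTimer = new Timer("zk-disconnect-timer", true);
        zkDisconnectTimer.schedule(new TimerTask() {
          @Override
          public void run() {
            becomeStandby();   // fires only if not cancelled in time
          }
        }, zkSessionTimeout);
      }
    }

becomeActive() and becomeStandby() both call cancelDisconnectTimer() first (visible above), so reconnecting within zkSessionTimeout cancels the pending transition.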

[10/50] hadoop git commit: YARN-4464. Lower the default max applications stored in the RM and store. (Daniel Templeton via kasha)

2016-10-17 Thread umamahesh
YARN-4464. Lower the default max applications stored in the RM and store. 
(Daniel Templeton via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6378845f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6378845f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6378845f

Branch: refs/heads/HDFS-10285
Commit: 6378845f9ef789c3fda862c43bcd498aa3f35068
Parents: 7ba7092
Author: Karthik Kambatla 
Authored: Tue Oct 11 21:41:58 2016 -0700
Committer: Karthik Kambatla 
Committed: Tue Oct 11 21:42:08 2016 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 20 
 .../src/main/resources/yarn-default.xml |  4 ++--
 .../server/resourcemanager/RMAppManager.java|  2 +-
 3 files changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6378845f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 4d43357..3bd0dcc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -719,17 +719,29 @@ public class YarnConfiguration extends Configuration {
   + "leveldb-state-store.compaction-interval-secs";
   public static final long DEFAULT_RM_LEVELDB_COMPACTION_INTERVAL_SECS = 3600;
 
-  /** The maximum number of completed applications RM keeps. */ 
+  /**
+   * The maximum number of completed applications RM keeps. By default equals
+   * to {@link #DEFAULT_RM_MAX_COMPLETED_APPLICATIONS}.
+   */
   public static final String RM_MAX_COMPLETED_APPLICATIONS =
 RM_PREFIX + "max-completed-applications";
-  public static final int DEFAULT_RM_MAX_COMPLETED_APPLICATIONS = 10000;
+  public static final int DEFAULT_RM_MAX_COMPLETED_APPLICATIONS = 1000;
 
   /**
-   * The maximum number of completed applications RM state store keeps, by
-   * default equals to DEFAULT_RM_MAX_COMPLETED_APPLICATIONS
+   * The maximum number of completed applications RM state store keeps. By
+   * default equals to value of {@link #RM_MAX_COMPLETED_APPLICATIONS}.
*/
   public static final String RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS =
   RM_PREFIX + "state-store.max-completed-applications";
+  /**
+   * The default value for
+   * {@code yarn.resourcemanager.state-store.max-completed-applications}.
+   * @deprecated This default value is ignored and will be removed in a future
+   * release. The default value of
+   * {@code yarn.resourcemanager.state-store.max-completed-applications} is the
+   * value of {@link #RM_MAX_COMPLETED_APPLICATIONS}.
+   */
+  @Deprecated
   public static final int DEFAULT_RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS =
   DEFAULT_RM_MAX_COMPLETED_APPLICATIONS;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6378845f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 524afec..f37c689 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -417,7 +417,7 @@
 the applications remembered in RM memory.
 Any values larger than ${yarn.resourcemanager.max-completed-applications} 
will
 be reset to ${yarn.resourcemanager.max-completed-applications}.
-Note that this value impacts the RM recovery performance.Typically,
+Note that this value impacts the RM recovery performance. Typically,
 a smaller value indicates better performance on RM recovery.
 
 yarn.resourcemanager.state-store.max-completed-applications
@@ -687,7 +687,7 @@
   
     <description>The maximum number of completed applications RM keeps.
     </description>
     <name>yarn.resourcemanager.max-completed-applications</name>
-    <value>10000</value>
+    <value>1000</value>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6378845f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
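
Taken together, the hunks above lower the in-memory retention default and let the state-store cap follow it unless explicitly set. A minimal sketch (not part of the patch) of overriding both caps programmatically, using the key constants shown in the diff; the values are illustrative only:

    // Sketch only: key constants as in the YarnConfiguration diff above.
    YarnConfiguration conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 1000);
    // If the next key is left unset, it now defaults to the value above.
    conf.setInt(
        YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS, 500);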

[43/50] hadoop git commit: HADOOP-13722. Code cleanup -- ViewFileSystem and InodeTree. Contributed by Manoj Govindassamy.

2016-10-17 Thread umamahesh
HADOOP-13722. Code cleanup -- ViewFileSystem and InodeTree. Contributed by 
Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f4afc81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f4afc81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f4afc81

Branch: refs/heads/HDFS-10285
Commit: 0f4afc81009129bbee89d5b6cf22c8dda612d223
Parents: 412c4c9
Author: Andrew Wang 
Authored: Mon Oct 17 13:15:11 2016 -0700
Committer: Andrew Wang 
Committed: Mon Oct 17 13:15:11 2016 -0700

--
 .../org/apache/hadoop/fs/viewfs/InodeTree.java  | 206 +--
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |  91 
 .../hadoop/fs/viewfs/TestViewFsConfig.java  |  42 ++--
 3 files changed, 155 insertions(+), 184 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f4afc81/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index 8c42cdf..a485a3b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -36,47 +36,45 @@ import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
-
 /**
  * InodeTree implements a mount-table as a tree of inodes.
  * It is used to implement ViewFs and ViewFileSystem.
  * In order to use it the caller must subclass it and implement
  * the abstract methods {@link #getTargetFileSystem(INodeDir)}, etc.
- * 
+ *
  * The mountable is initialized from the config variables as 
  * specified in {@link ViewFs}
  *
 * @param <T> is AbstractFileSystem or FileSystem
- * 
- * The three main methods are
- * {@link #InodeTreel(Configuration)} // constructor
+ *
+ * The two main methods are
  * {@link #InodeTree(Configuration, String)} // constructor
  * {@link #resolve(String, boolean)} 
  */
 
 @InterfaceAudience.Private
-@InterfaceStability.Unstable 
+@InterfaceStability.Unstable
abstract class InodeTree<T> {
-  static enum ResultKind {isInternalDir, isExternalDir;};
+  enum ResultKind {
+INTERNAL_DIR,
+EXTERNAL_DIR
+  }
+
   static final Path SlashPath = new Path("/");
-  
-  final INodeDir<T> root; // the root of the mount table
-  
-  final String homedirPrefix; // the homedir config value for this mount table
-  
-  List<MountPoint<T>> mountPoints = new ArrayList<MountPoint<T>>();
-  
-  
+  private final INodeDir<T> root; // the root of the mount table
+  private final String homedirPrefix; // the homedir for this mount table
+  private List<MountPoint<T>> mountPoints = new ArrayList<>();
+
  static class MountPoint<T> {
String src;
INodeLink<T> target;
+
MountPoint(String srcPath, INodeLink<T> mountLink) {
  src = srcPath;
  target = mountLink;
 }
-
   }
-  
+
   /**
* Breaks file path into component names.
* @param path
@@ -84,18 +82,19 @@ abstract class InodeTree {
*/
   static String[] breakIntoPathComponents(final String path) {
 return path == null ? null : path.split(Path.SEPARATOR);
-  } 
-  
+  }
+
   /**
* Internal class for inode tree
* @param 
*/
  abstract static class INode<T> {
 final String fullPath; // the full path to the root
+
 public INode(String pathToNode, UserGroupInformation aUgi) {
   fullPath = pathToNode;
 }
-  };
+  }
 
   /**
* Internal class to represent an internal dir of the mount table
@@ -105,37 +104,28 @@ abstract class InodeTree {
final Map<String, INode<T>> children = new HashMap<String, INode<T>>();
 T InodeDirFs =  null; // file system of this internal directory of mountT
 boolean isRoot = false;
-
+
 INodeDir(final String pathToNode, final UserGroupInformation aUgi) {
   super(pathToNode, aUgi);
 }
 
-INode<T> resolve(final String pathComponent) throws FileNotFoundException {
-  
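
The class being cleaned up here is the mount-table tree behind ViewFileSystem and ViewFs. For orientation, a minimal, self-contained sketch (not from the patch; mount paths and namenode URIs are hypothetical) of the configuration that InodeTree ultimately parses:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.viewfs.ConfigUtil;

    public class ViewFsMountSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Each link becomes an INodeLink inside the InodeTree mount table.
        ConfigUtil.addLink(conf, "/data", new URI("hdfs://nn1:8020/data"));
        ConfigUtil.addLink(conf, "/logs", new URI("hdfs://nn2:8020/logs"));
        FileSystem viewFs = FileSystem.get(new URI("viewfs:///"), conf);
        System.out.println(viewFs.exists(new Path("/data")));
      }
    }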

[16/50] hadoop git commit: HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by MingLiang Liu

2016-10-17 Thread umamahesh
HDFS-10986. DFSAdmin should log detailed error message if any. Contributed by 
MingLiang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12912540
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12912540
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12912540

Branch: refs/heads/HDFS-10285
Commit: 129125404244f35ee63b8f0491a095371685e9ba
Parents: 9454dc5
Author: Brahma Reddy Battula 
Authored: Thu Oct 13 21:39:50 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Oct 13 22:05:00 2016 +0530

--
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   8 +-
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 106 +--
 2 files changed, 51 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12912540/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 32401dc..a60f24b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -936,8 +936,7 @@ public class DFSAdmin extends FsShell {
   System.out.println("Balancer bandwidth is " + bandwidth
   + " bytes per second.");
 } catch (IOException ioe) {
-  System.err.println("Datanode unreachable.");
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
@@ -2207,7 +2206,7 @@ public class DFSAdmin extends FsShell {
   dnProxy.evictWriters();
   System.out.println("Requested writer eviction to datanode " + dn);
 } catch (IOException ioe) {
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
@@ -2218,8 +2217,7 @@ public class DFSAdmin extends FsShell {
   DatanodeLocalInfo dnInfo = dnProxy.getDatanodeInfo();
   System.out.println(dnInfo.getDatanodeLocalReport());
 } catch (IOException ioe) {
-  System.err.println("Datanode unreachable.");
-  return -1;
+  throw new IOException("Datanode unreachable. " + ioe, ioe);
 }
 return 0;
   }
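
The change above replaces a printed one-liner and a -1 exit code with a rethrow that keeps the original exception as the cause, so the caller can log the root error in detail. A small sketch of the same idiom (the method name is illustrative, not from the patch):

    static DatanodeLocalInfo getDatanodeInfoOrFail(
        ClientDatanodeProtocol dnProxy) throws IOException {
      try {
        return dnProxy.getDatanodeInfo();
      } catch (IOException ioe) {
        // Preserve the original failure as the cause instead of swallowing it.
        throw new IOException("Datanode unreachable. " + ioe, ioe);
      }
    }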

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12912540/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index b49f73d..dca42ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
@@ -79,6 +80,7 @@ public class TestDFSAdmin {
   @Before
   public void setUp() throws Exception {
 conf = new Configuration();
+conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
 restartCluster();
 
 admin = new DFSAdmin();
@@ -116,7 +118,7 @@ public class TestDFSAdmin {
 if (cluster != null) {
   cluster.shutdown();
 }
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
 cluster.waitActive();
 datanode = cluster.getDataNodes().get(0);
 namenode = cluster.getNameNode();
@@ -171,70 +173,58 @@ public class TestDFSAdmin {
  @Test(timeout = 30000)
   public void testGetDatanodeInfo() throws Exception {
 redirectStream();
-final Configuration dfsConf = new HdfsConfiguration();
-final int numDn = 2;
-
-/* init cluster */
-try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
-.numDataNodes(numDn).build()) {
-
-  miniCluster.waitActive();
-  assertEquals(numDn, miniCluster.getDataNodes().size());
-  final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+final DFSAdmin dfsAdmin = new DFSAdmin(conf);
 
-  /* init reused vars */
-  List<String> outs = null;
-  int ret;
-

[49/50] hadoop git commit: HDFS-10800: [SPS]: Daemon thread in Namenode to find blocks placed in other storage than what the policy specifies. Contributed by Uma Maheswara Rao G

2016-10-17 Thread umamahesh
HDFS-10800: [SPS]: Daemon thread in Namenode to find blocks placed in other 
storage than what the policy specifies. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07299e26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07299e26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07299e26

Branch: refs/heads/HDFS-10285
Commit: 07299e2605067d1d9edc04b2cb8ad5cd42a78e56
Parents: 8aa5e5a
Author: Uma Maheswara Rao G 
Authored: Fri Sep 23 13:41:29 2016 -0700
Committer: Uma Maheswara Rao G 
Committed: Mon Oct 17 15:01:55 2016 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  41 ++
 .../server/blockmanagement/BlockManager.java|  20 +
 .../blockmanagement/DatanodeDescriptor.java |  38 ++
 .../server/blockmanagement/DatanodeManager.java |   7 +
 .../datanode/StoragePolicySatisfyWorker.java|  29 +-
 .../namenode/BlockStorageMovementNeeded.java|  53 +++
 .../server/namenode/StoragePolicySatisfier.java | 397 +++
 .../protocol/BlockStorageMovementCommand.java   |  11 +-
 .../TestStoragePolicySatisfyWorker.java |  24 +-
 .../namenode/TestStoragePolicySatisfier.java| 209 ++
 10 files changed, 797 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07299e26/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 83870cf..39ee703 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -48,6 +48,7 @@ import java.util.Collection;
 import java.util.Comparator;
 import java.util.Date;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -68,6 +69,7 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -1574,4 +1576,43 @@ public class DFSUtil {
 .createKeyProviderCryptoExtension(keyProvider);
 return cryptoProvider;
   }
+
+  /**
+   * Remove the overlap between the expected types and the existing types.
+   *
+   * @param expected
+   *  - Expected storage types list.
+   * @param existing
+   *  - Existing storage types list.
+   * @param ignoreNonMovable
+   *  ignore non-movable storage types by removing them from both
+   *  expected and existing storage type list to prevent non-movable
+   *  storage from being moved.
+   * @returns if the existing types or the expected types is empty after
+   *  removing the overlap.
+   */
+  public static boolean removeOverlapBetweenStorageTypes(
+  List<StorageType> expected,
+  List<StorageType> existing, boolean ignoreNonMovable) {
+for (Iterator<StorageType> i = existing.iterator(); i.hasNext();) {
+  final StorageType t = i.next();
+  if (expected.remove(t)) {
+i.remove();
+  }
+}
+if (ignoreNonMovable) {
+  removeNonMovable(existing);
+  removeNonMovable(expected);
+}
+return expected.isEmpty() || existing.isEmpty();
+  }
+
+  private static void removeNonMovable(List<StorageType> types) {
+for (Iterator<StorageType> i = types.iterator(); i.hasNext();) {
+  final StorageType t = i.next();
+  if (!t.isMovable()) {
+i.remove();
+  }
+}
+  }
 }
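
A short usage sketch (not from the patch) of the helper above; note the lists must be mutable, since the method removes matched entries in place:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.fs.StorageType;

    List<StorageType> expected =
        new ArrayList<>(Arrays.asList(StorageType.ARCHIVE, StorageType.DISK));
    List<StorageType> existing =
        new ArrayList<>(Arrays.asList(StorageType.DISK, StorageType.DISK));
    boolean done =
        DFSUtil.removeOverlapBetweenStorageTypes(expected, existing, true);
    // Now expected == [ARCHIVE], existing == [DISK], done == false:
    // one replica still has to move from DISK to ARCHIVE.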

http://git-wip-us.apache.org/repos/asf/hadoop/blob/07299e26/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7b13add..ae21593 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -86,6 +86,8 @@ import 
org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 

[44/50] hadoop git commit: HADOOP-13724. Fix a few typos in site markdown documents. Contributed by Ding Fei.

2016-10-17 Thread umamahesh
HADOOP-13724. Fix a few typos in site markdown documents. Contributed by Ding 
Fei.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/987ee511
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/987ee511
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/987ee511

Branch: refs/heads/HDFS-10285
Commit: 987ee51141a15d3f4d1df4dc792a192b92b87b5f
Parents: 0f4afc8
Author: Andrew Wang 
Authored: Mon Oct 17 13:25:58 2016 -0700
Committer: Andrew Wang 
Committed: Mon Oct 17 13:25:58 2016 -0700

--
 .../src/site/markdown/ClusterSetup.md   |  2 +-
 .../src/site/markdown/Compatibility.md  | 16 +--
 .../site/markdown/InterfaceClassification.md| 28 ++--
 .../src/site/markdown/filesystem/filesystem.md  | 17 ++--
 .../markdown/filesystem/fsdatainputstream.md| 16 +--
 .../site/markdown/filesystem/introduction.md| 12 -
 .../src/site/markdown/filesystem/model.md   |  7 ++---
 .../src/site/markdown/filesystem/notation.md|  2 +-
 .../src/site/markdown/filesystem/testing.md |  4 +--
 .../src/site/markdown/HadoopArchives.md.vm  |  2 +-
 10 files changed, 53 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/987ee511/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
index f222769..56b43e6 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
@@ -35,7 +35,7 @@ Installation
 
 Installing a Hadoop cluster typically involves unpacking the software on all 
the machines in the cluster or installing it via a packaging system as 
appropriate for your operating system. It is important to divide up the 
hardware into functions.
 
-Typically one machine in the cluster is designated as the NameNode and another 
machine the as ResourceManager, exclusively. These are the masters. Other 
services (such as Web App Proxy Server and MapReduce Job History server) are 
usually run either on dedicated hardware or on shared infrastrucutre, depending 
upon the load.
+Typically one machine in the cluster is designated as the NameNode and another 
machine as the ResourceManager, exclusively. These are the masters. Other 
services (such as Web App Proxy Server and MapReduce Job History server) are 
usually run either on dedicated hardware or on shared infrastructure, depending 
upon the load.
 
 The rest of the machines in the cluster act as both DataNode and NodeManager. 
These are the workers.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/987ee511/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index d7827b5..05b18b5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -68,7 +68,7 @@ Wire compatibility concerns data being transmitted over the 
wire between Hadoop
  Use Cases
 
 * Client-Server compatibility is required to allow users to continue using the 
old clients even after upgrading the server (cluster) to a later version (or 
vice versa). For example, a Hadoop 2.1.0 client talking to a Hadoop 2.3.0 
cluster.
-* Client-Server compatibility is also required to allow users to upgrade the 
client before upgrading the server (cluster). For example, a Hadoop 2.4.0 
client talking to a Hadoop 2.3.0 cluster. This allows deployment of client-side 
bug fixes ahead of full cluster upgrades. Note that new cluster features 
invoked by new client APIs or shell commands will not be usable. YARN 
applications that attempt to use new APIs (including new fields in data 
structures) that have not yet deployed to the cluster can expect link 
exceptions.
+* Client-Server compatibility is also required to allow users to upgrade the 
client before upgrading the server (cluster). For example, a Hadoop 2.4.0 
client talking to a Hadoop 2.3.0 cluster. This allows deployment of client-side 
bug fixes ahead of full cluster upgrades. Note that new cluster features 
invoked by new client APIs or shell commands will not be usable. YARN 
applications that attempt to use new APIs (including new fields in data 
structures) that have not yet been deployed to the cluster can expect link 

[20/50] hadoop git commit: HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin.

2016-10-17 Thread umamahesh
HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdce5150
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdce5150
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdce5150

Branch: refs/heads/HDFS-10285
Commit: fdce515091f0a61ffd6c9ae464a68447dedf1124
Parents: 008122b
Author: Andrew Wang 
Authored: Thu Oct 13 11:41:37 2016 -0700
Committer: Andrew Wang 
Committed: Thu Oct 13 11:41:37 2016 -0700

--
 .../blockmanagement/TestPendingInvalidateBlock.java| 13 +
 1 file changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdce5150/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 696b2aa..19f3178 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,6 +86,8 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
 final Path foo = new Path("/foo");
 DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
+DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 1);
+
 // restart NN
 cluster.restartNameNode(true);
 InvalidateBlocks invalidateBlocks =
@@ -98,6 +100,7 @@ public class TestPendingInvalidateBlock {
 "invalidateBlocks", mockIb);
 dfs.delete(foo, true);
 
+waitForNumPendingDeletionBlocks(REPLICATION);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(REPLICATION, cluster.getNamesystem()
 .getPendingDeletionBlocks());
@@ -105,7 +108,7 @@ public class TestPendingInvalidateBlock {
 dfs.getPendingDeletionBlocksCount());
 Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
 Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -182,7 +185,7 @@ public class TestPendingInvalidateBlock {
 Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
 cluster.restartNameNode(true);
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -199,7 +202,8 @@ public class TestPendingInvalidateBlock {
 return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForBlocksToDelete() throws Exception {
+  private void waitForNumPendingDeletionBlocks(int numBlocks)
+  throws Exception {
 GenericTestUtils.waitFor(new Supplier<Boolean>() {
 
   @Override
@@ -207,7 +211,8 @@ public class TestPendingInvalidateBlock {
 try {
   cluster.triggerBlockReports();
 
-  if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
+  if (cluster.getNamesystem().getPendingDeletionBlocks()
+  == numBlocks) {
 return true;
   }
 } catch (Exception e) {
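
The parameterized wait above polls via GenericTestUtils; a compact sketch of the full idiom (poll and timeout values are illustrative; cluster and numBlocks assumed in scope; Supplier is com.google.common.base.Supplier, as in the test):

    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return cluster.getNamesystem().getPendingDeletionBlocks() == numBlocks;
      }
    }, 100 /* check every 100 ms */, 6000 /* give up after 6 s */);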





[40/50] hadoop git commit: YARN-5699. Retrospect yarn entity fields which are publishing in events info fields. Contributed by Rohith Sharma K S.

2016-10-17 Thread umamahesh
YARN-5699. Retrospect yarn entity fields which are publishing in events info 
fields. Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f304b0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f304b0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f304b0c

Branch: refs/heads/HDFS-10285
Commit: 1f304b0c7f261369dd68839507bb609a949965ad
Parents: 5f4ae85
Author: Sangjin Lee 
Authored: Sat Oct 15 13:54:40 2016 -0700
Committer: Sangjin Lee 
Committed: Sat Oct 15 13:54:40 2016 -0700

--
 ...pplicationHistoryManagerOnTimelineStore.java |  69 ++---
 ...pplicationHistoryManagerOnTimelineStore.java |  38 +++
 .../metrics/AppAttemptMetricsConstants.java |  16 +--
 .../metrics/ContainerMetricsConstants.java  |  21 ++--
 .../timelineservice/NMTimelinePublisher.java|  34 ---
 .../metrics/TimelineServiceV1Publisher.java |  44 
 .../metrics/TimelineServiceV2Publisher.java | 101 +--
 .../metrics/TestSystemMetricsPublisher.java |  40 
 8 files changed, 186 insertions(+), 177 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f304b0c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index feeafdd..6e6576a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -463,21 +463,21 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
   if (eventInfo == null) {
 continue;
   }
-  if 
(eventInfo.containsKey(AppAttemptMetricsConstants.HOST_EVENT_INFO)) {
+  if (eventInfo.containsKey(AppAttemptMetricsConstants.HOST_INFO)) {
 host =
-eventInfo.get(AppAttemptMetricsConstants.HOST_EVENT_INFO)
+eventInfo.get(AppAttemptMetricsConstants.HOST_INFO)
 .toString();
   }
   if (eventInfo
-  .containsKey(AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.RPC_PORT_INFO)) {
 rpcPort = (Integer) eventInfo.get(
-AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO);
+AppAttemptMetricsConstants.RPC_PORT_INFO);
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.MASTER_CONTAINER_INFO)) {
 amContainerId =
 ContainerId.fromString(eventInfo.get(
-AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO)
+AppAttemptMetricsConstants.MASTER_CONTAINER_INFO)
 .toString());
   }
 } else if (event.getEventType().equals(
@@ -487,39 +487,40 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
 continue;
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO)) {
+  .containsKey(AppAttemptMetricsConstants.TRACKING_URL_INFO)) {
 trackingUrl =
 eventInfo.get(
-AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO)
+AppAttemptMetricsConstants.TRACKING_URL_INFO)
 .toString();
   }
   if (eventInfo
-  
.containsKey(AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO)) {
+  .containsKey(
+  AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_INFO)) {
 originalTrackingUrl =
 eventInfo
 .get(
-

[28/50] hadoop git commit: HADOOP-13686. Adding additional unit test for Trash (I). Contributed by Weiwei Yang.

2016-10-17 Thread umamahesh
HADOOP-13686. Adding additional unit test for Trash (I). Contributed by Weiwei 
Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dbe663d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dbe663d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dbe663d5

Branch: refs/heads/HDFS-10285
Commit: dbe663d5241feea0c88a3a9391ad48a029001d94
Parents: 5a5a724
Author: Xiaoyu Yao 
Authored: Thu Oct 13 23:05:16 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Oct 13 23:05:16 2016 -0700

--
 .../apache/hadoop/fs/TrashPolicyDefault.java|  11 +-
 .../java/org/apache/hadoop/fs/TestTrash.java| 352 ++-
 2 files changed, 356 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbe663d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 7be..4f4c937 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -40,6 +40,8 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Time;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /** Provides a trash feature.  Files are moved to a user's trash
  * directory, a subdirectory of their home directory named ".Trash".  Files are
  * initially moved to a current sub-directory of the trash directory.
@@ -215,7 +217,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 return new Emptier(getConf(), emptierInterval);
   }
 
-  private class Emptier implements Runnable {
+  protected class Emptier implements Runnable {
 
 private Configuration conf;
 private long emptierInterval;
@@ -223,7 +225,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 Emptier(Configuration conf, long emptierInterval) throws IOException {
   this.conf = conf;
   this.emptierInterval = emptierInterval;
-  if (emptierInterval > deletionInterval || emptierInterval == 0) {
+  if (emptierInterval > deletionInterval || emptierInterval <= 0) {
 LOG.info("The configured checkpoint interval is " +
  (emptierInterval / MSECS_PER_MINUTE) + " minutes." +
  " Using an interval of " +
@@ -287,6 +289,11 @@ public class TrashPolicyDefault extends TrashPolicy {
 private long floor(long time, long interval) {
   return (time / interval) * interval;
 }
+
+@VisibleForTesting
+protected long getEmptierInterval() {
+  return this.emptierInterval/MSECS_PER_MINUTE;
+}
   }
 
   private void createCheckpoint(Path trashRoot, Date date) throws IOException {
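
The guard above compares two minute-granularity settings. A hedged sketch of the corresponding client-side configuration (standard fs.trash.* keys; the values are illustrative):

    Configuration conf = new Configuration();
    conf.setLong("fs.trash.interval", 1440);          // deletionInterval: 24 h
    conf.setLong("fs.trash.checkpoint.interval", 60); // emptierInterval: 1 h
    // With this patch a negative checkpoint interval (not just 0) also
    // falls back to the deletion interval.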

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbe663d5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 338aff6..7a5b25e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -29,13 +29,19 @@ import java.net.URI;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.util.HashSet;
+import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.TrashPolicyDefault.Emptier;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
+import org.junit.Before;
+import org.junit.Test;
 
 /**
  * This class tests commands from Trash.
@@ -45,6 +51,13 @@ public class TestTrash extends TestCase {
   private final static Path TEST_DIR = new Path(GenericTestUtils.getTempPath(
   "testTrash"));
 
+  @Before
+  public void setUp() throws IOException {
+// ensure each test initiates a FileSystem instance,
+// avoid getting an old instance from cache.
+FileSystem.closeAll();
+  }
+
   protected static Path mkdir(FileSystem fs, Path p) throws IOException {
 assertTrue(fs.mkdirs(p));
 

[36/50] hadoop git commit: HDFS-10558. DiskBalancer: Print the full path to plan file. Contributed by Xiaobing Zhou.

2016-10-17 Thread umamahesh
HDFS-10558. DiskBalancer: Print the full path to plan file. Contributed by 
Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30bb1970
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30bb1970
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30bb1970

Branch: refs/heads/HDFS-10285
Commit: 30bb1970cc27c1345871a35bcf1220e520c1804b
Parents: 76cc84e
Author: Anu Engineer 
Authored: Fri Oct 14 17:07:59 2016 -0700
Committer: Anu Engineer 
Committed: Fri Oct 14 17:07:59 2016 -0700

--
 .../server/diskbalancer/command/Command.java| 21 +-
 .../diskbalancer/command/HelpCommand.java   |  2 +-
 .../diskbalancer/command/PlanCommand.java   | 55 +++
 .../diskbalancer/command/ReportCommand.java | 11 +--
 .../hadoop/hdfs/tools/DiskBalancerCLI.java  | 27 +---
 .../diskbalancer/DiskBalancerTestUtil.java  | 72 
 .../server/diskbalancer/TestDiskBalancer.java   |  4 +-
 .../command/TestDiskBalancerCommand.java| 61 +++--
 8 files changed, 218 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30bb1970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index 2497669..11c8e7f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -53,6 +53,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URL;
@@ -82,6 +83,7 @@ public abstract class Command extends Configured {
   private FileSystem fs = null;
   private DiskBalancerCluster cluster = null;
   private int topNodes;
+  private PrintStream ps;
 
   private static final Path DEFAULT_LOG_DIR = new Path("/system/diskbalancer");
 
@@ -91,9 +93,25 @@ public abstract class Command extends Configured {
* Constructs a command.
*/
   public Command(Configuration conf) {
+this(conf, System.out);
+  }
+
+  /**
+   * Constructs a command.
+   */
+  public Command(Configuration conf, final PrintStream ps) {
 super(conf);
 // These arguments are valid for all commands.
 topNodes = 0;
+this.ps = ps;
+  }
+
+  /**
+   * Gets printing stream.
+   * @return print stream
+   */
+  PrintStream getPrintStream() {
+return ps;
   }
 
   /**
@@ -423,7 +441,8 @@ public abstract class Command extends Configured {
*
* @return Cluster.
*/
-  protected DiskBalancerCluster getCluster() {
+  @VisibleForTesting
+  DiskBalancerCluster getCluster() {
 return cluster;
   }
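
Routing output through an injected PrintStream makes command output capturable in tests instead of going straight to System.out. A hypothetical sketch (the two-argument ReportCommand constructor mirroring Command(conf, ps), and conf, are assumed here):

    ByteArrayOutputStream captured = new ByteArrayOutputStream();
    PrintStream ps = new PrintStream(captured, true, "UTF-8");
    ReportCommand report = new ReportCommand(conf, ps); // assumed ctor
    // ... execute the command ...
    String printed = captured.toString("UTF-8");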
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30bb1970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
index c735299..f7c84e1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
@@ -78,7 +78,7 @@ public class HelpCommand extends Command {
   command = new CancelCommand(getConf());
   break;
 case DiskBalancerCLI.REPORT:
-  command = new ReportCommand(getConf(), null);
+  command = new ReportCommand(getConf());
   break;
 default:
   command = this;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30bb1970/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index 9749409..1d07a63 100644
--- 

[13/50] hadoop git commit: HADOOP-13700. Remove unthrown IOException from TrashPolicy#initialize and #getInstance signatures.

2016-10-17 Thread umamahesh
HADOOP-13700. Remove unthrown IOException from TrashPolicy#initialize and 
#getInstance signatures.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12d739a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12d739a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12d739a3

Branch: refs/heads/HDFS-10285
Commit: 12d739a34ba868b3f7f5adf7f37a60d4aca9061b
Parents: 85cd06f
Author: Andrew Wang 
Authored: Wed Oct 12 15:19:52 2016 -0700
Committer: Andrew Wang 
Committed: Wed Oct 12 15:19:52 2016 -0700

--
 .../src/main/java/org/apache/hadoop/fs/TrashPolicy.java| 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12d739a3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
index 157b9ab..2fe3fd1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
@@ -53,9 +53,8 @@ public abstract class TrashPolicy extends Configured {
* not assume trash always under /user/$USER due to HDFS encryption zone.
* @param conf the configuration to be used
* @param fs the filesystem to be used
-   * @throws IOException
*/
-  public void initialize(Configuration conf, FileSystem fs) throws IOException{
+  public void initialize(Configuration conf, FileSystem fs) {
 throw new UnsupportedOperationException();
   }
 
@@ -137,8 +136,7 @@ public abstract class TrashPolicy extends Configured {
* @param fs the file system to be used
* @return an instance of TrashPolicy
*/
-  public static TrashPolicy getInstance(Configuration conf, FileSystem fs)
-  throws IOException {
+  public static TrashPolicy getInstance(Configuration conf, FileSystem fs) {
 Class<? extends TrashPolicy> trashClass = conf.getClass(
 "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
 TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
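
Since getInstance and initialize no longer declare IOException, call sites shrink to straight-line code. A minimal sketch (FileSystem.get can still throw on its own):

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);        // IOException possible here
    TrashPolicy policy = TrashPolicy.getInstance(conf, fs); // no try/catch now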





[47/50] hadoop git commit: YARN-5466. DefaultContainerExecutor needs JavaDocs (templedf via rkanter)

2016-10-17 Thread umamahesh
YARN-5466. DefaultContainerExecutor needs JavaDocs (templedf via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5d92359
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5d92359
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5d92359

Branch: refs/heads/HDFS-10285
Commit: f5d92359145dfb820a9521e00e2d44c4ee96e67e
Parents: 8fd4c37
Author: Robert Kanter 
Authored: Mon Oct 17 14:29:09 2016 -0700
Committer: Robert Kanter 
Committed: Mon Oct 17 14:29:09 2016 -0700

--
 .../nodemanager/DefaultContainerExecutor.java   | 272 ---
 .../WindowsSecureContainerExecutor.java |   2 +-
 2 files changed, 231 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5d92359/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index 59b69ac..568c80b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -65,6 +65,11 @@ import 
org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
 
+/**
+ * The {@code DefaultContainerExecuter} class offers generic container
+ * execution services. Process execution is handled in a platform-independent
+ * way via {@link ProcessBuilder}.
+ */
 public class DefaultContainerExecutor extends ContainerExecutor {
 
   private static final Log LOG = LogFactory
@@ -72,10 +77,17 @@ public class DefaultContainerExecutor extends 
ContainerExecutor {
 
   private static final int WIN_MAX_PATH = 260;
 
+  /**
+   * A {@link FileContext} for the local file system.
+   */
   protected final FileContext lfs;
 
   private String logDirPermissions = null;
 
+  /**
+   * Default constructor for use in testing.
+   */
+  @VisibleForTesting
   public DefaultContainerExecutor() {
 try {
   this.lfs = FileContext.getLocalFSFileContext();
@@ -84,15 +96,40 @@ public class DefaultContainerExecutor extends 
ContainerExecutor {
 }
   }
 
+  /**
+   * Create an instance with a given {@link FileContext}.
+   *
+   * @param lfs the given {@link FileContext}
+   */
   DefaultContainerExecutor(FileContext lfs) {
 this.lfs = lfs;
   }
 
+  /**
+   * Copy a file using the {@link #lfs} {@link FileContext}.
+   *
+   * @param src the file to copy
+   * @param dst where to copy the file
+   * @param owner the owner of the new copy. Used only in secure Windows
+   * clusters
+   * @throws IOException when the copy fails
+   * @see WindowsSecureContainerExecutor
+   */
   protected void copyFile(Path src, Path dst, String owner) throws IOException 
{
 lfs.util().copy(src, dst, false, true);
   }
   
-  protected void setScriptExecutable(Path script, String owner) throws 
IOException {
+  /**
+   * Make a file executable using the {@link #lfs} {@link FileContext}.
+   *
+   * @param script the path to make executable
+   * @param owner the new owner for the file. Used only in secure Windows
+   * clusters
+   * @throws IOException when the change mode operation fails
+   * @see WindowsSecureContainerExecutor
+   */
+  protected void setScriptExecutable(Path script, String owner)
+  throws IOException {
 lfs.setPermission(script, ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
   }
 
@@ -122,14 +159,16 @@ public class DefaultContainerExecutor extends 
ContainerExecutor {
 // randomly choose the local directory
 Path appStorageDir = getWorkingDir(localDirs, user, appId);
 
-String tokenFn = String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, 
locId);
+String tokenFn =
+String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId);
 Path tokenDst = new Path(appStorageDir, tokenFn);
 copyFile(nmPrivateContainerTokensPath, tokenDst, user);
-LOG.info("Copying from " + nmPrivateContainerTokensPath + " to " + 
tokenDst);
+LOG.info("Copying from " + nmPrivateContainerTokensPath
++ " to " + 

[17/50] hadoop git commit: HDFS-10949. DiskBalancer: deprecate TestDiskBalancer#setVolumeCapacity. Contributed by Xiaobing Zhou.

2016-10-17 Thread umamahesh
HDFS-10949. DiskBalancer: deprecate TestDiskBalancer#setVolumeCapacity. 
Contributed by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b371c563
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b371c563
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b371c563

Branch: refs/heads/HDFS-10285
Commit: b371c56365c14bbab0f5cdfffc0becaabfde8145
Parents: 1291254
Author: Anu Engineer 
Authored: Thu Oct 13 10:26:07 2016 -0700
Committer: Anu Engineer 
Committed: Thu Oct 13 10:26:07 2016 -0700

--
 .../server/diskbalancer/TestDiskBalancer.java   | 44 +---
 1 file changed, 11 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b371c563/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index d911e74..9985210 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -44,7 +44,6 @@ import 
org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
-import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -137,6 +136,7 @@ public class TestDiskBalancer {
 final int dataNodeCount = 1;
 final int dataNodeIndex = 0;
 final int sourceDiskIndex = 0;
+final long cap = blockSize * 2L * blockCount;
 
 MiniDFSCluster cluster = new ClusterBuilder()
 .setBlockCount(blockCount)
@@ -144,6 +144,7 @@ public class TestDiskBalancer {
 .setDiskCount(diskCount)
 .setNumDatanodes(dataNodeCount)
 .setConf(conf)
+.setCapacities(new long[] {cap, cap})
 .build();
 try {
   DataMover dataMover = new DataMover(cluster, dataNodeIndex,
@@ -174,7 +175,7 @@ public class TestDiskBalancer {
 final int dataNodeCount = 1;
 final int dataNodeIndex = 0;
 final int sourceDiskIndex = 0;
-
+final long cap = blockSize * 2L * blockCount;
 
 MiniDFSCluster cluster = new ClusterBuilder()
 .setBlockCount(blockCount)
@@ -182,9 +183,9 @@ public class TestDiskBalancer {
 .setDiskCount(diskCount)
 .setNumDatanodes(dataNodeCount)
 .setConf(conf)
+.setCapacities(new long[] {cap, cap, cap})
 .build();
 
-
 try {
   DataMover dataMover = new DataMover(cluster, dataNodeIndex,
   sourceDiskIndex, conf, blockSize, blockCount);
@@ -221,6 +222,7 @@ public class TestDiskBalancer {
 final int dataNodeCount = 1;
 final int dataNodeIndex = 0;
 final int sourceDiskIndex = 0;
+final long cap = blockSize * 2L * blockCount;
 
 MiniDFSCluster cluster = new ClusterBuilder()
 .setBlockCount(blockCount)
@@ -228,6 +230,7 @@ public class TestDiskBalancer {
 .setDiskCount(diskCount)
 .setNumDatanodes(dataNodeCount)
 .setConf(conf)
+.setCapacities(new long[] {cap, cap})
 .build();
 
 try {
@@ -246,24 +249,6 @@ public class TestDiskBalancer {
   }
 
   /**
-   * Sets alll Disks capacity to size specified.
-   *
-   * @param cluster - DiskBalancerCluster
-   * @param size- new size of the disk
-   */
-  private void setVolumeCapacity(DiskBalancerCluster cluster, long size,
- String diskType) {
-Preconditions.checkNotNull(cluster);
-for (DiskBalancerDataNode node : cluster.getNodes()) {
-  for (DiskBalancerVolume vol :
-  node.getVolumeSets().get(diskType).getVolumes()) {
-vol.setCapacity(size);
-  }
-  node.getVolumeSets().get(diskType).computeVolumeDataDensity();
-}
-  }
-
-  /**
* Helper class that allows us to create different kinds of MiniDFSClusters
* and populate data.
*/
@@ -274,6 +259,7 @@ public class TestDiskBalancer {
 private int fileLen;
 private int blockCount;
 private int diskCount;
+private long[] capacities;
 
 public ClusterBuilder 

[12/50] hadoop git commit: HDFS-10789. Route webhdfs through the RPC call queue. Contributed by Daryn Sharp and Rushabh S Shah.

2016-10-17 Thread umamahesh
HDFS-10789. Route webhdfs through the RPC call queue. Contributed by Daryn 
Sharp and Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85cd06f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85cd06f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85cd06f6

Branch: refs/heads/HDFS-10285
Commit: 85cd06f6636f295ad1f3bf2a90063f4714c9cca7
Parents: 6476934
Author: Kihwal Lee 
Authored: Wed Oct 12 15:11:42 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Oct 12 15:11:42 2016 -0500

--
 .../org/apache/hadoop/ipc/ExternalCall.java |   9 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java |   6 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   3 +
 .../hdfs/server/namenode/FSNamesystem.java  |  15 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  12 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   6 +-
 .../web/resources/NamenodeWebHdfsMethods.java   | 150 +++
 .../src/main/resources/hdfs-default.xml |   7 +
 .../server/namenode/TestNamenodeRetryCache.java |  25 +++-
 .../web/resources/TestWebHdfsDataLocality.java  |  25 +++-
 10 files changed, 160 insertions(+), 98 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
index 9b4cbcf..5566136 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ipc;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.ipc.Server.Call;
public abstract class ExternalCall<T> extends Call {
 
   public abstract UserGroupInformation getRemoteUser();
 
-  public final T get() throws IOException, InterruptedException {
+  public final T get() throws InterruptedException, ExecutionException {
 waitForCompletion();
 if (error != null) {
-  if (error instanceof IOException) {
-throw (IOException)error;
-  } else {
-throw new IOException(error);
-  }
+  throw new ExecutionException(error);
 }
 return result;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 92d9183..72b603a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -72,6 +72,7 @@ import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
@@ -989,8 +990,9 @@ public class TestRPC extends TestRpcBase {
   try {
 exceptionCall.get();
 fail("didn't throw");
-  } catch (IOException ioe) {
-assertEquals(expectedIOE.getMessage(), ioe.getMessage());
+  } catch (ExecutionException ee) {
+assertTrue((ee.getCause()) instanceof IOException);
+assertEquals(expectedIOE.getMessage(), ee.getCause().getMessage());
   }
 } finally {
   server.stop();
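
ExternalCall.get() now behaves like Future.get(): failures arrive wrapped in ExecutionException. A sketch of the unwrapping a caller would do ('call' is assumed to be a queued ExternalCall<T>):

    T result;
    try {
      result = call.get();
    } catch (ExecutionException ee) {
      Throwable cause = ee.getCause();   // the original server-side error
      throw (cause instanceof IOException)
          ? (IOException) cause : new IOException(cause);
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
      throw new InterruptedIOException("call interrupted");
    }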

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85cd06f6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 18209ae..10c0ad6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -70,6 +70,9 @@ public class DFSConfigKeys extends 

[25/50] hadoop git commit: HADOOP-13669. Addendum patch for KMS Server should log exceptions before throwing.

2016-10-17 Thread umamahesh
HADOOP-13669. Addendum patch for KMS Server should log exceptions before 
throwing.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae51b11f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae51b11f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae51b11f

Branch: refs/heads/HDFS-10285
Commit: ae51b11f7872eaac558acf00fd23f6d7b1841cfe
Parents: cf3f43e
Author: Xiao Chen 
Authored: Thu Oct 13 22:32:08 2016 -0700
Committer: Xiao Chen 
Committed: Thu Oct 13 22:32:08 2016 -0700

--
 .../hadoop-kms/dev-support/findbugsExcludeFile.xml| 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae51b11f/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml 
b/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
index bc92ed7..78c4ca6 100644
--- a/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-kms/dev-support/findbugsExcludeFile.xml
@@ -38,4 +38,11 @@
 
 
   
+  
+  
+
+
+  
 





[42/50] hadoop git commit: HDFS-9820. Improve distcp to support efficient restore to an earlier snapshot. Contributed by Yongjun Zhang.

2016-10-17 Thread umamahesh
HDFS-9820. Improve distcp to support efficient restore to an earlier snapshot. 
Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/412c4c9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/412c4c9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/412c4c9a

Branch: refs/heads/HDFS-10285
Commit: 412c4c9a342b73bf1c1a7f43ea91245cbf94d02d
Parents: ed9fcbe
Author: Yongjun Zhang 
Authored: Fri Oct 14 15:17:33 2016 -0700
Committer: Yongjun Zhang 
Committed: Mon Oct 17 11:04:42 2016 -0700

--
 .../java/org/apache/hadoop/tools/DiffInfo.java  |  47 +-
 .../java/org/apache/hadoop/tools/DistCp.java|  34 +-
 .../apache/hadoop/tools/DistCpConstants.java|   1 +
 .../apache/hadoop/tools/DistCpOptionSwitch.java |   5 +
 .../org/apache/hadoop/tools/DistCpOptions.java  |  79 +-
 .../org/apache/hadoop/tools/DistCpSync.java | 256 --
 .../org/apache/hadoop/tools/OptionsParser.java  |  27 +-
 .../apache/hadoop/tools/SimpleCopyListing.java  |  17 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java |   4 +-
 .../hadoop/tools/TestDistCpSyncReverseBase.java | 868 +++
 .../tools/TestDistCpSyncReverseFromSource.java  |  36 +
 .../tools/TestDistCpSyncReverseFromTarget.java  |  36 +
 .../apache/hadoop/tools/TestOptionsParser.java  |  85 +-
 13 files changed, 1340 insertions(+), 155 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/412c4c9a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
index 79bb7fe..7e56301 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
@@ -44,28 +44,49 @@ class DiffInfo {
   };
 
   /** The source file/dir of the rename or deletion op */
-  final Path source;
+  private Path source;
+  /** The target file/dir of the rename op. Null means the op is deletion. */
+  private Path target;
+
+  private SnapshotDiffReport.DiffType type;
   /**
* The intermediate file/dir for the op. For a rename or a delete op,
* we first rename the source to this tmp file/dir.
*/
   private Path tmp;
-  /** The target file/dir of the rename op. Null means the op is deletion. */
-  Path target;
-
-  private final SnapshotDiffReport.DiffType type;
-
-  public SnapshotDiffReport.DiffType getType(){
-return this.type;
-  }
 
-  DiffInfo(Path source, Path target, SnapshotDiffReport.DiffType type) {
+  DiffInfo(final Path source, final Path target,
+  SnapshotDiffReport.DiffType type) {
 assert source != null;
 this.source = source;
 this.target= target;
 this.type = type;
   }
 
+  void setSource(final Path source) {
+this.source = source;
+  }
+
+  Path getSource() {
+return source;
+  }
+
+  void setTarget(final Path target) {
+this.target = target;
+  }
+
+  Path getTarget() {
+return target;
+  }
+
+  public void setType(final SnapshotDiffReport.DiffType type){
+this.type = type;
+  }
+
+  public SnapshotDiffReport.DiffType getType(){
+return type;
+  }
+
   void setTmp(Path tmp) {
 this.tmp = tmp;
   }
@@ -73,4 +94,10 @@ class DiffInfo {
   Path getTmp() {
 return tmp;
   }
+
+  @Override
+  public String toString() {
+return type + ": src=" + String.valueOf(source) + " tgt="
++ String.valueOf(target) + " tmp=" + String.valueOf(tmp);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/412c4c9a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
index be58f13..e9decd2 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
@@ -77,6 +77,21 @@ public class DistCp extends Configured implements Tool {
   private boolean submitted;
   private FileSystem jobFS;
 
+  private void prepareFileListing(Job job) throws Exception {
+if (inputOptions.shouldUseSnapshotDiff()) {
+  try {
+DistCpSync distCpSync = new DistCpSync(inputOptions, getConf());
+distCpSync.sync();
+createInputFileListingWithDiff(job, distCpSync);
+  } catch (IOException e) {
+throw new Exception("DistCp"
++ " sync 

[07/50] hadoop git commit: HDFS-10903. Replace config key literal strings with config key names II: hadoop hdfs. Contributed by Chen Liang

2016-10-17 Thread umamahesh
HDFS-10903. Replace config key literal strings with config key names II: hadoop 
hdfs. Contributed by Chen Liang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c9a0106
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c9a0106
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c9a0106

Branch: refs/heads/HDFS-10285
Commit: 3c9a01062e9097c2ed1db75318482543db2e382f
Parents: 61f0490
Author: Mingliang Liu 
Authored: Tue Oct 11 16:29:30 2016 -0700
Committer: Mingliang Liu 
Committed: Tue Oct 11 16:29:30 2016 -0700

--
 .../java/org/apache/hadoop/fs/http/server/FSOperations.java | 9 +++--
 .../hadoop/lib/service/hadoop/FileSystemAccessService.java  | 6 --
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 3 +++
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 8 
 .../test/java/org/apache/hadoop/hdfs/TestFileAppend4.java   | 3 ++-
 .../hdfs/server/blockmanagement/TestBlockTokenWithDFS.java  | 3 ++-
 6 files changed, 26 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 46948f9..001bc92 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -48,6 +48,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;
+
 /**
  * FileSystem operation executors used by {@link HttpFSServer}.
  */
@@ -462,7 +465,8 @@ public class FSOperations {
 blockSize = fs.getDefaultBlockSize(path);
   }
   FsPermission fsPermission = new FsPermission(permission);
-  int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
+  int bufferSize = fs.getConf().getInt(HTTPFS_BUFFER_SIZE_KEY,
+  HTTP_BUFFER_SIZE_DEFAULT);
   OutputStream os = fs.create(path, fsPermission, override, bufferSize, 
replication, blockSize, null);
   IOUtils.copyBytes(is, os, bufferSize, true);
   os.close();
@@ -752,7 +756,8 @@ public class FSOperations {
  */
 @Override
 public InputStream execute(FileSystem fs) throws IOException {
-  int bufferSize = 
HttpFSServerWebApp.get().getConfig().getInt("httpfs.buffer.size", 4096);
+  int bufferSize = HttpFSServerWebApp.get().getConfig().getInt(
+  HTTPFS_BUFFER_SIZE_KEY, HTTP_BUFFER_SIZE_DEFAULT);
   return fs.open(path, bufferSize);
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c9a0106/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
index 0b767be..61d3b45 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
@@ -50,6 +50,8 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+
 @InterfaceAudience.Private
 public class FileSystemAccessService extends BaseService implements 
FileSystemAccess {
   private static final Logger LOG = 
LoggerFactory.getLogger(FileSystemAccessService.class);
@@ -159,7 +161,7 @@ public class FileSystemAccessService extends BaseService 
implements FileSystemAc
 throw new ServiceException(FileSystemAccessException.ERROR.H01, 
KERBEROS_PRINCIPAL);
   }
   Configuration conf = new Configuration();
-  conf.set("hadoop.security.authentication", "kerberos");
+  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
   UserGroupInformation.setConfiguration(conf);
   try {
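
The pattern this commit applies is worth a self-contained illustration: define the key and its default once, then read configuration through the constant everywhere. The class below is hypothetical; the constant names merely mirror the ones visible in the diff.

    import org.apache.hadoop.conf.Configuration;

    public class ConfigKeyExample {
      // Single point of definition: renaming the key or changing the
      // default now touches one line instead of every call site.
      static final String HTTPFS_BUFFER_SIZE_KEY = "httpfs.buffer.size";
      static final int HTTP_BUFFER_SIZE_DEFAULT = 4096;

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        int bufferSize =
            conf.getInt(HTTPFS_BUFFER_SIZE_KEY, HTTP_BUFFER_SIZE_DEFAULT);
        System.out.println("buffer size = " + bufferSize);
      }
    }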
 

[39/50] hadoop git commit: HDFS-11003. Expose XmitsInProgress through DataNodeMXBean. Contributed By Brahma Reddy Battula

2016-10-17 Thread umamahesh
HDFS-11003. Expose XmitsInProgress through DataNodeMXBean. Contributed By 
Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f4ae85b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f4ae85b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f4ae85b

Branch: refs/heads/HDFS-10285
Commit: 5f4ae85bd8a20510948696467873498723b06477
Parents: 5ad037d
Author: Brahma Reddy Battula 
Authored: Sat Oct 15 22:28:33 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Sat Oct 15 22:28:33 2016 +0530

--
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java  | 5 +++--
 .../org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java | 6 ++
 .../apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java | 6 +-
 3 files changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f4ae85b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index cb8e308..8f65efe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2101,8 +2101,9 @@ public class DataNode extends ReconfigurableBase
   }
 }
   }
-  
-  int getXmitsInProgress() {
+
+  @Override //DataNodeMXBean
+  public int getXmitsInProgress() {
 return xmitsInProgress.get();
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f4ae85b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
index 5ec4cda..5d4c218 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
@@ -101,6 +101,12 @@ public interface DataNodeMXBean {
   public int getXceiverCount();
 
   /**
+   * Returns an estimate of the number of data replication/reconstruction tasks
+   * running currently.
+   */
+  public int getXmitsInProgress();
+
+  /**
* Gets the network error counts on a per-Datanode basis.
*/
  public Map<String, Map<String, Long>> getDatanodeNetworkCounts();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f4ae85b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index 8b0d5cb..a77c943 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -96,7 +96,11 @@ public class TestDataNodeMXBean {
   int xceiverCount = (Integer)mbs.getAttribute(mxbeanName,
   "XceiverCount");
   Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
-
+  // Ensure mxbean's XmitsInProgress is same as the DataNode's
+  // live value.
+  int xmitsInProgress =
+  (Integer) mbs.getAttribute(mxbeanName, "XmitsInProgress");
+  Assert.assertEquals(datanode.getXmitsInProgress(), xmitsInProgress);
   String bpActorInfo = (String)mbs.getAttribute(mxbeanName,
   "BPServiceActorInfo");
   Assert.assertEquals(datanode.getBPServiceActorInfo(), bpActorInfo);
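
The test reads the attribute through JMX exactly as an external monitor would. A minimal sketch of such a probe; it assumes it runs in a JVM that hosts a DataNode, and the ObjectName is the one the test above uses.

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class XmitsProbe {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name =
            new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
        // Live estimate of replication/reconstruction tasks in flight.
        int xmits = (Integer) mbs.getAttribute(name, "XmitsInProgress");
        System.out.println("XmitsInProgress = " + xmits);
      }
    }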





[34/50] hadoop git commit: HADOOP-13721. Remove stale method ViewFileSystem#getTrashCanLocation. Contributed by Manoj Govindassamy.

2016-10-17 Thread umamahesh
HADOOP-13721. Remove stale method ViewFileSystem#getTrashCanLocation. 
Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aee538be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aee538be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aee538be

Branch: refs/heads/HDFS-10285
Commit: aee538be6c2ab324de4d7834cd3347959272de01
Parents: 8c520a2
Author: Andrew Wang 
Authored: Fri Oct 14 14:08:31 2016 -0700
Committer: Andrew Wang 
Committed: Fri Oct 14 14:08:31 2016 -0700

--
 .../main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java  | 6 --
 1 file changed, 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aee538be/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index edc59ab..f6947ff 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -220,12 +220,6 @@ public class ViewFileSystem extends FileSystem {
 this(FsConstants.VIEWFS_URI, conf);
   }
   
-  public Path getTrashCanLocation(final Path f) throws FileNotFoundException {
-final InodeTree.ResolveResult<FileSystem> res = 
-  fsState.resolve(getUriPath(f), true);
-return res.isInternalDir() ? null : 
res.targetFileSystem.getHomeDirectory();
-  }
-  
   @Override
   public URI getUri() {
 return myUri;





[02/50] hadoop git commit: HADOOP-13697. LogLevel#main should not throw exception if no arguments. Contributed by Mingliang Liu

2016-10-17 Thread umamahesh
HADOOP-13697. LogLevel#main should not throw exception if no arguments. 
Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fb392a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fb392a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fb392a5

Branch: refs/heads/HDFS-10285
Commit: 2fb392a587d288b628936ca6d18fabad04afc585
Parents: 809cfd2
Author: Mingliang Liu 
Authored: Fri Oct 7 14:05:40 2016 -0700
Committer: Mingliang Liu 
Committed: Tue Oct 11 10:57:08 2016 -0700

--
 .../src/main/java/org/apache/hadoop/log/LogLevel.java   | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fb392a5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
index 4fa839f..79eae12 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
@@ -47,15 +47,17 @@ import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
 
 /**
  * Change log level in runtime.
  */
 @InterfaceStability.Evolving
 public class LogLevel {
-  public static final String USAGES = "\nUsage: General options are:\n"
+  public static final String USAGES = "\nUsage: Command options are:\n"
   + "\t[-getlevel   [-protocol (http|https)]\n"
   + "\t[-setlevel"
   + "[-protocol (http|https)]\n";
@@ -67,7 +69,7 @@ public class LogLevel {
*/
   public static void main(String[] args) throws Exception {
 CLI cli = new CLI(new Configuration());
-System.exit(cli.run(args));
+System.exit(ToolRunner.run(cli, args));
   }
 
   /**
@@ -81,6 +83,7 @@ public class LogLevel {
 
   private static void printUsage() {
 System.err.println(USAGES);
+GenericOptionsParser.printGenericCommandUsage(System.err);
   }
 
   public static boolean isValidProtocol(String protocol) {
@@ -107,7 +110,7 @@ public class LogLevel {
 sendLogLevelRequest();
   } catch (HadoopIllegalArgumentException e) {
 printUsage();
-throw e;
+return -1;
   }
   return 0;
 }
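
Routing main() through ToolRunner is what turns a usage error into a clean exit code: ToolRunner first strips generic options (-D, -conf, -fs, ...) and then invokes run(), whose return value becomes the process status. A minimal sketch with a hypothetical tool:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class EchoTool extends Configured implements Tool {
      @Override
      public int run(String[] args) {
        if (args.length == 0) {
          System.err.println("Usage: EchoTool <word>...");
          return -1;  // bad usage: non-zero exit instead of a stack trace
        }
        for (String a : args) {
          System.out.println(a);
        }
        return 0;
      }

      public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new Configuration(), new EchoTool(), args));
      }
    }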





[35/50] hadoop git commit: HDFS-11008. Change unit test for testing parsing "-source" parameter in Balancer CLI. Contributed by Mingliang Liu

2016-10-17 Thread umamahesh
HDFS-11008. Change unit test for testing parsing "-source" parameter in 
Balancer CLI. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76cc84e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76cc84e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76cc84e6

Branch: refs/heads/HDFS-10285
Commit: 76cc84e6d41c2b02218c2c98d60481cd565e067c
Parents: aee538b
Author: Mingliang Liu 
Authored: Thu Oct 13 17:51:38 2016 -0700
Committer: Mingliang Liu 
Committed: Fri Oct 14 14:29:02 2016 -0700

--
 .../hdfs/server/balancer/TestBalancer.java  | 61 
 1 file changed, 38 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76cc84e6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 73a4cbc..f58a3ad 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -1282,6 +1282,14 @@ public class TestBalancer {
 } catch (IllegalArgumentException e) {
 
 }
+
+parameters = new String[] {"-source"};
+try {
+  Balancer.Cli.parse(parameters);
+  fail(reason + " for -source parameter");
+} catch (IllegalArgumentException ignored) {
+  // expected
+}
   }
 
   @Test
@@ -1800,11 +1808,12 @@ public class TestBalancer {
  final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
 
 { // run Balancer with min-block-size=50
-  BalancerParameters.Builder b =
-  new BalancerParameters.Builder();
-  b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
-  b.setThreshold(1);
-  final BalancerParameters p = b.build();
+  final BalancerParameters p = Balancer.Cli.parse(new String[] {
+  "-policy", BalancingPolicy.Node.INSTANCE.getName(),
+  "-threshold", "1"
+  });
+  assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
+  assertEquals(p.getThreshold(), 1.0, 0.001);
 
   conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 
50);
   final int r = Balancer.run(namenodes, p, conf);
@@ -1819,12 +1828,14 @@ public class TestBalancer {
   for(int i = capacities.length; i < datanodes.size(); i++) {
 sourceNodes.add(datanodes.get(i).getDisplayName());
   }
-  BalancerParameters.Builder b =
-  new BalancerParameters.Builder();
-  b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
-  b.setThreshold(1);
-  b.setSourceNodes(sourceNodes);
-  final BalancerParameters p = b.build();
+  final BalancerParameters p = Balancer.Cli.parse(new String[] {
+  "-policy", BalancingPolicy.Node.INSTANCE.getName(),
+  "-threshold", "1",
+  "-source", StringUtils.join(sourceNodes, ',')
+  });
+  assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
+  assertEquals(p.getThreshold(), 1.0, 0.001);
+  assertEquals(p.getSourceNodes(), sourceNodes);
 
   conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 
50);
   final int r = Balancer.run(namenodes, p, conf);
@@ -1835,12 +1846,14 @@ public class TestBalancer {
   final Set<String> sourceNodes = new HashSet<>();
   final List<DataNode> datanodes = cluster.getDataNodes();
   sourceNodes.add(datanodes.get(0).getDisplayName());
-  BalancerParameters.Builder b =
-  new BalancerParameters.Builder();
-  b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
-  b.setThreshold(1);
-  b.setSourceNodes(sourceNodes);
-  final BalancerParameters p = b.build();
+  final BalancerParameters p = Balancer.Cli.parse(new String[] {
+  "-policy", BalancingPolicy.Node.INSTANCE.getName(),
+  "-threshold", "1",
+  "-source", StringUtils.join(sourceNodes, ',')
+  });
+  assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE);
+  assertEquals(p.getThreshold(), 1.0, 0.001);
+  assertEquals(p.getSourceNodes(), sourceNodes);
 
   conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
   final int r = Balancer.run(namenodes, p, conf);
@@ -1853,12 +1866,14 @@ public class TestBalancer {
   for(int i = 0; i < capacities.length; i++) {
 sourceNodes.add(datanodes.get(i).getDisplayName());
   }
-  
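
The new test code leans on the classic try/fail/catch-expected idiom for asserting that parsing rejects bad input. A self-contained JUnit 4 sketch, with a hypothetical parser standing in for Balancer.Cli.parse:

    import static org.junit.Assert.fail;
    import org.junit.Test;

    public class CliParseTest {
      // Hypothetical stand-in: rejects an option with no value.
      static void parse(String[] args) {
        if (args.length % 2 != 0) {
          throw new IllegalArgumentException("option without a value");
        }
      }

      @Test
      public void rejectsDanglingSourceOption() {
        try {
          parse(new String[] {"-source"});
          fail("expected IllegalArgumentException for -source with no value");
        } catch (IllegalArgumentException expected) {
          // expected: the option requires an argument
        }
      }
    }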

[45/50] hadoop git commit: HDFS-11013. Correct typos in native erasure coding dump code. Contributed by László Bence Nagy.

2016-10-17 Thread umamahesh
HDFS-11013. Correct typos in native erasure coding dump code. Contributed by 
László Bence Nagy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b671ee68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b671ee68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b671ee68

Branch: refs/heads/HDFS-10285
Commit: b671ee6846b79a6d106efed7cf7e1209b2cc408d
Parents: 987ee51
Author: Andrew Wang 
Authored: Mon Oct 17 14:14:50 2016 -0700
Committer: Andrew Wang 
Committed: Mon Oct 17 14:14:50 2016 -0700

--
 .../main/native/src/org/apache/hadoop/io/erasurecode/dump.c  | 8 
 .../native/src/org/apache/hadoop/io/erasurecode/isal_load.h  | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b671ee68/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
index 20bd189..e48032e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
@@ -57,11 +57,11 @@ void dumpCodingMatrix(unsigned char* buf, int n1, int n2) {
 
 void dumpEncoder(IsalEncoder* pCoder) {
   int numDataUnits = pCoder->coder.numDataUnits;
-  int numParityUnits = pCoder->coder.numDataUnits;
+  int numParityUnits = pCoder->coder.numParityUnits;
   int numAllUnits = pCoder->coder.numAllUnits;
 
-  printf("Encoding (numAlnumParityUnitslUnits = %d, numDataUnits = %d)\n",
-numParityUnits, numDataUnits);
+  printf("Encoding (numAllUnits = %d, numParityUnits = %d, numDataUnits = 
%d)\n",
+numAllUnits, numParityUnits, numDataUnits);
 
   printf("\n\nEncodeMatrix:\n");
   dumpCodingMatrix((unsigned char*) pCoder->encodeMatrix,
@@ -91,7 +91,7 @@ void dumpDecoder(IsalDecoder* pCoder) {
 
   printf("InvertMatrix:\n");
   dumpCodingMatrix((unsigned char*) pCoder->invertMatrix,
-   numDataUnits, numDataUnits);
+   numDataUnits, numAllUnits);
 
   printf("DecodeMatrix:\n");
   dumpCodingMatrix((unsigned char*) pCoder->decodeMatrix,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b671ee68/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
index 7cb7a6a..c46a531 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
@@ -57,7 +57,7 @@ typedef void (*__d_ec_encode_data_update)(int, int, int, int, 
unsigned char*,
 #endif
 
 #ifdef WINDOWS
-// For erasure_code.h
+// For gf_util.h
 typedef unsigned char (__cdecl *__d_gf_mul)(unsigned char, unsigned char);
 typedef unsigned char (__cdecl *__d_gf_inv)(unsigned char);
 typedef void (__cdecl *__d_gf_gen_rs_matrix)(unsigned char *, int, int);





[38/50] hadoop git commit: HDFS-11012. Unnecessary INFO logging on DFSClients for InvalidToken. Contributed by Harsh J.

2016-10-17 Thread umamahesh
HDFS-11012. Unnecessary INFO logging on DFSClients for InvalidToken. 
Contributed by Harsh J.

This closes #142


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ad037df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ad037df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ad037df

Branch: refs/heads/HDFS-10285
Commit: 5ad037df25ab3206509083276b7ef4ef001be48b
Parents: 391ce53
Author: Akira Ajisaka 
Authored: Sat Oct 15 22:14:24 2016 +0900
Committer: Akira Ajisaka 
Committed: Sat Oct 15 22:14:24 2016 +0900

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java| 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ad037df/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index dbffc64..5783f90 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1261,8 +1261,9 @@ public class DFSInputStream extends FSInputStream
  */
 if (ex instanceof InvalidBlockTokenException ||
 ex instanceof InvalidToken) {
-  DFSClient.LOG.info("Access token was invalid when connecting to "
-  + targetAddr + " : " + ex);
+  DFSClient.LOG.debug(
+  "Access token was invalid when connecting to {}: {}",
+  targetAddr, ex);
   return true;
 }
 return false;
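
Beyond demoting the level, the switch to SLF4J's parameterized form matters on a hot path: the message string is only assembled when debug logging is actually enabled. A minimal sketch of the idiom:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogDemo {
      private static final Logger LOG = LoggerFactory.getLogger(LogDemo.class);

      static void onInvalidToken(String targetAddr, Exception ex) {
        // No concatenation happens unless debug is enabled.
        LOG.debug("Access token was invalid when connecting to {}: {}",
            targetAddr, ex.toString());
      }

      public static void main(String[] args) {
        onInvalidToken("127.0.0.1:9866", new IllegalStateException("expired"));
      }
    }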





[46/50] hadoop git commit: HDFS-10922. Adding additional unit tests for Trash (II). Contributed by Weiwei Yang.

2016-10-17 Thread umamahesh
HDFS-10922. Adding additional unit tests for Trash (II). Contributed by Weiwei 
Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fd4c37c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fd4c37c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fd4c37c

Branch: refs/heads/HDFS-10285
Commit: 8fd4c37c45585d761d279f2f6032ff9c6c049895
Parents: b671ee6
Author: Xiaoyu Yao 
Authored: Mon Oct 17 08:22:31 2016 -0700
Committer: Xiaoyu Yao 
Committed: Mon Oct 17 14:21:36 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  40 +
 .../apache/hadoop/hdfs/TestDFSPermission.java   |  30 ++--
 .../org/apache/hadoop/hdfs/TestHDFSTrash.java   | 145 ++-
 3 files changed, 189 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fd4c37c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index f80cd78..963aaa6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -70,6 +70,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -2014,4 +2015,43 @@ public class DFSTestUtil {
   }
 }, 1000, 6);
   }
+
+  /**
+   * Close current file system and create a new instance as given
+   * {@link UserGroupInformation}.
+   */
+  public static FileSystem login(final FileSystem fs,
+  final Configuration conf, final UserGroupInformation ugi)
+  throws IOException, InterruptedException {
+if (fs != null) {
+  fs.close();
+}
+return DFSTestUtil.getFileSystemAs(ugi, conf);
+  }
+
+  /**
+   * Test if the given {@link FileStatus} user, group owner and its permission
+   * are expected, throw {@link AssertionError} if any value is not expected.
+   */
+  public static void verifyFilePermission(FileStatus stat, String owner,
+  String group, FsAction u, FsAction g, FsAction o) {
+if(stat != null) {
+  if(!Strings.isNullOrEmpty(owner)) {
+assertEquals(owner, stat.getOwner());
+  }
+  if(!Strings.isNullOrEmpty(group)) {
+assertEquals(group, stat.getGroup());
+  }
+  FsPermission permission = stat.getPermission();
+  if(u != null) {
+assertEquals(u, permission.getUserAction());
+  }
+  if (g != null) {
+assertEquals(g, permission.getGroupAction());
+  }
+  if (o != null) {
+assertEquals(o, permission.getOtherAction());
+  }
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fd4c37c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index d0d00e5..2705e67 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -305,7 +305,7 @@ public class TestDFSPermission {
   fs.mkdirs(rootDir);
   fs.setPermission(rootDir, new FsPermission((short) 0777));
 
-  login(USER1);
+  fs = DFSTestUtil.login(fs, conf, USER1);
   fs.mkdirs(user1Dir);
   fs.setPermission(user1Dir, new FsPermission((short) 0755));
   fs.setOwner(user1Dir, USER1.getShortUserName(), GROUP2_NAME);
@@ -318,7 +318,7 @@ public class TestDFSPermission {
 // login as user2, attempt to delete /BSS/user1
 // this should fail because user2 has no permission to
 // its sub directory.
-login(USER2);
+fs = DFSTestUtil.login(fs, conf, USER2);
 fs.delete(user1Dir, true);
 fail("User2 should not be allowed to delete user1's dir.");
   } catch (AccessControlException e) {
@@ -331,7 +331,7 @@ public class TestDFSPermission {
   assertTrue(fs.exists(user1Dir));
 
   try {
-login(SUPERUSER);
+fs = DFSTestUtil.login(fs, conf, SUPERUSER);
 Trash trash 
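
The verifyFilePermission helper added above asserts only the non-null fields, so a test can pin down exactly the attributes it cares about. A runnable sketch of the same check against the local file system (the helper's logic is inlined here; the directory name is arbitrary, and the local umask may alter the observed permission):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class PermissionCheckDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path dir = new Path(System.getProperty("java.io.tmpdir"), "perm-demo");
        fs.mkdirs(dir, new FsPermission((short) 0755));
        FileStatus stat = fs.getFileStatus(dir);

        // Equivalent of verifyFilePermission(stat, null, null,
        //     FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE):
        // owner and group are null, i.e. deliberately not asserted.
        FsPermission p = stat.getPermission();
        if (p.getUserAction() != FsAction.ALL
            || p.getGroupAction() != FsAction.READ_EXECUTE
            || p.getOtherAction() != FsAction.READ_EXECUTE) {
          throw new AssertionError("unexpected permission: " + p);
        }
        System.out.println("permissions verified: " + p);
      }
    }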

[41/50] hadoop git commit: HADOOP-13661. Upgrade HTrace version. Contributed by Sean Mackrory.

2016-10-17 Thread umamahesh
HADOOP-13661. Upgrade HTrace version. Contributed by Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed9fcbec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed9fcbec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed9fcbec

Branch: refs/heads/HDFS-10285
Commit: ed9fcbec544df149d08d9ac31989a7291eff6507
Parents: 1f304b0
Author: Wei-Chiu Chuang 
Authored: Mon Oct 17 05:04:49 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Oct 17 05:04:49 2016 -0700

--
 hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md | 2 +-
 hadoop-project/pom.xml   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed9fcbec/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
index cbdee8a..9b7084d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
@@ -48,7 +48,7 @@ LocalFileSpanReceiver is included in the htrace-core4 jar 
which is bundled
 with Hadoop.)
 
 ```
-$ cp htrace-htraced/target/htrace-htraced-4.0.1-incubating.jar 
$HADOOP_HOME/share/hadoop/common/lib/
+$ cp htrace-htraced/target/htrace-htraced-4.1.0-incubating.jar 
$HADOOP_HOME/share/hadoop/common/lib/
 ```
 
 ### Dynamic update of tracing configuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed9fcbec/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 82adebf..5826cf6 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -882,7 +882,7 @@
       <dependency>
         <groupId>org.apache.htrace</groupId>
         <artifactId>htrace-core4</artifactId>
-        <version>4.0.1-incubating</version>
+        <version>4.1.0-incubating</version>
       </dependency>
       <dependency>
         <groupId>org.jdom</groupId>





[15/50] hadoop git commit: HDFS-11000. webhdfs PUT does not work if requests are routed to call queue. Contributed by Kihwal Lee.

2016-10-17 Thread umamahesh
HDFS-11000. webhdfs PUT does not work if requests are routed to call queue. 
Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9454dc5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9454dc5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9454dc5e

Branch: refs/heads/HDFS-10285
Commit: 9454dc5e8091354cd0a4b8c8aa5f4004529db5d5
Parents: 901eca0
Author: Kihwal Lee 
Authored: Thu Oct 13 08:47:15 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 08:47:15 2016 -0500

--
 .../hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9454dc5e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 4887e35..4247a67 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -332,7 +332,7 @@ public class NamenodeWebHdfsMethods {
 } else {
   //generate a token
  final Token<? extends TokenIdentifier> t = generateDelegationToken(
-  namenode, ugi, userPrincipal.getName());
+  namenode, ugi, null);
   delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
 }
 final String query = op.toQueryString() + delegationQuery





[26/50] hadoop git commit: HADOOP-13723. AliyunOSSInputStream#read() should update read bytes stat correctly. Contributed by Mingliang Liu

2016-10-17 Thread umamahesh
HADOOP-13723. AliyunOSSInputStream#read() should update read bytes stat 
correctly. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9f73f1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9f73f1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9f73f1b

Branch: refs/heads/HDFS-10285
Commit: d9f73f1b7cd893a7d88baa9bfd1b809a5dec9e59
Parents: ae51b11
Author: Mingliang Liu 
Authored: Thu Oct 13 17:05:28 2016 -0700
Committer: Mingliang Liu 
Committed: Thu Oct 13 22:33:55 2016 -0700

--
 .../java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9f73f1b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
index b87a3a7..a3af7ce 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
@@ -123,7 +123,7 @@ public class AliyunOSSInputStream extends FSInputStream {
 }
 
 if (statistics != null && byteRead >= 0) {
-  statistics.incrementBytesRead(1);
+  statistics.incrementBytesRead(byteRead);
 }
 return byteRead;
   }
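
The one-line fix belongs to a common bug class: a bulk read returns a byte count, and any statistics must advance by that count, not by one per call. A self-contained sketch of a stream that gets both cases right:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class CountingStream extends InputStream {
      private final InputStream in;
      private long bytesRead;  // the "statistics" being maintained

      CountingStream(InputStream in) {
        this.in = in;
      }

      @Override
      public int read() throws IOException {
        int b = in.read();
        if (b >= 0) {
          bytesRead++;  // single-byte read: +1 is correct
        }
        return b;
      }

      @Override
      public int read(byte[] buf, int off, int len) throws IOException {
        int n = in.read(buf, off, len);
        if (n > 0) {
          bytesRead += n;  // bulk read: advance by n, not by 1
        }
        return n;
      }

      public static void main(String[] args) throws IOException {
        CountingStream s =
            new CountingStream(new ByteArrayInputStream(new byte[10]));
        s.read(new byte[8], 0, 8);
        System.out.println("bytesRead = " + s.bytesRead);  // 8, not 1
      }
    }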





[50/50] hadoop git commit: HDFS-10801. [SPS]: Protocol buffer changes for sending storage movement commands from NN to DN. Contributed by Rakesh R

2016-10-17 Thread umamahesh
HDFS-10801. [SPS]: Protocol buffer changes for sending storage movement 
commands from NN to DN. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13a80503
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13a80503
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13a80503

Branch: refs/heads/HDFS-10285
Commit: 13a8050350962d939fd808cf2e20604311b81b8e
Parents: 07299e2
Author: Rakesh Radhakrishnan 
Authored: Tue Oct 11 11:44:06 2016 +0530
Committer: Uma Maheswara Rao G 
Committed: Mon Oct 17 16:05:26 2016 -0700

--
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 90 
 .../blockmanagement/DatanodeDescriptor.java | 15 
 .../server/blockmanagement/DatanodeManager.java | 13 ++-
 .../hdfs/server/datanode/BPOfferService.java|  8 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  7 ++
 .../datanode/StoragePolicySatisfyWorker.java| 22 -
 .../protocol/BlockStorageMovementCommand.java   | 71 ++-
 .../hdfs/server/protocol/DatanodeProtocol.java  |  1 +
 .../src/main/proto/DatanodeProtocol.proto   | 22 +
 .../namenode/TestStoragePolicySatisfier.java| 86 +++
 10 files changed, 273 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13a80503/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 78371f5..28c7590 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -48,6 +48,8 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDele
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockStorageMovementCommandProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockStorageMovementProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -89,6 +91,8 @@ import 
org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.Block
 import 
org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import 
org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand;
+import 
org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import 
org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import 
org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;
@@ -457,6 +461,8 @@ public class PBHelper {
   return PBHelper.convert(proto.getBlkIdCmd());
 case BlockECReconstructionCommand:
   return PBHelper.convert(proto.getBlkECReconstructionCmd());
+case BlockStorageMovementCommand:
+  return PBHelper.convert(proto.getBlkStorageMovementCmd());
 default:
   return null;
 }
@@ -591,6 +597,11 @@ public class PBHelper {
   .setBlkECReconstructionCmd(
   convert((BlockECReconstructionCommand) datanodeCommand));
   break;
+case DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT:
+  builder.setCmdType(DatanodeCommandProto.Type.BlockStorageMovementCommand)
+  .setBlkStorageMovementCmd(
+  convert((BlockStorageMovementCommand) datanodeCommand));
+  break;
 case DatanodeProtocol.DNA_UNKNOWN: //Not expected
 default:
   builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
@@ -963,4 +974,83 @@ public class PBHelper {
 DatanodeProtocol.DNA_ERASURE_CODING_RECONSTRUCTION,
 blkECReconstructionInfos);
   }
+
+  private static BlockStorageMovementCommandProto convert(
+  BlockStorageMovementCommand blkStorageMovementCmd) {
+BlockStorageMovementCommandProto.Builder builder =
+
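
The message is truncated above. The PBHelper changes follow a symmetric converter pattern: one method per direction for each command type, plus a case in the dispatch switch. A stub-free sketch of that shape, using stand-in types instead of generated protobuf classes (all names here are illustrative):

    enum CmdType { BLOCK_STORAGE_MOVEMENT, NULL_COMMAND }

    class WireCmd {  // stand-in for the protobuf message
      CmdType type;
      String payload;
    }

    class MovementCmd {  // stand-in for the server-side command object
      final String payload;
      MovementCmd(String payload) {
        this.payload = payload;
      }
    }

    public class ConvertDemo {
      static WireCmd convert(MovementCmd cmd) {
        WireCmd w = new WireCmd();
        w.type = CmdType.BLOCK_STORAGE_MOVEMENT;
        w.payload = cmd.payload;
        return w;
      }

      static Object convert(WireCmd w) {
        switch (w.type) {
          case BLOCK_STORAGE_MOVEMENT:
            return new MovementCmd(w.payload);
          default:
            return null;  // mirrors the NullDatanodeCommand fallback above
        }
      }

      public static void main(String[] args) {
        WireCmd w = convert(new MovementCmd("move blk_1 to SSD"));
        System.out.println(((MovementCmd) convert(w)).payload);
      }
    }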

[31/50] hadoop git commit: HDFS-10883. getTrashRoot()'s behavior is not consistent in DFS after enabling EZ. Contributed by Yuanbo Liu.

2016-10-17 Thread umamahesh
HDFS-10883. getTrashRoot()'s behavior is not consistent in DFS after enabling EZ. Contributed 
by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0007360c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0007360c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0007360c

Branch: refs/heads/HDFS-10285
Commit: 0007360c3344b3485fa17de0fd2015a628de947c
Parents: 701c27a
Author: Andrew Wang 
Authored: Fri Oct 14 11:41:29 2016 -0700
Committer: Andrew Wang 
Committed: Fri Oct 14 11:41:29 2016 -0700

--
 .../hadoop/hdfs/DistributedFileSystem.java  |   5 +-
 .../src/site/markdown/TransparentEncryption.md  |   4 +-
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  10 +-
 .../namenode/TestNestedEncryptionZones.java | 175 +--
 4 files changed, 139 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0007360c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 548815f..18a29e8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2478,11 +2478,12 @@ public class DistributedFileSystem extends FileSystem {
*/
   @Override
   public Path getTrashRoot(Path path) {
-if ((path == null) || path.isRoot() || !dfs.isHDFSEncryptionEnabled()) {
+if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
   return super.getTrashRoot(path);
 }
 
-String parentSrc = path.getParent().toUri().getPath();
+String parentSrc = path.isRoot()?
+path.toUri().getPath():path.getParent().toUri().getPath();
 try {
   EncryptionZone ez = dfs.getEZForPath(parentSrc);
   if ((ez != null)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0007360c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
index e7d9f1d..b82b400 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
@@ -242,12 +242,14 @@ By default, distcp compares checksums provided by the 
filesystem to verify that
 Rename and Trash considerations
 -
 
-HDFS restricts file and directory renames across encryption zone boundaries. 
This includes renaming an encrypted file / directory into an unencrypted 
directory (e.g., `hdfs dfs mv /zone/encryptedFile /home/bob`), renaming an 
unencrypted file / directory into an encryption zone (e.g., `hdfs dfs mv 
/home/bob/unEncryptedFile /zone`), and renaming between two different 
encryption zones (e.g., `hdfs dfs mv /home/alice/zone1/foo /home/alice/zone2`). 
In these examples, `/zone`, `/home/alice/zone1`, and `/home/alice/zone2` are 
encryption zones, while `/home/bob` is not. A rename is only allowed if the 
source and destination paths are in the same encryption zone, or both paths are 
unencrypted (not in any encryption zone).
+HDFS restricts file and directory renames across encryption zone boundaries. 
This includes renaming an encrypted file / directory into an unencrypted 
directory (e.g., `hdfs dfs mv /zone/encryptedFile /home/bob`), renaming an 
unencrypted file or directory into an encryption zone (e.g., `hdfs dfs mv 
/home/bob/unEncryptedFile /zone`), and renaming between two different 
encryption zones (e.g., `hdfs dfs mv /home/alice/zone1/foo /home/alice/zone2`). 
In these examples, `/zone`, `/home/alice/zone1`, and `/home/alice/zone2` are 
encryption zones, while `/home/bob` is not. A rename is only allowed if the 
source and destination paths are in the same encryption zone, or both paths are 
unencrypted (not in any encryption zone).
 
 This restriction enhances security and eases system management significantly. 
All file EDEKs under an encryption zone are encrypted with the encryption zone 
key. Therefore, if the encryption zone key is compromised, it is important to 
identify all vulnerable files and re-encrypt them. This is fundamentally 
difficult if a file initially created in an encryption zone can be renamed to 
an arbitrary location in the filesystem.
 
 To 
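
The DistributedFileSystem change above boils down to one guard: the root path has no parent, so when resolving the enclosing encryption zone the root must be used as-is. A minimal sketch of that guard with plain java.net.URI in place of Hadoop's Path:

    import java.net.URI;

    public class TrashRootGuard {
      // Returns the path whose encryption zone should be consulted.
      static String parentSrcOf(URI path) {
        String p = path.getPath();
        boolean isRoot = p.equals("/");
        // Root is its own starting point; everything else uses its parent.
        return isRoot ? p : p.substring(0, Math.max(1, p.lastIndexOf('/')));
      }

      public static void main(String[] args) {
        System.out.println(parentSrcOf(URI.create("hdfs://nn/")));          // "/"
        System.out.println(parentSrcOf(URI.create("hdfs://nn/zone/file"))); // "/zone"
      }
    }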

[37/50] hadoop git commit: HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for interleaving block reports. Contributed by Vinitha Gankidi.

2016-10-17 Thread umamahesh
HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for 
interleaving block reports. Contributed by Vinitha Gankidi.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/391ce535
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/391ce535
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/391ce535

Branch: refs/heads/HDFS-10285
Commit: 391ce535a739dc92cb90017d759217265a4fd969
Parents: 30bb197
Author: Vinitha Reddy Gankidi 
Authored: Fri Oct 14 10:37:44 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Oct 14 18:13:54 2016 -0700

--
 .../server/blockmanagement/BlockManager.java| 75 ++--
 .../blockmanagement/DatanodeDescriptor.java | 48 -
 .../blockmanagement/DatanodeStorageInfo.java| 11 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  4 +-
 .../blockmanagement/TestBlockManager.java   | 19 +++--
 .../TestNameNodePrunesMissingStorages.java  | 70 +++---
 .../server/datanode/BlockReportTestBase.java| 50 +
 .../TestAddOverReplicatedStripedBlocks.java |  4 ++
 8 files changed, 147 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/391ce535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7949439..7b13add 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1347,6 +1347,8 @@ public class BlockManager implements BlockStatsMXBean {
   }
 }
 checkSafeMode();
+LOG.info("Removed blocks associated with storage {} from DataNode {}",
+storageInfo, node);
   }
 
   /**
@@ -2191,7 +2193,7 @@ public class BlockManager implements BlockStatsMXBean {
   public boolean processReport(final DatanodeID nodeID,
   final DatanodeStorage storage,
   final BlockListAsLongs newReport,
-  BlockReportContext context, boolean lastStorageInRpc) throws IOException 
{
+  BlockReportContext context) throws IOException {
 namesystem.writeLock();
 final long startTime = Time.monotonicNow(); //after acquiring write lock
 final long endTime;
@@ -2245,32 +2247,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
   
   storageInfo.receivedBlockReport();
-  if (context != null) {
-storageInfo.setLastBlockReportId(context.getReportId());
-if (lastStorageInRpc) {
-  int rpcsSeen = node.updateBlockReportContext(context);
-  if (rpcsSeen >= context.getTotalRpcs()) {
-long leaseId = blockReportLeaseManager.removeLease(node);
-BlockManagerFaultInjector.getInstance().
-removeBlockReportLease(node, leaseId);
-List<DatanodeStorageInfo> zombies = node.removeZombieStorages();
-if (zombies.isEmpty()) {
-  LOG.debug("processReport 0x{}: no zombie storages found.",
-  Long.toHexString(context.getReportId()));
-} else {
-  for (DatanodeStorageInfo zombie : zombies) {
-removeZombieReplicas(context, zombie);
-  }
-}
-node.clearBlockReportContext();
-  } else {
-LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
-"report.", Long.toHexString(context.getReportId()),
-(context.getTotalRpcs() - rpcsSeen)
-);
-  }
-}
-  }
 } finally {
   endTime = Time.monotonicNow();
   namesystem.writeUnlock();
@@ -2295,36 +2271,25 @@ public class BlockManager implements BlockStatsMXBean {
 return !node.hasStaleStorages();
   }
 
-  private void removeZombieReplicas(BlockReportContext context,
-  DatanodeStorageInfo zombie) {
-LOG.warn("processReport 0x{}: removing zombie storage {}, which no " +
-"longer exists on the DataNode.",
-Long.toHexString(context.getReportId()), zombie.getStorageID());
-assert(namesystem.hasWriteLock());
-  Iterator<BlockInfo> iter = zombie.getBlockIterator();
-int prevBlocks = zombie.numBlocks();
-while (iter.hasNext()) {
-  BlockInfo block = iter.next();
-  // We assume that a block can be on only one storage in a DataNode.
-  // That's why we pass in the DatanodeDescriptor 

[30/50] hadoop git commit: HDFS-10735 Distcp using webhdfs on secure HA clusters fails with StandbyException

2016-10-17 Thread umamahesh
HDFS-10735 Distcp using webhdfs on secure HA clusters fails with 
StandbyException


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/701c27a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/701c27a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/701c27a7

Branch: refs/heads/HDFS-10285
Commit: 701c27a7762294e1a5fb2b3ac81f5534aa37f667
Parents: 8a9f663
Author: Benoy Antony 
Authored: Fri Oct 14 10:26:39 2016 -0700
Committer: Benoy Antony 
Committed: Fri Oct 14 10:26:39 2016 -0700

--
 .../java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java   | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/701c27a7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 19de5b5..af43d56 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -91,6 +91,7 @@ import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
@@ -471,6 +472,13 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   IOException re = JsonUtilClient.toRemoteException(m);
+
+  //check if exception is due to communication with a Standby name node
+  if (re.getMessage() != null && re.getMessage().endsWith(
+  StandbyException.class.getSimpleName())) {
+LOG.trace("Detected StandbyException", re);
+throw new IOException(re);
+  }
   // extract UGI-related exceptions and unwrap InvalidToken
   // the NN mangles these exceptions but the DN does not and may need
   // to re-fetch a token if either report the token is expired





[48/50] hadoop git commit: HDFS-10794. [SPS]: Provide storage policy satisfy worker at DN for co-ordinating the block storage movement work. Contributed by Rakesh R

2016-10-17 Thread umamahesh
HDFS-10794. [SPS]: Provide storage policy satisfy worker at DN for 
co-ordinating the block storage movement work. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8aa5e5ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8aa5e5ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8aa5e5ac

Branch: refs/heads/HDFS-10285
Commit: 8aa5e5ac1ab380298ed0f2d3aabafd3020bae530
Parents: f5d9235
Author: Kai Zheng 
Authored: Wed Sep 14 17:02:11 2016 +0800
Committer: Uma Maheswara Rao G 
Committed: Mon Oct 17 14:47:39 2016 -0700

--
 .../datanode/StoragePolicySatisfyWorker.java| 258 +++
 .../protocol/BlockStorageMovementCommand.java   | 101 
 .../TestStoragePolicySatisfyWorker.java | 159 
 3 files changed, 518 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aa5e5ac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
new file mode 100644
index 000..6df4e81
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -0,0 +1,258 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.Socket;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Daemon;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * StoragePolicySatisfyWorker handles the storage policy satisfier commands.
+ * These commands would be issued from NameNode as part of Datanode's heartbeat
+ * response. BPOfferService delegates the work to this class for handling
+ * BlockStorageMovement commands.
+ */
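
(Editor's note: the archive truncates this message before the class body. Judging from the imports above, the worker drains block-move tasks through an ExecutorCompletionService; the sketch below is a hypothetical reduction of that pattern, not the committed code.)

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class MoveWorkerSketch {
  private final ExecutorService movers = Executors.newFixedThreadPool(4);
  private final CompletionService<Boolean> completed =
      new ExecutorCompletionService<>(movers);

  // One task per block-movement command received via the heartbeat response.
  void submitMove(final Runnable moveTask) {
    completed.submit(() -> {
      moveTask.run();   // replicate the block to the target storage type
      return Boolean.TRUE;
    });
  }

  // Collect one finished move so its result can be reported back.
  boolean awaitOneResult() throws InterruptedException, ExecutionException {
    return completed.take().get();
  }
}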

[04/50] hadoop git commit: HADOOP-13705. Revert HADOOP-13534 Remove unused TrashPolicy#getInstance and initialize code.

2016-10-17 Thread umamahesh
HADOOP-13705. Revert HADOOP-13534 Remove unused TrashPolicy#getInstance and 
initialize code.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a09bf7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a09bf7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a09bf7c

Branch: refs/heads/HDFS-10285
Commit: 8a09bf7c19d9d2f6d6853d45e11b0d38c7c67f2a
Parents: 4b32b14
Author: Andrew Wang 
Authored: Tue Oct 11 13:46:07 2016 -0700
Committer: Andrew Wang 
Committed: Tue Oct 11 13:46:07 2016 -0700

--
 .../java/org/apache/hadoop/fs/TrashPolicy.java  | 30 
 .../apache/hadoop/fs/TrashPolicyDefault.java| 15 ++
 .../java/org/apache/hadoop/fs/TestTrash.java|  4 +++
 3 files changed, 49 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a09bf7c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
index bd99db4..157b9ab 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
@@ -38,6 +38,17 @@ public abstract class TrashPolicy extends Configured {
 
   /**
* Used to setup the trash policy. Must be implemented by all TrashPolicy
+   * implementations.
+   * @param conf the configuration to be used
+   * @param fs the filesystem to be used
+   * @param home the home directory
+   * @deprecated Use {@link #initialize(Configuration, FileSystem)} instead.
+   */
+  @Deprecated
+  public abstract void initialize(Configuration conf, FileSystem fs, Path 
home);
+
+  /**
+   * Used to setup the trash policy. Must be implemented by all TrashPolicy
* implementations. Different from initialize(conf, fs, home), this one does
* not assume trash always under /user/$USER due to HDFS encryption zone.
* @param conf the configuration to be used
@@ -105,6 +116,25 @@ public abstract class TrashPolicy extends Configured {
*
* @param conf the configuration to be used
* @param fs the file system to be used
+   * @param home the home directory
+   * @return an instance of TrashPolicy
+   * @deprecated Use {@link #getInstance(Configuration, FileSystem)} instead.
+   */
+  @Deprecated
+  public static TrashPolicy getInstance(Configuration conf, FileSystem fs, 
Path home) {
+Class<? extends TrashPolicy> trashClass = conf.getClass(
+"fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
+TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
+trash.initialize(conf, fs, home); // initialize TrashPolicy
+return trash;
+  }
+
+  /**
+   * Get an instance of the configured TrashPolicy based on the value
+   * of the configuration parameter fs.trash.classname.
+   *
+   * @param conf the configuration to be used
+   * @param fs the file system to be used
* @return an instance of TrashPolicy
*/
   public static TrashPolicy getInstance(Configuration conf, FileSystem fs)
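
(Editor's note: both overloads resolve the concrete policy from the fs.trash.classname key via ReflectionUtils, so restoring the deprecated three-argument form keeps older callers source-compatible. A hedged usage sketch of the preferred form:)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.TrashPolicy;

class TrashPolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Non-deprecated form; instantiation goes through fs.trash.classname
    // exactly as in the getInstance(conf, fs, home) body shown above.
    TrashPolicy policy = TrashPolicy.getInstance(conf, fs);
    System.out.println(policy.getClass().getName());
  }
}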

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a09bf7c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index f4a825c..7be 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -75,6 +75,21 @@ public class TrashPolicyDefault extends TrashPolicy {
 initialize(conf, fs);
   }
 
+  /**
+   * @deprecated Use {@link #initialize(Configuration, FileSystem)} instead.
+   */
+  @Override
+  @Deprecated
+  public void initialize(Configuration conf, FileSystem fs, Path home) {
+this.fs = fs;
+this.deletionInterval = (long)(conf.getFloat(
+FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT)
+* MSECS_PER_MINUTE);
+this.emptierInterval = (long)(conf.getFloat(
+FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
+* MSECS_PER_MINUTE);
+   }
+
   @Override
   public void initialize(Configuration conf, FileSystem fs) {
 this.fs = fs;


[29/50] hadoop git commit: HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin.

2016-10-17 Thread umamahesh
HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a9f6635
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a9f6635
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a9f6635

Branch: refs/heads/HDFS-10285
Commit: 8a9f6635a33e9648e9396e9ec5571fa34aa0c773
Parents: dbe663d
Author: Kihwal Lee 
Authored: Fri Oct 14 11:38:48 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 14 11:38:48 2016 -0500

--
 .../blockmanagement/TestPendingInvalidateBlock.java| 13 +
 1 file changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a9f6635/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 696b2aa..d856065 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,6 +86,8 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
 final Path foo = new Path("/foo");
 DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
+DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 1);
+
 // restart NN
 cluster.restartNameNode(true);
 InvalidateBlocks invalidateBlocks =
@@ -98,6 +100,7 @@ public class TestPendingInvalidateBlock {
 "invalidateBlocks", mockIb);
 dfs.delete(foo, true);
 
+waitForNumPendingDeletionBlocks(REPLICATION);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(REPLICATION, cluster.getNamesystem()
 .getPendingDeletionBlocks());
@@ -105,7 +108,7 @@ public class TestPendingInvalidateBlock {
 dfs.getPendingDeletionBlocksCount());
 Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
 Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -182,7 +185,7 @@ public class TestPendingInvalidateBlock {
 Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
 cluster.restartNameNode(true);
-waitForBlocksToDelete();
+waitForNumPendingDeletionBlocks(0);
 Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -199,7 +202,8 @@ public class TestPendingInvalidateBlock {
 return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForBlocksToDelete() throws Exception {
+  private void waitForNumPendingDeletionBlocks(final int numBlocks)
+  throws Exception {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
 
   @Override
@@ -207,7 +211,8 @@ public class TestPendingInvalidateBlock {
 try {
   cluster.triggerBlockReports();
 
-  if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
+  if (cluster.getNamesystem().getPendingDeletionBlocks()
+  == numBlocks) {
 return true;
   }
 } catch (Exception e) {
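
(Editor's note: parameterizing the wait on an expected count, instead of hard-coding zero, is what lets the test observe the intermediate pending-deletion state. The underlying GenericTestUtils.waitFor idiom, with an illustrative condition and illustrative timeouts:)

import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

class WaitForSketch {
  static void waitForCount(final AtomicInteger counter, final int expected)
      throws Exception {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return counter.get() == expected;  // polled until true or timeout
      }
    }, 500, 10000);  // check every 500 ms, fail after 10 s
  }
}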





[24/50] hadoop git commit: YARN-5717. Add tests for container-executor is_feature_enabled. Contributed by Sidharta Seethana

2016-10-17 Thread umamahesh
YARN-5717. Add tests for container-executor is_feature_enabled. Contributed by 
Sidharta Seethana


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf3f43e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf3f43e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf3f43e9

Branch: refs/heads/HDFS-10285
Commit: cf3f43e95bf46030875137fc36da5c1fbe14250d
Parents: 0a85d07
Author: Chris Douglas 
Authored: Thu Oct 13 20:47:49 2016 -0700
Committer: Chris Douglas 
Committed: Thu Oct 13 20:49:07 2016 -0700

--
 .../impl/container-executor.c   | 11 ++---
 .../impl/container-executor.h   |  4 ++
 .../main/native/container-executor/impl/main.c  | 42 
 .../test/test-container-executor.c  | 51 
 4 files changed, 79 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf3f43e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index a9a7e96..8a995b1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -422,9 +422,9 @@ int change_user(uid_t user, gid_t group) {
   return 0;
 }
 
-
-static int is_feature_enabled(const char* feature_key, int default_value) {
-char *enabled_str = get_value(feature_key, &executor_cfg);
+int is_feature_enabled(const char* feature_key, int default_value,
+  struct configuration *cfg) {
+char *enabled_str = get_value(feature_key, cfg);
 int enabled = default_value;
 
 if (enabled_str != NULL) {
@@ -448,15 +448,14 @@ static int is_feature_enabled(const char* feature_key, 
int default_value) {
 }
 }
 
-
 int is_docker_support_enabled() {
 return is_feature_enabled(DOCKER_SUPPORT_ENABLED_KEY,
-DEFAULT_DOCKER_SUPPORT_ENABLED);
+DEFAULT_DOCKER_SUPPORT_ENABLED, &executor_cfg);
 }
 
 int is_tc_support_enabled() {
 return is_feature_enabled(TC_SUPPORT_ENABLED_KEY,
-DEFAULT_TC_SUPPORT_ENABLED);
+DEFAULT_TC_SUPPORT_ENABLED, &executor_cfg);
 }
 
 char* check_docker_binary(char *docker_binary) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf3f43e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
index 5c17b29..8ad5d47 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
@@ -264,6 +264,10 @@ int check_dir(const char* npath, mode_t st_mode, mode_t 
desired,
 int create_validate_dir(const char* npath, mode_t perm, const char* path,
int finalComponent);
 
+/** Check if a feature is enabled in the specified configuration. */
+int is_feature_enabled(const char* feature_key, int default_value,
+  struct configuration *cfg);
+
 /** Check if tc (traffic control) support is enabled in configuration. */
 int is_tc_support_enabled();
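
(Editor's note: threading the configuration through as a parameter, instead of reading the file-scoped executor config, is what makes the new unit tests possible, since each test can pass its own struct. The truthy/falsy parsing contract, sketched here in Java for brevity; the C source above is authoritative.)

class FeatureFlagSketch {
  // "1"/"true" enable, "0"/"false" disable, anything else (or no value)
  // falls back to the caller-supplied default.
  static boolean isFeatureEnabled(String value, boolean defaultValue) {
    if (value == null) {
      return defaultValue;
    }
    if ("1".equals(value) || "true".equalsIgnoreCase(value)) {
      return true;
    }
    if ("0".equals(value) || "false".equalsIgnoreCase(value)) {
      return false;
    }
    return defaultValue;
  }
}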
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf3f43e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index 27a269e..47bb3b9 100644
--- 

[33/50] hadoop git commit: HDFS-10960. TestDataNodeHotSwapVolumes#testRemoveVolumeBeingWritten fails at disk error verification after volume remove. (Manoj Govindassamy via lei)

2016-10-17 Thread umamahesh
HDFS-10960. TestDataNodeHotSwapVolumes#testRemoveVolumeBeingWritten fails at 
disk error verification after volume remove. (Manoj Govindassamy via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c520a27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c520a27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c520a27

Branch: refs/heads/HDFS-10285
Commit: 8c520a27cbd9daba05367d3a83017a2eab5258eb
Parents: adb96e1
Author: Lei Xu 
Authored: Fri Oct 14 13:41:59 2016 -0700
Committer: Lei Xu 
Committed: Fri Oct 14 13:41:59 2016 -0700

--
 .../datanode/TestDataNodeHotSwapVolumes.java | 19 ---
 1 file changed, 12 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c520a27/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 06387c5..83c231d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -642,8 +642,6 @@ public class TestDataNodeHotSwapVolumes {
 final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
 final FileSystem fs = cluster.getFileSystem();
 final Path testFile = new Path("/test");
-final long lastTimeDiskErrorCheck = dn.getLastDiskErrorCheck();
-
 FSDataOutputStream out = fs.create(testFile, REPLICATION);
 
 Random rb = new Random(0);
@@ -699,17 +697,24 @@ public class TestDataNodeHotSwapVolumes {
 
 reconfigThread.join();
 
+// Verify if the data directory reconfigure was successful
+FsDatasetSpi<?> fsDatasetSpi = dn.getFSDataset();
+try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
+.getFsVolumeReferences()) {
+  for (int i =0; i < fsVolumeReferences.size(); i++) {
+System.out.println("Vol: " +
+fsVolumeReferences.get(i).getBaseURI().toString());
+  }
+  assertEquals("Volume remove wasn't successful.",
+  1, fsVolumeReferences.size());
+}
+
 // Verify the file has sufficient replications.
 DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
 // Read the content back
 byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
 assertEquals(BLOCK_SIZE, content.length);
 
-// If an IOException thrown from BlockReceiver#run, it triggers
-// DataNode#checkDiskError(). So we can test whether checkDiskError() is 
called,
-// to see whether there is IOException in BlockReceiver#run().
-assertEquals(lastTimeDiskErrorCheck, dn.getLastDiskErrorCheck());
-
 if (!exceptions.isEmpty()) {
   throw new IOException(exceptions.get(0).getCause());
 }
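
(Editor's note: FsVolumeReferences is a reference-counted, closeable view of the datanode's volumes, which is why the verification above sits in try-with-resources. The access pattern in isolation, as a sketch:)

import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

class VolumeCountSketch {
  // Counts the volumes currently attached to a dataset; closing the
  // references releases the per-volume reference counts.
  static int countVolumes(FsDatasetSpi<?> dataset) throws IOException {
    try (FsDatasetSpi.FsVolumeReferences refs =
        dataset.getFsVolumeReferences()) {
      return refs.size();
    }
  }
}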





[09/50] hadoop git commit: HDFS-10965. Add unit test for HDFS command 'dfsadmin -printTopology'. Contributed by Xiaobing Zhou

2016-10-17 Thread umamahesh
HDFS-10965. Add unit test for HDFS command 'dfsadmin -printTopology'. 
Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ba7092b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ba7092b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ba7092b

Branch: refs/heads/HDFS-10285
Commit: 7ba7092bbcbbccfa24b672414d315656e600096c
Parents: b84c489
Author: Mingliang Liu 
Authored: Tue Oct 11 16:47:39 2016 -0700
Committer: Mingliang Liu 
Committed: Tue Oct 11 17:23:54 2016 -0700

--
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 50 
 1 file changed, 50 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ba7092b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 94ecb9e..b49f73d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.Before;
@@ -364,6 +365,55 @@ public class TestDFSAdmin {
   }
 
   @Test(timeout = 3)
+  public void testPrintTopology() throws Exception {
+redirectStream();
+
+/* init conf */
+final Configuration dfsConf = new HdfsConfiguration();
+final File baseDir = new File(
+PathUtils.getTestDir(getClass()),
+GenericTestUtils.getMethodName());
+dfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, 
baseDir.getAbsolutePath());
+
+final int numDn = 4;
+final String[] racks = {
+"/d1/r1", "/d1/r2",
+"/d2/r1", "/d2/r2"};
+
+/* init cluster using topology */
+try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
+.numDataNodes(numDn).racks(racks).build()) {
+
+  miniCluster.waitActive();
+  assertEquals(numDn, miniCluster.getDataNodes().size());
+  final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+
+  resetStream();
+  final int ret = ToolRunner.run(dfsAdmin, new String[] 
{"-printTopology"});
+
+  /* collect outputs */
+  final List<String> outs = Lists.newArrayList();
+  scanIntoList(out, outs);
+
+  /* verify results */
+  assertEquals(0, ret);
+  assertEquals(
+  "There should be three lines per Datanode: the 1st line is"
+  + " rack info, 2nd node info, 3rd empty line. The total"
+  + " should be as a result of 3 * numDn.",
+  12, outs.size());
+  assertThat(outs.get(0),
+  is(allOf(containsString("Rack:"), containsString("/d1/r1";
+  assertThat(outs.get(3),
+  is(allOf(containsString("Rack:"), containsString("/d1/r2";
+  assertThat(outs.get(6),
+  is(allOf(containsString("Rack:"), containsString("/d2/r1";
+  assertThat(outs.get(9),
+  is(allOf(containsString("Rack:"), containsString("/d2/r2";
+}
+  }
+
+  @Test(timeout = 3)
   public void testNameNodeGetReconfigurationStatus() throws IOException,
   InterruptedException, TimeoutException {
 ReconfigurationUtil ru = mock(ReconfigurationUtil.class);
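
(Editor's note: the test drives DFSAdmin the same way the shell does, through ToolRunner, then scrapes the redirected stdout. The bare invocation outside MiniDFSCluster; this assumes a reachable cluster in the default configuration:)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

class PrintTopologySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Exit code 0 on success, matching assertEquals(0, ret) above.
    int ret = ToolRunner.run(new DFSAdmin(conf),
        new String[] {"-printTopology"});
    System.exit(ret);
  }
}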





[08/50] hadoop git commit: HADOOP-13698. Document caveat for KeyShell when underlying KeyProvider does not delete a key.

2016-10-17 Thread umamahesh
HADOOP-13698. Document caveat for KeyShell when underlying KeyProvider does not 
delete a key.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b84c4891
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b84c4891
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b84c4891

Branch: refs/heads/HDFS-10285
Commit: b84c4891f9eca8d56593e48e9df88be42e24220d
Parents: 3c9a010
Author: Xiao Chen 
Authored: Tue Oct 11 17:05:00 2016 -0700
Committer: Xiao Chen 
Committed: Tue Oct 11 17:05:00 2016 -0700

--
 .../hadoop-common/src/site/markdown/CommandsManual.md| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b84c4891/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 4d7d504..2ece71a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -202,7 +202,9 @@ Manage keys via the KeyProvider. For details on 
KeyProviders, see the [Transpare
 
 Providers frequently require that a password or other secret is supplied. If 
the provider requires a password and is unable to find one, it will use a 
default password and emit a warning message that the default password is being 
used. If the `-strict` flag is supplied, the warning message becomes an error 
message and the command returns immediately with an error status.
 
-NOTE: Some KeyProviders (e.g. 
org.apache.hadoop.crypto.key.JavaKeyStoreProvider) does not support uppercase 
key names.
+NOTE: Some KeyProviders (e.g. 
org.apache.hadoop.crypto.key.JavaKeyStoreProvider) do not support uppercase key 
names.
+
+NOTE: Some KeyProviders do not directly execute a key deletion (e.g. performing 
a soft-delete instead, or delaying the actual deletion, to prevent mistakes). In 
these cases, one may encounter errors when creating or deleting a key with the 
same name after deleting it. Please check the underlying KeyProvider for 
details.
 
 ### `trace`
 





[14/50] hadoop git commit: HDFS-11002. Fix broken attr/getfattr/setfattr links in ExtendedAttributes.md. Contributed by Mingliang Liu.

2016-10-17 Thread umamahesh
HDFS-11002. Fix broken attr/getfattr/setfattr links in ExtendedAttributes.md. 
Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/901eca00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/901eca00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/901eca00

Branch: refs/heads/HDFS-10285
Commit: 901eca004d0e7e413b109a93128892176c808d61
Parents: 12d739a
Author: Akira Ajisaka 
Authored: Thu Oct 13 14:29:30 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Oct 13 14:29:30 2016 +0900

--
 .../hadoop-hdfs/src/site/markdown/ExtendedAttributes.md  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/901eca00/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
index 5a20986..eb527ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ExtendedAttributes.md
@@ -30,7 +30,7 @@ Overview
 
 ### HDFS extended attributes
 
-Extended attributes in HDFS are modeled after extended attributes in Linux 
(see the Linux manpage for 
[attr(5)](http://www.bestbits.at/acl/man/man5/attr.txt) and [related 
documentation](http://www.bestbits.at/acl/)). An extended attribute is a 
*name-value pair*, with a string name and binary value. Xattrs names must also 
be prefixed with a *namespace*. For example, an xattr named *myXattr* in the 
*user* namespace would be specified as **user.myXattr**. Multiple xattrs can be 
associated with a single inode.
+Extended attributes in HDFS are modeled after extended attributes in Linux 
(see the Linux manpage for 
[attr(5)](http://man7.org/linux/man-pages/man5/attr.5.html)). An extended 
attribute is a *name-value pair*, with a string name and binary value. Xattrs 
names must also be prefixed with a *namespace*. For example, an xattr named 
*myXattr* in the *user* namespace would be specified as **user.myXattr**. 
Multiple xattrs can be associated with a single inode.
 
 ### Namespaces and Permissions
 
@@ -49,7 +49,7 @@ The `raw` namespace is reserved for internal system 
attributes that sometimes ne
 Interacting with extended attributes
 
 
-The Hadoop shell has support for interacting with extended attributes via 
`hadoop fs -getfattr` and `hadoop fs -setfattr`. These commands are styled 
after the Linux [getfattr(1)](http://www.bestbits.at/acl/man/man1/getfattr.txt) 
and [setfattr(1)](http://www.bestbits.at/acl/man/man1/setfattr.txt) commands.
+The Hadoop shell has support for interacting with extended attributes via 
`hadoop fs -getfattr` and `hadoop fs -setfattr`. These commands are styled 
after the Linux 
[getfattr(1)](http://man7.org/linux/man-pages/man1/getfattr.1.html) and 
[setfattr(1)](http://man7.org/linux/man-pages/man1/setfattr.1.html) commands.
 
 ### getfattr
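
(Editor's note: the shell commands documented above map onto the FileSystem xattr API; a hedged sketch, with the path and attribute name purely illustrative:)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class XAttrSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/file");  // illustrative
    // Names must carry a namespace prefix such as "user.", as noted above.
    fs.setXAttr(file, "user.myXattr", "someValue".getBytes("UTF-8"));
    byte[] value = fs.getXAttr(file, "user.myXattr");
    System.out.println(new String(value, "UTF-8"));
  }
}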
 





[27/50] hadoop git commit: HADOOP-13417. Fix javac and checkstyle warnings in hadoop-auth package.

2016-10-17 Thread umamahesh
HADOOP-13417. Fix javac and checkstyle warnings in hadoop-auth package.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a5a7247
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a5a7247
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a5a7247

Branch: refs/heads/HDFS-10285
Commit: 5a5a724731b74df9eed2de5f3370bcb8023fa2eb
Parents: d9f73f1
Author: Akira Ajisaka 
Authored: Fri Oct 14 14:45:55 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Oct 14 14:45:55 2016 +0900

--
 .../client/AuthenticatorTestCase.java   | 49 
 1 file changed, 29 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a5a7247/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
index 8f35e13..35e40d8 100644
--- 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
+++ 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
@@ -20,14 +20,15 @@ import 
org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.http.HttpResponse;
 import org.apache.http.auth.AuthScope;
 import org.apache.http.auth.Credentials;
+import org.apache.http.client.CredentialsProvider;
 import org.apache.http.client.HttpClient;
 import org.apache.http.client.methods.HttpGet;
 import org.apache.http.client.methods.HttpPost;
 import org.apache.http.client.methods.HttpUriRequest;
-import org.apache.http.client.params.AuthPolicy;
 import org.apache.http.entity.InputStreamEntity;
-import org.apache.http.impl.auth.SPNegoSchemeFactory;
-import org.apache.http.impl.client.SystemDefaultHttpClient;
+import org.apache.http.impl.auth.SPNegoScheme;
+import org.apache.http.impl.client.BasicCredentialsProvider;
+import org.apache.http.impl.client.HttpClientBuilder;
 import org.apache.http.util.EntityUtils;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.servlet.Context;
@@ -53,6 +54,7 @@ import java.net.ServerSocket;
 import java.net.URL;
 import java.security.Principal;
 import java.util.Properties;
+
 import org.junit.Assert;
 
 public class AuthenticatorTestCase {
@@ -241,22 +243,29 @@ public class AuthenticatorTestCase {
 }
   }
 
-  private SystemDefaultHttpClient getHttpClient() {
-final SystemDefaultHttpClient httpClient = new SystemDefaultHttpClient();
-httpClient.getAuthSchemes().register(AuthPolicy.SPNEGO, new SPNegoSchemeFactory(true));
- Credentials use_jaas_creds = new Credentials() {
-   public String getPassword() {
- return null;
-   }
-
-   public Principal getUserPrincipal() {
- return null;
-   }
- };
-
- httpClient.getCredentialsProvider().setCredentials(
-   AuthScope.ANY, use_jaas_creds);
- return httpClient;
+  private HttpClient getHttpClient() {
+HttpClientBuilder builder = HttpClientBuilder.create();
+// Register the auth scheme
+builder.setDefaultAuthSchemeRegistry(
+s-> httpContext -> new SPNegoScheme(true, true)
+);
+
+Credentials useJaasCreds = new Credentials() {
+  public String getPassword() {
+return null;
+  }
+  public Principal getUserPrincipal() {
+return null;
+  }
+};
+
+CredentialsProvider jaasCredentialProvider
+= new BasicCredentialsProvider();
+jaasCredentialProvider.setCredentials(AuthScope.ANY, useJaasCreds);
+// Set credential provider
+builder.setDefaultCredentialsProvider(jaasCredentialProvider);
+
+return builder.build();
   }
 
   private void doHttpClientRequest(HttpClient httpClient, HttpUriRequest 
request) throws Exception {
@@ -273,7 +282,7 @@ public class AuthenticatorTestCase {
   protected void _testAuthenticationHttpClient(Authenticator authenticator, 
boolean doPost) throws Exception {
 start();
 try {
-  SystemDefaultHttpClient httpClient = getHttpClient();
+  HttpClient httpClient = getHttpClient();
   doHttpClientRequest(httpClient, new HttpGet(getBaseURL()));
 
   // Always do a GET before POST to trigger the SPNego negotiation



[05/50] hadoop git commit: HDFS-10991. Export hdfsTruncateFile symbol in libhdfs. Contributed by Surendra Singh Lilhore.

2016-10-17 Thread umamahesh
HDFS-10991. Export hdfsTruncateFile symbol in libhdfs. Contributed by Surendra 
Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dacd3ec6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dacd3ec6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dacd3ec6

Branch: refs/heads/HDFS-10285
Commit: dacd3ec66b111be24131957c986f0c748cf9ea26
Parents: 8a09bf7
Author: Andrew Wang 
Authored: Tue Oct 11 15:07:14 2016 -0700
Committer: Andrew Wang 
Committed: Tue Oct 11 15:07:14 2016 -0700

--
 .../src/main/native/libhdfs/include/hdfs/hdfs.h | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dacd3ec6/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
index c856928..83c1c59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
@@ -493,6 +493,7 @@ extern  "C" {
  * complete before proceeding with further file updates.
  * -1 on error.
  */
+LIBHDFS_EXTERNAL
 int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength);
 
 /**





[01/50] hadoop git commit: HDFS-10916. Switch from "raw" to "system" xattr namespace for erasure coding policy. (Andrew Wang via lei) [Forced Update!]

2016-10-17 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 a88d66e25 -> 13a805035 (forced update)


HDFS-10916. Switch from "raw" to "system" xattr namespace for erasure coding 
policy. (Andrew Wang via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/809cfd27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/809cfd27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/809cfd27

Branch: refs/heads/HDFS-10285
Commit: 809cfd27a30900d2c0e0e133574de49d0b4538cf
Parents: ecb51b8
Author: Lei Xu 
Authored: Tue Oct 11 10:04:46 2016 -0700
Committer: Lei Xu 
Committed: Tue Oct 11 10:04:46 2016 -0700

--
 .../org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/809cfd27/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 3798394..d112a48 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -369,7 +369,7 @@ public interface HdfsServerConstants {
   String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER =
   "security.hdfs.unreadable.by.superuser";
   String XATTR_ERASURECODING_POLICY =
-  "raw.hdfs.erasurecoding.policy";
+  "system.hdfs.erasurecoding.policy";
 
   long BLOCK_GROUP_INDEX_MASK = 15;
   byte MAX_BLOCKS_IN_GROUP = 16;





[21/50] hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.

2016-10-17 Thread umamahesh
HDFS-10987. Make Decommission less expensive when lot of blocks present. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/332a61fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/332a61fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/332a61fd

Branch: refs/heads/HDFS-10285
Commit: 332a61fd74fd2a9874319232c583ab5d2c53ff03
Parents: fdce515
Author: Kihwal Lee 
Authored: Thu Oct 13 13:52:49 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Oct 13 13:52:49 2016 -0500

--
 .../blockmanagement/DecommissionManager.java| 29 +++-
 1 file changed, 28 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/332a61fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 6436fab..87b36da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -389,6 +389,10 @@ public class DecommissionManager {
  */
 private int numBlocksChecked = 0;
 /**
+ * The number of blocks checked after (re)holding lock.
+ */
+private int numBlocksCheckedPerLock = 0;
+/**
  * The number of nodes that have been checked on this tick. Used for 
  * statistics.
  */
@@ -418,6 +422,7 @@ public class DecommissionManager {
   }
   // Reset the checked count at beginning of each iteration
   numBlocksChecked = 0;
+  numBlocksCheckedPerLock = 0;
   numNodesChecked = 0;
   // Check decom progress
   namesystem.writeLock();
@@ -451,7 +456,8 @@ public class DecommissionManager {
   iterkey).iterator();
  final LinkedList<DatanodeDescriptor> toRemove = new LinkedList<>();
 
-  while (it.hasNext() && !exceededNumBlocksPerCheck()) {
+  while (it.hasNext() && !exceededNumBlocksPerCheck() && namesystem
+  .isRunning()) {
 numNodesChecked++;
final Map.Entry<DatanodeDescriptor, AbstractList<BlockInfo>>
entry = it.next();
@@ -577,7 +583,28 @@ public class DecommissionManager {
   int decommissionOnlyReplicas = 0;
   int lowRedundancyInOpenFiles = 0;
   while (it.hasNext()) {
+if (insufficientList == null
+&& numBlocksCheckedPerLock >= numBlocksPerCheck) {
+  // During fullscan insufficientlyReplicated will NOT be null, 
iterator
+  // will be DN's iterator. So should not yield lock, otherwise
+  // ConcurrentModificationException could occur.
+  // Once the fullscan done, iterator will be a copy. So can yield the
+  // lock.
+  // Yielding is required in case of block number is greater than the
+  // configured per-iteration-limit.
+  namesystem.writeUnlock();
+  try {
+LOG.debug("Yielded lock during decommission check");
+Thread.sleep(0, 500);
+  } catch (InterruptedException ignored) {
+return;
+  }
+  // reset
+  numBlocksCheckedPerLock = 0;
+  namesystem.writeLock();
+}
 numBlocksChecked++;
+numBlocksCheckedPerLock++;
 final BlockInfo block = it.next();
 // Remove the block from the list if it's no longer in the block map,
 // e.g. the containing file has been deleted
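
(Editor's note: the core of the fix is bounding how long the namesystem write lock is held: unlock, sleep briefly so waiters can run, then reacquire and reset the per-lock counter. A condensed sketch of that loop, with an ordinary ReentrantReadWriteLock standing in for the namesystem lock:)

import java.util.Iterator;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class LockYieldSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private static final int BLOCKS_PER_CHECK = 1000;  // illustrative limit

  <T> void scan(Iterator<T> blocks) {
    int checkedSinceLock = 0;
    lock.writeLock().lock();
    while (blocks.hasNext()) {
      if (checkedSinceLock >= BLOCKS_PER_CHECK) {
        lock.writeLock().unlock();        // let other lock waiters run
        try {
          Thread.sleep(0, 500);
        } catch (InterruptedException ignored) {
          return;                         // as in the patch: lock not held
        }
        checkedSinceLock = 0;
        lock.writeLock().lock();
      }
      checkedSinceLock++;
      process(blocks.next());
    }
    lock.writeLock().unlock();
  }

  private <T> void process(T block) { /* block bookkeeping elided */ }
}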





[19/50] hadoop git commit: HADOOP-13710. Suppress CachingGetSpaceUsed from logging interrupted exception stacktrace. Contributed by Hanisha Koneru.

2016-10-17 Thread umamahesh
HADOOP-13710. Suppress CachingGetSpaceUsed from logging interrupted exception 
stacktrace. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/008122b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/008122b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/008122b3

Branch: refs/heads/HDFS-10285
Commit: 008122b3c927767ac96dc876124bc591e10c9df4
Parents: 9097e2e
Author: Arpit Agarwal 
Authored: Thu Oct 13 11:37:03 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Oct 13 11:37:03 2016 -0700

--
 .../src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/008122b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
index 505f76d..a2b6980 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
@@ -177,7 +177,8 @@ public abstract class CachingGetSpaceUsed implements 
Closeable, GetSpaceUsed {
   // update the used variable
   spaceUsed.refresh();
 } catch (InterruptedException e) {
-  LOG.warn("Thread Interrupted waiting to refresh disk information", 
e);
+  LOG.warn("Thread Interrupted waiting to refresh disk information: "
+  + e.getMessage());
   Thread.currentThread().interrupt();
 }
   }
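
(Editor's note: two things happen in that catch block: the stack trace is dropped from the log for an expected shutdown signal, and the interrupt status is restored so the loop can terminate. The idiom in isolation:)

class InterruptIdiomSketch {
  void refreshLoop() {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        Thread.sleep(60000L);  // stand-in for the refresh interval
        // ... refresh the cached disk-usage figure here ...
      } catch (InterruptedException e) {
        // Message only; the trace adds nothing for a deliberate interrupt.
        System.err.println("Interrupted waiting to refresh: " + e.getMessage());
        Thread.currentThread().interrupt();  // restore the flag for the test
      }
    }
  }
}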





[18/50] hadoop git commit: HADOOP-13565. KerberosAuthenticationHandler#authenticate should not rebuild SPN based on client request. Contributed by Xiaoyu Yao.

2016-10-17 Thread umamahesh
HADOOP-13565. KerberosAuthenticationHandler#authenticate should not rebuild SPN 
based on client request. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9097e2ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9097e2ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9097e2ef

Branch: refs/heads/HDFS-10285
Commit: 9097e2efe4c92d83c8fab88dc11be84505a6cab5
Parents: b371c56
Author: Xiaoyu Yao 
Authored: Thu Oct 13 10:52:13 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Oct 13 10:52:28 2016 -0700

--
 .../authentication/server/KerberosAuthenticationHandler.java  | 7 +--
 1 file changed, 1 insertion(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9097e2ef/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index c6d1881..07c2a31 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -343,8 +343,6 @@ public class KerberosAuthenticationHandler implements 
AuthenticationHandler {
   authorization = 
authorization.substring(KerberosAuthenticator.NEGOTIATE.length()).trim();
   final Base64 base64 = new Base64(0);
   final byte[] clientToken = base64.decode(authorization);
-  final String serverName = InetAddress.getByName(request.getServerName())
-   .getCanonicalHostName();
   try {
token = Subject.doAs(serverSubject, new PrivilegedExceptionAction<AuthenticationToken>() {
 
@@ -354,10 +352,7 @@ public class KerberosAuthenticationHandler implements 
AuthenticationHandler {
 GSSContext gssContext = null;
 GSSCredential gssCreds = null;
 try {
-  gssCreds = gssManager.createCredential(
-  gssManager.createName(
-  KerberosUtil.getServicePrincipal("HTTP", serverName),
-  KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
+  gssCreds = gssManager.createCredential(null,
   GSSCredential.INDEFINITE_LIFETIME,
   new Oid[]{
 KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),





[22/50] hadoop git commit: Revert "HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. Contributed by Yiqun Lin."

2016-10-17 Thread umamahesh
Revert "HDFS-10990. TestPendingInvalidateBlock should wait for IBRs. 
Contributed by Yiqun Lin."

This reverts commit fdce515091f0a61ffd6c9ae464a68447dedf1124.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c721aa0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c721aa0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c721aa0

Branch: refs/heads/HDFS-10285
Commit: 8c721aa00a47a976959e3861ddd742f09db432fc
Parents: 332a61f
Author: Andrew Wang 
Authored: Thu Oct 13 13:23:12 2016 -0700
Committer: Andrew Wang 
Committed: Thu Oct 13 13:23:28 2016 -0700

--
 .../blockmanagement/TestPendingInvalidateBlock.java| 13 -
 1 file changed, 4 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c721aa0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 19f3178..696b2aa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -86,8 +86,6 @@ public class TestPendingInvalidateBlock {
   public void testPendingDeletion() throws Exception {
 final Path foo = new Path("/foo");
 DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
-DFSTestUtil.waitForReplication(dfs, foo, REPLICATION, 1);
-
 // restart NN
 cluster.restartNameNode(true);
 InvalidateBlocks invalidateBlocks =
@@ -100,7 +98,6 @@ public class TestPendingInvalidateBlock {
 "invalidateBlocks", mockIb);
 dfs.delete(foo, true);
 
-waitForNumPendingDeletionBlocks(REPLICATION);
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(REPLICATION, cluster.getNamesystem()
 .getPendingDeletionBlocks());
@@ -108,7 +105,7 @@ public class TestPendingInvalidateBlock {
 dfs.getPendingDeletionBlocksCount());
 Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
-waitForNumPendingDeletionBlocks(0);
+waitForBlocksToDelete();
 Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
 Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
@@ -185,7 +182,7 @@ public class TestPendingInvalidateBlock {
 Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
 cluster.restartNameNode(true);
-waitForNumPendingDeletionBlocks(0);
+waitForBlocksToDelete();
 Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
 Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
@@ -202,8 +199,7 @@ public class TestPendingInvalidateBlock {
 return cluster.getNamesystem().getUnderReplicatedBlocks();
   }
 
-  private void waitForNumPendingDeletionBlocks(int numBlocks)
-  throws Exception {
+  private void waitForBlocksToDelete() throws Exception {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
 
   @Override
@@ -211,8 +207,7 @@ public class TestPendingInvalidateBlock {
 try {
   cluster.triggerBlockReports();
 
-  if (cluster.getNamesystem().getPendingDeletionBlocks()
-  == numBlocks) {
+  if (cluster.getNamesystem().getPendingDeletionBlocks() == 0) {
 return true;
   }
 } catch (Exception e) {





[23/50] hadoop git commit: HADOOP-13024. Distcp with -delete feature on raw data not implemented. Contributed by Mavin Martin.

2016-10-17 Thread umamahesh
HADOOP-13024. Distcp with -delete feature on raw data not implemented. 
Contributed by Mavin Martin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a85d079
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a85d079
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a85d079

Branch: refs/heads/HDFS-10285
Commit: 0a85d079838f532a13ca237300386d1b3bc1b178
Parents: 8c721aa
Author: Jing Zhao 
Authored: Thu Oct 13 13:24:37 2016 -0700
Committer: Jing Zhao 
Committed: Thu Oct 13 13:24:54 2016 -0700

--
 .../apache/hadoop/tools/DistCpConstants.java| 12 +-
 .../hadoop/tools/mapred/CopyCommitter.java  |  5 ++-
 .../hadoop/tools/TestDistCpWithRawXAttrs.java   | 45 +---
 .../hadoop/tools/util/DistCpTestUtils.java  | 32 --
 4 files changed, 56 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
index 96f364c..6171aa9 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
@@ -18,6 +18,8 @@ package org.apache.hadoop.tools;
  * limitations under the License.
  */
 
+import org.apache.hadoop.fs.Path;
+
 /**
  * Utility class to hold commonly used constants.
  */
@@ -125,9 +127,17 @@ public class DistCpConstants {
   public static final int SPLIT_RATIO_DEFAULT  = 2;
 
   /**
+   * Constants for NONE file deletion
+   */
+  public static final String NONE_PATH_NAME = "/NONE";
+  public static final Path NONE_PATH = new Path(NONE_PATH_NAME);
+  public static final Path RAW_NONE_PATH = new Path(
+  DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME + NONE_PATH_NAME);
+
+  /**
* Value of reserved raw HDFS directory when copying raw.* xattrs.
*/
-  static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = "/.reserved/raw";
+  public static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = 
"/.reserved/raw";
 
   static final String HDFS_DISTCP_DIFF_DIRECTORY_NAME = ".distcp.diff.tmp";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 6d2fef5..dd653b2 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -238,7 +238,10 @@ public class CopyCommitter extends FileOutputCommitter {
List<Path> targets = new ArrayList<Path>(1);
 Path targetFinalPath = new 
Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
 targets.add(targetFinalPath);
-DistCpOptions options = new DistCpOptions(targets, new Path("/NONE"));
+Path resultNonePath = 
Path.getPathWithoutSchemeAndAuthority(targetFinalPath)
+
.toString().startsWith(DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME)
+? DistCpConstants.RAW_NONE_PATH : DistCpConstants.NONE_PATH;
+DistCpOptions options = new DistCpOptions(targets, resultNonePath);
 //
 // Set up options to be the same from the CopyListing.buildListing's 
perspective,
 // so to collect similar listings as when doing the copy
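
(Editor's note: the committer rebuilds a listing against a deliberately nonexistent source; the fix keeps that placeholder inside /.reserved/raw whenever the target is, so the raw-xattr preservation checks see a consistent pair. The classification step on its own, with illustrative paths:)

import org.apache.hadoop.fs.Path;

class RawNonePathSketch {
  static Path noneSourceFor(Path targetFinalPath) {
    Path bare = Path.getPathWithoutSchemeAndAuthority(targetFinalPath);
    // Mirrors the ternary added above.
    return bare.toString().startsWith("/.reserved/raw")
        ? new Path("/.reserved/raw/NONE")
        : new Path("/NONE");
  }
}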

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a85d079/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
index 5aef51a..8adc2cf 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
@@ -82,14 +82,7 @@ public class TestDistCpWithRawXAttrs {
 final String relDst = "/./.reserved/../.reserved/raw/../raw/dest/../dest";
 doTestPreserveRawXAttrs(relSrc, relDst, "-px", true, true,
 DistCpConstants.SUCCESS);
-

[06/50] hadoop git commit: HDFS-10984. Expose nntop output as metrics. Contributed by Siddharth Wagle.

2016-10-17 Thread umamahesh
HDFS-10984. Expose nntop output as metrics. Contributed by Siddharth Wagle.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61f0490a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61f0490a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61f0490a

Branch: refs/heads/HDFS-10285
Commit: 61f0490a73085bbaf6639d9234277e59dc1145db
Parents: dacd3ec
Author: Xiaoyu Yao 
Authored: Tue Oct 11 15:55:02 2016 -0700
Committer: Xiaoyu Yao 
Committed: Tue Oct 11 15:55:02 2016 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  |  6 ++
 .../server/namenode/top/metrics/TopMetrics.java | 67 ++--
 .../server/namenode/metrics/TestTopMetrics.java | 63 ++
 3 files changed, 129 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61f0490a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2471dc8..b9b02ef 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -89,6 +89,7 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*;
 import static org.apache.hadoop.util.Time.now;
 import static org.apache.hadoop.util.Time.monotonicNow;
+import static org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics.TOPMETRICS_METRICS_SOURCE_NAME;
 
 import java.io.BufferedWriter;
 import java.io.ByteArrayInputStream;
@@ -989,6 +990,11 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 // Add audit logger to calculate top users
 if (topConf.isEnabled) {
   topMetrics = new TopMetrics(conf, topConf.nntopReportingPeriodsMs);
+  if (DefaultMetricsSystem.instance().getSource(
+  TOPMETRICS_METRICS_SOURCE_NAME) == null) {
+
DefaultMetricsSystem.instance().register(TOPMETRICS_METRICS_SOURCE_NAME,
+"Top N operations by user", topMetrics);
+  }
   auditLoggers.add(new TopAuditLogger(topMetrics));
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61f0490a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
index ab55392..2719c88 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
@@ -17,24 +17,32 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.top.metrics;
 
-import java.net.InetAddress;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
 import com.google.common.collect.Lists;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
 import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
+import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op;
+import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.User;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.net.InetAddress;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
 import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;
 
 /**
@@ -58,8 +66,11 @@ import static 

[1/3] hadoop git commit: HDFS-10922. Adding additional unit tests for Trash (II). Contributed by Weiwei Yang.

2016-10-17 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 15ff590c3 -> f8b2d7720


HDFS-10922. Adding additional unit tests for Trash (II). Contributed by Weiwei 
Yang.

(cherry picked from commit f1802d0be05ecc0b3248690b6f9efedbc7784112)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8b2d772
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8b2d772
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8b2d772

Branch: refs/heads/branch-2.8
Commit: f8b2d77207bb69785694d52f5c0b24f0bead3271
Parents: 36dad0a
Author: Xiaoyu Yao 
Authored: Mon Oct 17 08:22:31 2016 -0700
Committer: Xiaoyu Yao 
Committed: Mon Oct 17 14:04:58 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  40 +
 .../apache/hadoop/hdfs/TestDFSPermission.java   |  23 +--
 .../org/apache/hadoop/hdfs/TestHDFSTrash.java   | 145 ++-
 3 files changed, 186 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8b2d772/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 5a2f524..67e9e54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -162,6 +162,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -1930,4 +1931,43 @@ public class DFSTestUtil {
 lastBlock.setNumBytes(len);
 return lastBlock;
   }
+
+  /**
+   * Close current file system and create a new instance as given
+   * {@link UserGroupInformation}.
+   */
+  public static FileSystem login(final FileSystem fs,
+  final Configuration conf, final UserGroupInformation ugi)
+  throws IOException, InterruptedException {
+if (fs != null) {
+  fs.close();
+}
+return DFSTestUtil.getFileSystemAs(ugi, conf);
+  }
+
+  /**
+   * Test if the given {@link FileStatus} user, group owner and its permission
+   * are expected, throw {@link AssertionError} if any value is not expected.
+   */
+  public static void verifyFilePermission(FileStatus stat, String owner,
+  String group, FsAction u, FsAction g, FsAction o) {
+if(stat != null) {
+  if(!Strings.isNullOrEmpty(owner)) {
+assertEquals(owner, stat.getOwner());
+  }
+  if(!Strings.isNullOrEmpty(group)) {
+assertEquals(group, stat.getGroup());
+  }
+  FsPermission permission = stat.getPermission();
+  if(u != null) {
+assertEquals(u, permission.getUserAction());
+  }
+  if (g != null) {
+assertEquals(g, permission.getGroupAction());
+  }
+  if (o != null) {
+assertEquals(o, permission.getOtherAction());
+  }
+}
+  }
 }
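
A hedged usage sketch of the two helpers above, roughly how a permission test would call them; the path, users, and expected actions are illustrative only.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.permission.FsAction;
  import org.apache.hadoop.fs.permission.FsPermission;
  import org.apache.hadoop.security.UserGroupInformation;

  // Illustrative: re-open the test file system as user1, create a directory,
  // then assert owner and permission bits. A null argument to
  // verifyFilePermission skips that particular check.
  static FileSystem exampleCheck(FileSystem fs, Configuration conf,
      UserGroupInformation user1) throws Exception {
    fs = DFSTestUtil.login(fs, conf, user1);   // close old fs, reopen as user1
    Path dir = new Path("/example/user1");     // hypothetical path
    fs.mkdirs(dir);
    fs.setPermission(dir, new FsPermission((short) 0755));
    FileStatus stat = fs.getFileStatus(dir);
    DFSTestUtil.verifyFilePermission(stat, user1.getShortUserName(),
        null /* skip group check */, FsAction.ALL,
        FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);
    return fs;
  }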

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8b2d772/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index e6524f3..0fe304d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
@@ -287,7 +288,7 @@ public class TestDFSPermission {
 fs.setPermission(new Path("/"),
 FsPermission.createImmutable((short)0777));
   }
-  
+
   /* check if the ownership of a file/directory is set correctly */
   @Test
   public void testOwnership() throws Exception {
@@ -324,7 +325,7 @@ public class TestDFSPermission {
 setOwner(FILE_DIR_PATH, USER1.getShortUserName(), GROUP3_NAME, false);
 
 // case 3: user1 changes 

[3/3] hadoop git commit: HADOOP-12984. Add GenericTestUtils.getTestDir method and use it for temporary directory in tests (Contributed by Steve Loughran and Vinayakumar B)

2016-10-17 Thread xyao
HADOOP-12984. Add GenericTestUtils.getTestDir method and use it for temporary directory in tests (Contributed by Steve Loughran and Vinayakumar B)

This closes #89

(cherry picked from commit 8d29e2451f5ca60f864c7ece16722c0abdd1c657)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36dad0ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36dad0ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36dad0ab

Branch: refs/heads/branch-2.8
Commit: 36dad0abca87e5b4bcb8f6c5635afab54288ce40
Parents: 15ff590
Author: Vinayakumar B 
Authored: Thu Apr 7 10:12:00 2016 +0530
Committer: Xiaoyu Yao 
Committed: Mon Oct 17 14:04:58 2016 -0700

--
 .../apache/hadoop/conf/TestConfiguration.java   |  8 +-
 .../crypto/TestCryptoStreamsForLocalFS.java |  5 +-
 .../apache/hadoop/crypto/key/TestKeyShell.java  |  5 +-
 .../org/apache/hadoop/fs/FSTestWrapper.java |  3 +-
 .../fs/FileContextMainOperationsBaseTest.java   |  4 +-
 .../apache/hadoop/fs/FileContextTestHelper.java |  6 +-
 .../apache/hadoop/fs/FileContextURIBase.java|  6 +-
 .../apache/hadoop/fs/FileSystemTestHelper.java  |  4 +-
 .../org/apache/hadoop/fs/TestAvroFSInput.java   |  9 +--
 .../hadoop/fs/TestChecksumFileSystem.java   |  5 +-
 .../org/apache/hadoop/fs/TestDFVariations.java  |  2 +-
 .../test/java/org/apache/hadoop/fs/TestDU.java  |  4 +-
 .../hadoop/fs/TestFileContextResolveAfs.java|  8 +-
 .../java/org/apache/hadoop/fs/TestFileUtil.java | 13 ++-
 .../org/apache/hadoop/fs/TestFsShellCopy.java   |  6 +-
 .../apache/hadoop/fs/TestFsShellReturnCode.java |  8 +-
 .../org/apache/hadoop/fs/TestFsShellTouch.java  |  6 +-
 .../hadoop/fs/TestGetFileBlockLocations.java|  5 +-
 .../hadoop/fs/TestHarFileSystemBasics.java  |  5 +-
 .../java/org/apache/hadoop/fs/TestHardLink.java |  5 +-
 .../org/apache/hadoop/fs/TestListFiles.java | 14 ++--
 .../apache/hadoop/fs/TestLocalFileSystem.java   |  9 ++-
 .../fs/TestLocalFileSystemPermission.java   | 18 ++---
 .../java/org/apache/hadoop/fs/TestPath.java |  6 +-
 .../java/org/apache/hadoop/fs/TestTrash.java|  6 +-
 .../apache/hadoop/fs/TestTruncatedInputBug.java |  4 +-
 .../hadoop/fs/sftp/TestSFTPFileSystem.java  |  3 +-
 .../apache/hadoop/fs/shell/TestPathData.java|  5 +-
 .../apache/hadoop/fs/shell/TestTextCommand.java |  4 +-
 .../hadoop/fs/viewfs/TestViewfsFileStatus.java  |  7 +-
 .../apache/hadoop/ha/ClientBaseWithFixes.java   |  4 +-
 .../http/TestAuthenticationSessionCookie.java   |  5 +-
 .../apache/hadoop/http/TestHttpCookieFlag.java  |  5 +-
 .../hadoop/http/TestHttpServerLifecycle.java|  5 +-
 .../apache/hadoop/http/TestSSLHttpServer.java   |  5 +-
 .../org/apache/hadoop/io/TestArrayFile.java |  6 +-
 .../org/apache/hadoop/io/TestBloomMapFile.java  |  6 +-
 .../java/org/apache/hadoop/io/TestMapFile.java  |  6 +-
 .../org/apache/hadoop/io/TestSequenceFile.java  | 48 +--
 .../hadoop/io/TestSequenceFileAppend.java   |  4 +-
 .../io/TestSequenceFileSerialization.java   |  4 +-
 .../apache/hadoop/io/TestSequenceFileSync.java  |  5 +-
 .../java/org/apache/hadoop/io/TestSetFile.java  |  5 +-
 .../apache/hadoop/io/compress/TestCodec.java| 23 +++---
 .../apache/hadoop/io/file/tfile/TestTFile.java  |  4 +-
 .../io/file/tfile/TestTFileByteArrays.java  |  4 +-
 .../io/file/tfile/TestTFileComparator2.java |  4 +-
 .../io/file/tfile/TestTFileComparators.java |  5 +-
 .../hadoop/io/file/tfile/TestTFileSeek.java |  4 +-
 .../file/tfile/TestTFileSeqFileComparison.java  |  5 +-
 .../hadoop/io/file/tfile/TestTFileSplit.java|  4 +-
 .../hadoop/io/file/tfile/TestTFileStreams.java  |  4 +-
 .../file/tfile/TestTFileUnsortedByteArrays.java |  5 +-
 .../apache/hadoop/io/file/tfile/TestVLong.java  |  4 +-
 .../apache/hadoop/io/nativeio/TestNativeIO.java | 10 +--
 .../TestSharedFileDescriptorFactory.java|  4 +-
 .../apache/hadoop/security/TestCredentials.java |  4 +-
 .../hadoop/security/TestLdapGroupsMapping.java  |  8 +-
 .../hadoop/security/alias/TestCredShell.java|  5 +-
 .../alias/TestCredentialProviderFactory.java|  5 +-
 .../hadoop/security/ssl/KeyStoreTestUtil.java   |  4 +-
 .../ssl/TestReloadingX509TrustManager.java  |  5 +-
 .../hadoop/security/ssl/TestSSLFactory.java |  6 +-
 .../apache/hadoop/test/GenericTestUtils.java| 84 +++-
 .../java/org/apache/hadoop/util/JarFinder.java  |  4 +-
 .../hadoop/util/TestApplicationClassLoader.java |  4 +-
 .../org/apache/hadoop/util/TestClasspath.java   |  5 +-
 .../hadoop/util/TestGenericOptionsParser.java   |  2 +-
 .../apache/hadoop/util/TestHostsFileReader.java |  4 +-
 .../org/apache/hadoop/util/TestJarFinder.java   |  9 ++-
 .../java/org/apache/hadoop/util/TestRunJar.java |  8 +-
 .../java/org/apache/hadoop/util/TestShell.java  |  4 +-
 

[2/3] hadoop git commit: HADOOP-12984. Add GenericTestUtils.getTestDir method and use it for temporary directory in tests (Contributed by Steve Loughran and Vinayakumar B)

2016-10-17 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/36dad0ab/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileStreams.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileStreams.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileStreams.java
index 6524c37..a108408 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileStreams.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileStreams.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.file.tfile.TFile.Reader;
 import org.apache.hadoop.io.file.tfile.TFile.Writer;
 import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner;
+import org.apache.hadoop.test.GenericTestUtils;
 
 /**
  * 
@@ -46,8 +47,7 @@ import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner;
  */
 
 public class TestTFileStreams {
-  private static String ROOT =
-  System.getProperty("test.build.data", "/tmp/tfile-test");
+  private static String ROOT = GenericTestUtils.getTestDir().getAbsolutePath();
 
   private final static int BLOCK_SIZE = 512;
   private final static int K = 1024;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36dad0ab/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileUnsortedByteArrays.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileUnsortedByteArrays.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileUnsortedByteArrays.java
index 235e5e4..f243b2a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileUnsortedByteArrays.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileUnsortedByteArrays.java
@@ -29,13 +29,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.file.tfile.TFile.Reader;
 import org.apache.hadoop.io.file.tfile.TFile.Writer;
 import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestTFileUnsortedByteArrays {
-  private static String ROOT =
-  System.getProperty("test.build.data", "/tmp/tfile-test");
-
+  private static String ROOT = GenericTestUtils.getTestDir().getAbsolutePath();
 
   private final static int BLOCK_SIZE = 512;
   private final static int BUF_SIZE = 64;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36dad0ab/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestVLong.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestVLong.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestVLong.java
index 9efd271..69e6eb8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestVLong.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestVLong.java
@@ -29,12 +29,12 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestVLong {
-  private static String ROOT =
-  System.getProperty("test.build.data", "/tmp/tfile-test");
+  private static String ROOT = GenericTestUtils.getTestDir().getAbsolutePath();
   private Configuration conf;
   private FileSystem fs;
   private Path path;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36dad0ab/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
index 13fdbc1..e6f25dc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
@@ -61,8 +61,7 @@ import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.Stat.*;
 public class TestNativeIO {
   static final Log LOG = LogFactory.getLog(TestNativeIO.class);
 
-  static final File TEST_DIR = new File(
-
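
The pattern of this change is mechanical; here is a standalone sketch of the replacement (the class and subdirectory names are illustrative; GenericTestUtils.getTestDir() is the helper this commit introduces):

  import java.io.File;
  import org.apache.hadoop.test.GenericTestUtils;

  public class ExampleScratchDir {
    // Before: System.getProperty("test.build.data", "/tmp/some-test")
    // After: resolve under the build tree via the shared helper, avoiding
    // collisions between tests that all defaulted to the same /tmp path.
    private static final File TEST_DIR = GenericTestUtils.getTestDir();

    public static void main(String[] args) {
      File scratch = new File(TEST_DIR, "example-scratch");  // illustrative subdir
      System.out.println("scratch dir: " + scratch.getAbsolutePath());
    }
  }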

hadoop git commit: YARN-5466. DefaultContainerExecutor needs JavaDocs (templedf via rkanter)

2016-10-17 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 61aceb87d -> e02c756f1


YARN-5466. DefaultContainerExecutor needs JavaDocs (templedf via rkanter)

(cherry picked from commit f5d92359145dfb820a9521e00e2d44c4ee96e67e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e02c756f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e02c756f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e02c756f

Branch: refs/heads/branch-2
Commit: e02c756f16d1dc896f9315934192cc19362b842a
Parents: 61aceb8
Author: Robert Kanter 
Authored: Mon Oct 17 14:29:09 2016 -0700
Committer: Robert Kanter 
Committed: Mon Oct 17 14:29:42 2016 -0700

--
 .../nodemanager/DefaultContainerExecutor.java   | 272 ---
 .../WindowsSecureContainerExecutor.java |   2 +-
 2 files changed, 231 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e02c756f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index 59b69ac..568c80b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -65,6 +65,11 @@ import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
 
+/**
+ * The {@code DefaultContainerExecutor} class offers generic container
+ * execution services. Process execution is handled in a platform-independent
+ * way via {@link ProcessBuilder}.
+ */
 public class DefaultContainerExecutor extends ContainerExecutor {
 
   private static final Log LOG = LogFactory
@@ -72,10 +77,17 @@ public class DefaultContainerExecutor extends ContainerExecutor {
 
   private static final int WIN_MAX_PATH = 260;
 
+  /**
+   * A {@link FileContext} for the local file system.
+   */
   protected final FileContext lfs;
 
   private String logDirPermissions = null;
 
+  /**
+   * Default constructor for use in testing.
+   */
+  @VisibleForTesting
   public DefaultContainerExecutor() {
 try {
   this.lfs = FileContext.getLocalFSFileContext();
@@ -84,15 +96,40 @@ public class DefaultContainerExecutor extends ContainerExecutor {
 }
   }
 
+  /**
+   * Create an instance with a given {@link FileContext}.
+   *
+   * @param lfs the given {@link FileContext}
+   */
   DefaultContainerExecutor(FileContext lfs) {
 this.lfs = lfs;
   }
 
+  /**
+   * Copy a file using the {@link #lfs} {@link FileContext}.
+   *
+   * @param src the file to copy
+   * @param dst where to copy the file
+   * @param owner the owner of the new copy. Used only in secure Windows
+   * clusters
+   * @throws IOException when the copy fails
+   * @see WindowsSecureContainerExecutor
+   */
   protected void copyFile(Path src, Path dst, String owner) throws IOException {
 lfs.util().copy(src, dst, false, true);
   }
   
-  protected void setScriptExecutable(Path script, String owner) throws IOException {
+  /**
+   * Make a file executable using the {@link #lfs} {@link FileContext}.
+   *
+   * @param script the path to make executable
+   * @param owner the new owner for the file. Used only in secure Windows
+   * clusters
+   * @throws IOException when the change mode operation fails
+   * @see WindowsSecureContainerExecutor
+   */
+  protected void setScriptExecutable(Path script, String owner)
+  throws IOException {
 lfs.setPermission(script, ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
   }
 
@@ -122,14 +159,16 @@ public class DefaultContainerExecutor extends ContainerExecutor {
 // randomly choose the local directory
 Path appStorageDir = getWorkingDir(localDirs, user, appId);
 
-String tokenFn = String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId);
+String tokenFn =
+String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId);
 Path tokenDst = new Path(appStorageDir, tokenFn);
 copyFile(nmPrivateContainerTokensPath, tokenDst, user);
-
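
Since the new class javadoc leans on {@link ProcessBuilder} for platform-independent process execution, here is a minimal, self-contained sketch of that JDK mechanism; it is not DefaultContainerExecutor's actual launch path, just the API the javadoc refers to.

  import java.io.IOException;

  public class ExampleLaunch {
    public static void main(String[] args)
        throws IOException, InterruptedException {
      // Inherit the parent's stdio so the child's output is visible;
      // the command itself is illustrative only.
      ProcessBuilder pb =
          new ProcessBuilder("sh", "-c", "echo container-launch").inheritIO();
      Process p = pb.start();
      int exitCode = p.waitFor();  // block until the child exits
      System.out.println("exit code: " + exitCode);
    }
  }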

hadoop git commit: YARN-5466. DefaultContainerExecutor needs JavaDocs (templedf via rkanter)

2016-10-17 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8fd4c37c4 -> f5d923591


YARN-5466. DefaultContainerExecutor needs JavaDocs (templedf via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5d92359
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5d92359
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5d92359

Branch: refs/heads/trunk
Commit: f5d92359145dfb820a9521e00e2d44c4ee96e67e
Parents: 8fd4c37
Author: Robert Kanter 
Authored: Mon Oct 17 14:29:09 2016 -0700
Committer: Robert Kanter 
Committed: Mon Oct 17 14:29:09 2016 -0700

--
 .../nodemanager/DefaultContainerExecutor.java   | 272 ---
 .../WindowsSecureContainerExecutor.java |   2 +-
 2 files changed, 231 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5d92359/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index 59b69ac..568c80b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -65,6 +65,11 @@ import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
 
+/**
+ * The {@code DefaultContainerExecutor} class offers generic container
+ * execution services. Process execution is handled in a platform-independent
+ * way via {@link ProcessBuilder}.
+ */
 public class DefaultContainerExecutor extends ContainerExecutor {
 
   private static final Log LOG = LogFactory
@@ -72,10 +77,17 @@ public class DefaultContainerExecutor extends ContainerExecutor {
 
   private static final int WIN_MAX_PATH = 260;
 
+  /**
+   * A {@link FileContext} for the local file system.
+   */
   protected final FileContext lfs;
 
   private String logDirPermissions = null;
 
+  /**
+   * Default constructor for use in testing.
+   */
+  @VisibleForTesting
   public DefaultContainerExecutor() {
 try {
   this.lfs = FileContext.getLocalFSFileContext();
@@ -84,15 +96,40 @@ public class DefaultContainerExecutor extends ContainerExecutor {
 }
   }
 
+  /**
+   * Create an instance with a given {@link FileContext}.
+   *
+   * @param lfs the given {@link FileContext}
+   */
   DefaultContainerExecutor(FileContext lfs) {
 this.lfs = lfs;
   }
 
+  /**
+   * Copy a file using the {@link #lfs} {@link FileContext}.
+   *
+   * @param src the file to copy
+   * @param dst where to copy the file
+   * @param owner the owner of the new copy. Used only in secure Windows
+   * clusters
+   * @throws IOException when the copy fails
+   * @see WindowsSecureContainerExecutor
+   */
   protected void copyFile(Path src, Path dst, String owner) throws IOException {
 lfs.util().copy(src, dst, false, true);
   }
   
-  protected void setScriptExecutable(Path script, String owner) throws IOException {
+  /**
+   * Make a file executable using the {@link #lfs} {@link FileContext}.
+   *
+   * @param script the path to make executable
+   * @param owner the new owner for the file. Used only in secure Windows
+   * clusters
+   * @throws IOException when the change mode operation fails
+   * @see WindowsSecureContainerExecutor
+   */
+  protected void setScriptExecutable(Path script, String owner)
+  throws IOException {
 lfs.setPermission(script, ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
   }
 
@@ -122,14 +159,16 @@ public class DefaultContainerExecutor extends ContainerExecutor {
 // randomly choose the local directory
 Path appStorageDir = getWorkingDir(localDirs, user, appId);
 
-String tokenFn = String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId);
+String tokenFn =
+String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId);
 Path tokenDst = new Path(appStorageDir, tokenFn);
 copyFile(nmPrivateContainerTokensPath, tokenDst, user);
-LOG.info("Copying from " + nmPrivateContainerTokensPath + " to " + tokenDst);
+

hadoop git commit: HDFS-10922. Adding additional unit tests for Trash (II). Contributed by Weiwei Yang.

2016-10-17 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4ed7cf3b3 -> 61aceb87d


HDFS-10922. Adding additional unit tests for Trash (II). Contributed by Weiwei 
Yang.

(cherry picked from commit f1802d0be05ecc0b3248690b6f9efedbc7784112)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61aceb87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61aceb87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61aceb87

Branch: refs/heads/branch-2
Commit: 61aceb87dfae9beeebefcc3fcffc31f5ad4fc610
Parents: 4ed7cf3
Author: Xiaoyu Yao 
Authored: Mon Oct 17 08:22:31 2016 -0700
Committer: Xiaoyu Yao 
Committed: Mon Oct 17 14:21:51 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  40 +
 .../apache/hadoop/hdfs/TestDFSPermission.java   |  30 ++--
 .../org/apache/hadoop/hdfs/TestHDFSTrash.java   | 145 ++-
 3 files changed, 189 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61aceb87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index bba13da..bb8b2d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -162,6 +162,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -1933,4 +1934,43 @@ public class DFSTestUtil {
 lastBlock.setNumBytes(len);
 return lastBlock;
   }
+
+  /**
+   * Close current file system and create a new instance as given
+   * {@link UserGroupInformation}.
+   */
+  public static FileSystem login(final FileSystem fs,
+  final Configuration conf, final UserGroupInformation ugi)
+  throws IOException, InterruptedException {
+if (fs != null) {
+  fs.close();
+}
+return DFSTestUtil.getFileSystemAs(ugi, conf);
+  }
+
+  /**
+   * Test if the given {@link FileStatus} user, group owner and its permission
+   * are expected, throw {@link AssertionError} if any value is not expected.
+   */
+  public static void verifyFilePermission(FileStatus stat, String owner,
+  String group, FsAction u, FsAction g, FsAction o) {
+if(stat != null) {
+  if(!Strings.isNullOrEmpty(owner)) {
+assertEquals(owner, stat.getOwner());
+  }
+  if(!Strings.isNullOrEmpty(group)) {
+assertEquals(group, stat.getGroup());
+  }
+  FsPermission permission = stat.getPermission();
+  if(u != null) {
+assertEquals(u, permission.getUserAction());
+  }
+  if (g != null) {
+assertEquals(g, permission.getGroupAction());
+  }
+  if (o != null) {
+assertEquals(o, permission.getOtherAction());
+  }
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61aceb87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index d0d00e5..2705e67 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -305,7 +305,7 @@ public class TestDFSPermission {
   fs.mkdirs(rootDir);
   fs.setPermission(rootDir, new FsPermission((short) 0777));
 
-  login(USER1);
+  fs = DFSTestUtil.login(fs, conf, USER1);
   fs.mkdirs(user1Dir);
   fs.setPermission(user1Dir, new FsPermission((short) 0755));
   fs.setOwner(user1Dir, USER1.getShortUserName(), GROUP2_NAME);
@@ -318,7 +318,7 @@ public class TestDFSPermission {
 // login as user2, attempt to delete /BSS/user1
 // this should fail because user2 has no permission to
 // its sub directory.
-login(USER2);
+fs = DFSTestUtil.login(fs, conf, USER2);
 fs.delete(user1Dir, true);
 fail("User2 should not be allowed to delete user1's dir.");
   } catch (AccessControlException e) {
@@ -331,7 +331,7 @@ 

hadoop git commit: HDFS-10922. Adding additional unit tests for Trash (II). Contributed by Weiwei Yang.

2016-10-17 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk b671ee684 -> 8fd4c37c4


HDFS-10922. Adding additional unit tests for Trash (II). Contributed by Weiwei 
Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fd4c37c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fd4c37c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fd4c37c

Branch: refs/heads/trunk
Commit: 8fd4c37c45585d761d279f2f6032ff9c6c049895
Parents: b671ee6
Author: Xiaoyu Yao 
Authored: Mon Oct 17 08:22:31 2016 -0700
Committer: Xiaoyu Yao 
Committed: Mon Oct 17 14:21:36 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  40 +
 .../apache/hadoop/hdfs/TestDFSPermission.java   |  30 ++--
 .../org/apache/hadoop/hdfs/TestHDFSTrash.java   | 145 ++-
 3 files changed, 189 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fd4c37c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index f80cd78..963aaa6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -70,6 +70,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -2014,4 +2015,43 @@ public class DFSTestUtil {
   }
 }, 1000, 6);
   }
+
+  /**
+   * Close current file system and create a new instance as given
+   * {@link UserGroupInformation}.
+   */
+  public static FileSystem login(final FileSystem fs,
+  final Configuration conf, final UserGroupInformation ugi)
+  throws IOException, InterruptedException {
+if (fs != null) {
+  fs.close();
+}
+return DFSTestUtil.getFileSystemAs(ugi, conf);
+  }
+
+  /**
+   * Test if the given {@link FileStatus} user, group owner and its permission
+   * are expected, throw {@link AssertionError} if any value is not expected.
+   */
+  public static void verifyFilePermission(FileStatus stat, String owner,
+  String group, FsAction u, FsAction g, FsAction o) {
+if(stat != null) {
+  if(!Strings.isNullOrEmpty(owner)) {
+assertEquals(owner, stat.getOwner());
+  }
+  if(!Strings.isNullOrEmpty(group)) {
+assertEquals(group, stat.getGroup());
+  }
+  FsPermission permission = stat.getPermission();
+  if(u != null) {
+assertEquals(u, permission.getUserAction());
+  }
+  if (g != null) {
+assertEquals(g, permission.getGroupAction());
+  }
+  if (o != null) {
+assertEquals(o, permission.getOtherAction());
+  }
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fd4c37c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index d0d00e5..2705e67 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -305,7 +305,7 @@ public class TestDFSPermission {
   fs.mkdirs(rootDir);
   fs.setPermission(rootDir, new FsPermission((short) 0777));
 
-  login(USER1);
+  fs = DFSTestUtil.login(fs, conf, USER1);
   fs.mkdirs(user1Dir);
   fs.setPermission(user1Dir, new FsPermission((short) 0755));
   fs.setOwner(user1Dir, USER1.getShortUserName(), GROUP2_NAME);
@@ -318,7 +318,7 @@ public class TestDFSPermission {
 // login as user2, attempt to delete /BSS/user1
 // this should fail because user2 has no permission to
 // its sub directory.
-login(USER2);
+fs = DFSTestUtil.login(fs, conf, USER2);
 fs.delete(user1Dir, true);
 fail("User2 should not be allowed to delete user1's dir.");
   } catch (AccessControlException e) {
@@ -331,7 +331,7 @@ public class TestDFSPermission {
   assertTrue(fs.exists(user1Dir));
 
   try {
-

hadoop git commit: HDFS-11013. Correct typos in native erasure coding dump code. Contributed by László Bence Nagy.

2016-10-17 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 987ee5114 -> b671ee684


HDFS-11013. Correct typos in native erasure coding dump code. Contributed by 
László Bence Nagy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b671ee68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b671ee68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b671ee68

Branch: refs/heads/trunk
Commit: b671ee6846b79a6d106efed7cf7e1209b2cc408d
Parents: 987ee51
Author: Andrew Wang 
Authored: Mon Oct 17 14:14:50 2016 -0700
Committer: Andrew Wang 
Committed: Mon Oct 17 14:14:50 2016 -0700

--
 .../main/native/src/org/apache/hadoop/io/erasurecode/dump.c  | 8 
 .../native/src/org/apache/hadoop/io/erasurecode/isal_load.h  | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b671ee68/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
--
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
index 20bd189..e48032e 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/dump.c
@@ -57,11 +57,11 @@ void dumpCodingMatrix(unsigned char* buf, int n1, int n2) {
 
 void dumpEncoder(IsalEncoder* pCoder) {
   int numDataUnits = pCoder->coder.numDataUnits;
-  int numParityUnits = pCoder->coder.numDataUnits;
+  int numParityUnits = pCoder->coder.numParityUnits;
   int numAllUnits = pCoder->coder.numAllUnits;
 
-  printf("Encoding (numAlnumParityUnitslUnits = %d, numDataUnits = %d)\n",
-numParityUnits, numDataUnits);
+  printf("Encoding (numAllUnits = %d, numParityUnits = %d, numDataUnits = %d)\n",
+numAllUnits, numParityUnits, numDataUnits);
 
   printf("\n\nEncodeMatrix:\n");
   dumpCodingMatrix((unsigned char*) pCoder->encodeMatrix,
@@ -91,7 +91,7 @@ void dumpDecoder(IsalDecoder* pCoder) {
 
   printf("InvertMatrix:\n");
   dumpCodingMatrix((unsigned char*) pCoder->invertMatrix,
-   numDataUnits, numDataUnits);
+   numDataUnits, numAllUnits);
 
   printf("DecodeMatrix:\n");
   dumpCodingMatrix((unsigned char*) pCoder->decodeMatrix,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b671ee68/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
--
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
index 7cb7a6a..c46a531 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.h
@@ -57,7 +57,7 @@ typedef void (*__d_ec_encode_data_update)(int, int, int, int, unsigned char*,
 #endif
 
 #ifdef WINDOWS
-// For erasure_code.h
+// For gf_util.h
 typedef unsigned char (__cdecl *__d_gf_mul)(unsigned char, unsigned char);
 typedef unsigned char (__cdecl *__d_gf_inv)(unsigned char);
 typedef void (__cdecl *__d_gf_gen_rs_matrix)(unsigned char *, int, int);





[2/2] hadoop git commit: HADOOP-13724. Fix a few typos in site markdown documents. Contributed by Ding Fei.

2016-10-17 Thread wang
HADOOP-13724. Fix a few typos in site markdown documents. Contributed by Ding 
Fei.

(cherry picked from commit 987ee51141a15d3f4d1df4dc792a192b92b87b5f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ed7cf3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ed7cf3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ed7cf3b

Branch: refs/heads/branch-2
Commit: 4ed7cf3b362f94367c57d012608213f46f0e16e8
Parents: fbdb23d
Author: Andrew Wang 
Authored: Mon Oct 17 13:25:58 2016 -0700
Committer: Andrew Wang 
Committed: Mon Oct 17 13:32:39 2016 -0700

--
 .../src/site/markdown/ClusterSetup.md   |  2 +-
 .../src/site/markdown/Compatibility.md  | 16 +--
 .../site/markdown/InterfaceClassification.md| 28 ++--
 .../src/site/markdown/filesystem/filesystem.md  | 17 ++--
 .../markdown/filesystem/fsdatainputstream.md| 16 +--
 .../site/markdown/filesystem/introduction.md| 12 -
 .../src/site/markdown/filesystem/model.md   |  7 ++---
 .../src/site/markdown/filesystem/notation.md|  2 +-
 .../src/site/markdown/filesystem/testing.md |  4 +--
 .../src/site/markdown/HadoopArchives.md.vm  |  2 +-
 10 files changed, 53 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ed7cf3b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
index 7d2d38f..66c25e5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
@@ -35,7 +35,7 @@ Installation
 
 Installing a Hadoop cluster typically involves unpacking the software on all 
the machines in the cluster or installing it via a packaging system as 
appropriate for your operating system. It is important to divide up the 
hardware into functions.
 
-Typically one machine in the cluster is designated as the NameNode and another 
machine the as ResourceManager, exclusively. These are the masters. Other 
services (such as Web App Proxy Server and MapReduce Job History server) are 
usually run either on dedicated hardware or on shared infrastrucutre, depending 
upon the load.
+Typically one machine in the cluster is designated as the NameNode and another 
machine as the ResourceManager, exclusively. These are the masters. Other 
services (such as Web App Proxy Server and MapReduce Job History server) are 
usually run either on dedicated hardware or on shared infrastructure, depending 
upon the load.
 
 The rest of the machines in the cluster act as both DataNode and NodeManager. 
These are the slaves.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ed7cf3b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index c275518..a7ded24 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -68,7 +68,7 @@ Wire compatibility concerns data being transmitted over the wire between Hadoop
  Use Cases
 
 * Client-Server compatibility is required to allow users to continue using the 
old clients even after upgrading the server (cluster) to a later version (or 
vice versa). For example, a Hadoop 2.1.0 client talking to a Hadoop 2.3.0 
cluster.
-* Client-Server compatibility is also required to allow users to upgrade the 
client before upgrading the server (cluster). For example, a Hadoop 2.4.0 
client talking to a Hadoop 2.3.0 cluster. This allows deployment of client-side 
bug fixes ahead of full cluster upgrades. Note that new cluster features 
invoked by new client APIs or shell commands will not be usable. YARN 
applications that attempt to use new APIs (including new fields in data 
structures) that have not yet deployed to the cluster can expect link 
exceptions.
+* Client-Server compatibility is also required to allow users to upgrade the 
client before upgrading the server (cluster). For example, a Hadoop 2.4.0 
client talking to a Hadoop 2.3.0 cluster. This allows deployment of client-side 
bug fixes ahead of full cluster upgrades. Note that new cluster features 
invoked by new client APIs or shell commands will not be usable. YARN 
applications that attempt to use new APIs (including new fields in data 
structures) 

hadoop git commit: HADOOP-13724. Fix a few typos in site markdown documents. Contributed by Ding Fei.

2016-10-17 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0f4afc810 -> 987ee5114


HADOOP-13724. Fix a few typos in site markdown documents. Contributed by Ding 
Fei.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/987ee511
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/987ee511
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/987ee511

Branch: refs/heads/trunk
Commit: 987ee51141a15d3f4d1df4dc792a192b92b87b5f
Parents: 0f4afc8
Author: Andrew Wang 
Authored: Mon Oct 17 13:25:58 2016 -0700
Committer: Andrew Wang 
Committed: Mon Oct 17 13:25:58 2016 -0700

--
 .../src/site/markdown/ClusterSetup.md   |  2 +-
 .../src/site/markdown/Compatibility.md  | 16 +--
 .../site/markdown/InterfaceClassification.md| 28 ++--
 .../src/site/markdown/filesystem/filesystem.md  | 17 ++--
 .../markdown/filesystem/fsdatainputstream.md| 16 +--
 .../site/markdown/filesystem/introduction.md| 12 -
 .../src/site/markdown/filesystem/model.md   |  7 ++---
 .../src/site/markdown/filesystem/notation.md|  2 +-
 .../src/site/markdown/filesystem/testing.md |  4 +--
 .../src/site/markdown/HadoopArchives.md.vm  |  2 +-
 10 files changed, 53 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/987ee511/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
index f222769..56b43e6 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
@@ -35,7 +35,7 @@ Installation
 
 Installing a Hadoop cluster typically involves unpacking the software on all 
the machines in the cluster or installing it via a packaging system as 
appropriate for your operating system. It is important to divide up the 
hardware into functions.
 
-Typically one machine in the cluster is designated as the NameNode and another 
machine the as ResourceManager, exclusively. These are the masters. Other 
services (such as Web App Proxy Server and MapReduce Job History server) are 
usually run either on dedicated hardware or on shared infrastrucutre, depending 
upon the load.
+Typically one machine in the cluster is designated as the NameNode and another 
machine as the ResourceManager, exclusively. These are the masters. Other 
services (such as Web App Proxy Server and MapReduce Job History server) are 
usually run either on dedicated hardware or on shared infrastructure, depending 
upon the load.
 
 The rest of the machines in the cluster act as both DataNode and NodeManager. 
These are the workers.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/987ee511/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index d7827b5..05b18b5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -68,7 +68,7 @@ Wire compatibility concerns data being transmitted over the wire between Hadoop
  Use Cases
 
 * Client-Server compatibility is required to allow users to continue using the 
old clients even after upgrading the server (cluster) to a later version (or 
vice versa). For example, a Hadoop 2.1.0 client talking to a Hadoop 2.3.0 
cluster.
-* Client-Server compatibility is also required to allow users to upgrade the 
client before upgrading the server (cluster). For example, a Hadoop 2.4.0 
client talking to a Hadoop 2.3.0 cluster. This allows deployment of client-side 
bug fixes ahead of full cluster upgrades. Note that new cluster features 
invoked by new client APIs or shell commands will not be usable. YARN 
applications that attempt to use new APIs (including new fields in data 
structures) that have not yet deployed to the cluster can expect link 
exceptions.
+* Client-Server compatibility is also required to allow users to upgrade the 
client before upgrading the server (cluster). For example, a Hadoop 2.4.0 
client talking to a Hadoop 2.3.0 cluster. This allows deployment of client-side 
bug fixes ahead of full cluster upgrades. Note that new cluster features 
invoked by new client APIs or shell commands will not be usable. YARN 
applications that attempt to use new APIs (including new fields in data 

[2/2] hadoop git commit: HADOOP-13724. Fix a few typos in site markdown documents. Contributed by Ding Fei.

2016-10-17 Thread wang
HADOOP-13724. Fix a few typos in site markdown documents. Contributed by Ding 
Fei.

(cherry picked from commit 987ee51141a15d3f4d1df4dc792a192b92b87b5f)
(cherry picked from commit 4ed7cf3b362f94367c57d012608213f46f0e16e8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15ff590c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15ff590c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15ff590c

Branch: refs/heads/branch-2.8
Commit: 15ff590c375c2e2abc8d5e68938a373caeaaea7f
Parents: 9d473b8
Author: Andrew Wang 
Authored: Mon Oct 17 13:25:58 2016 -0700
Committer: Andrew Wang 
Committed: Mon Oct 17 13:32:52 2016 -0700

--
 .../src/site/markdown/ClusterSetup.md   |  2 +-
 .../src/site/markdown/Compatibility.md  | 16 +--
 .../site/markdown/InterfaceClassification.md| 28 ++--
 .../src/site/markdown/filesystem/filesystem.md  | 17 ++--
 .../markdown/filesystem/fsdatainputstream.md| 16 +--
 .../site/markdown/filesystem/introduction.md| 12 -
 .../src/site/markdown/filesystem/model.md   |  7 ++---
 .../src/site/markdown/filesystem/notation.md|  2 +-
 .../src/site/markdown/filesystem/testing.md |  4 +--
 .../src/site/markdown/HadoopArchives.md.vm  |  2 +-
 10 files changed, 53 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15ff590c/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
index 7d2d38f..66c25e5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
@@ -35,7 +35,7 @@ Installation
 
 Installing a Hadoop cluster typically involves unpacking the software on all 
the machines in the cluster or installing it via a packaging system as 
appropriate for your operating system. It is important to divide up the 
hardware into functions.
 
-Typically one machine in the cluster is designated as the NameNode and another 
machine the as ResourceManager, exclusively. These are the masters. Other 
services (such as Web App Proxy Server and MapReduce Job History server) are 
usually run either on dedicated hardware or on shared infrastrucutre, depending 
upon the load.
+Typically one machine in the cluster is designated as the NameNode and another 
machine as the ResourceManager, exclusively. These are the masters. Other 
services (such as Web App Proxy Server and MapReduce Job History server) are 
usually run either on dedicated hardware or on shared infrastructure, depending 
upon the load.
 
 The rest of the machines in the cluster act as both DataNode and NodeManager. 
These are the slaves.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15ff590c/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index c275518..a7ded24 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -68,7 +68,7 @@ Wire compatibility concerns data being transmitted over the wire between Hadoop
  Use Cases
 
 * Client-Server compatibility is required to allow users to continue using the 
old clients even after upgrading the server (cluster) to a later version (or 
vice versa). For example, a Hadoop 2.1.0 client talking to a Hadoop 2.3.0 
cluster.
-* Client-Server compatibility is also required to allow users to upgrade the 
client before upgrading the server (cluster). For example, a Hadoop 2.4.0 
client talking to a Hadoop 2.3.0 cluster. This allows deployment of client-side 
bug fixes ahead of full cluster upgrades. Note that new cluster features 
invoked by new client APIs or shell commands will not be usable. YARN 
applications that attempt to use new APIs (including new fields in data 
structures) that have not yet deployed to the cluster can expect link 
exceptions.
+* Client-Server compatibility is also required to allow users to upgrade the 
client before upgrading the server (cluster). For example, a Hadoop 2.4.0 
client talking to a Hadoop 2.3.0 cluster. This allows deployment of client-side 
bug fixes ahead of full cluster upgrades. Note that new cluster features 
invoked by new client APIs or shell commands will not be usable. YARN 
applications 

[1/2] hadoop git commit: HADOOP-11559. Add links to RackAwareness and InterfaceClassification to site index (Masatake Iwasaki via aw)

2016-10-17 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7993fb5b8 -> 4ed7cf3b3


HADOOP-11559. Add links to RackAwareness and InterfaceClassification to site 
index (Masatake Iwasaki via aw)

(cherry picked from commit 7eeca90daabd74934d4c94af6f07fd598abdb4ed)

 Conflicts:
hadoop-common-project/hadoop-common/CHANGES.txt
hadoop-project/src/site/site.xml


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbdb23d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbdb23d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbdb23d2

Branch: refs/heads/branch-2
Commit: fbdb23d2afa993f96f073b9c4208282e8a280016
Parents: 7993fb5
Author: Allen Wittenauer 
Authored: Tue Feb 10 17:06:03 2015 -0800
Committer: Andrew Wang 
Committed: Mon Oct 17 13:32:28 2016 -0700

--
 .../site/markdown/InterfaceClassification.md| 204 +++
 .../src/site/markdown/RackAwareness.md  |  54 -
 hadoop-project/src/site/site.xml|   2 +-
 3 files changed, 204 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbdb23d2/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
index 493b0dd..07abdac 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
@@ -20,80 +20,196 @@ Hadoop Interface Taxonomy: Audience and Stability Classification
 Motivation
 --
 
-The interface taxonomy classification provided here is for guidance to 
developers and users of interfaces. The classification guides a developer to 
declare the targeted audience or users of an interface and also its stability.
+The interface taxonomy classification provided here is for guidance to
+developers and users of interfaces. The classification guides a developer to
+declare the targeted audience or users of an interface and also its stability.
 
 * Benefits to the user of an interface: Knows which interfaces to use or not 
use and their stability.
-* Benefits to the developer: to prevent accidental changes of interfaces and 
hence accidental impact on users or other components or system. This is 
particularly useful in large systems with many developers who may not all have 
a shared state/history of the project.
+
+* Benefits to the developer: to prevent accidental changes of interfaces and
+  hence accidental impact on users or other components or system. This is
+  particularly useful in large systems with many developers who may not all 
have
+  a shared state/history of the project.
 
 Interface Classification
 
 
-Hadoop adopts the following interface classification, this classification was 
derived from the [OpenSolaris 
taxonomy](http://www.opensolaris.org/os/community/arc/policies/interface-taxonomy/#Advice)
 and, to some extent, from taxonomy used inside Yahoo. Interfaces have two main 
attributes: Audience and Stability
+Hadoop adopts the following interface classification,
+this classification was derived from the
+[OpenSolaris 
taxonomy](http://www.opensolaris.org/os/community/arc/policies/interface-taxonomy/#Advice)
+and, to some extent, from taxonomy used inside Yahoo.
+Interfaces have two main attributes: Audience and Stability
 
 ### Audience
 
-Audience denotes the potential consumers of the interface. While many 
interfaces are internal/private to the implementation, other are 
public/external interfaces are meant for wider consumption by applications 
and/or clients. For example, in posix, libc is an external or public interface, 
while large parts of the kernel are internal or private interfaces. Also, some 
interfaces are targeted towards other specific subsystems.
+Audience denotes the potential consumers of the interface. While many 
interfaces
+are internal/private to the implementation, other are public/external 
interfaces
+are meant for wider consumption by applications and/or clients. For example, in
+posix, libc is an external or public interface, while large parts of the kernel
+are internal or private interfaces. Also, some interfaces are targeted towards
+other specific subsystems.
 
-Identifying the audience of an interface helps define the impact of breaking it. For instance, it might be okay to break the compatibility of an interface whose audience is a small number of specific subsystems. On the other hand, it is probably not okay to break a protocol interfaces that

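In code, the Audience and Stability attributes described above surface as the annotations in org.apache.hadoop.classification. A minimal sketch, using an illustrative class of our own rather than anything from this patch:

    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;

    /** Public and stable: applications may depend on this class. */
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    public class ExamplePublicApi {

      /** Shared only with the named subsystems; may change across minor releases. */
      @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
      @InterfaceStability.Evolving
      public void limitedHelper() {
        // Intentionally empty; only the annotations matter for this sketch.
      }
    }

Breaking limitedHelper() would affect only HDFS and MapReduce, while breaking anything public on ExamplePublicApi could affect any downstream application.
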
[1/2] hadoop git commit: HADOOP-11559. Add links to RackAwareness and InterfaceClassification to site index (Masatake Iwasaki via aw)

2016-10-17 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 b2667441e -> 15ff590c3


HADOOP-11559. Add links to RackAwareness and InterfaceClassification to site index (Masatake Iwasaki via aw)

(cherry picked from commit 7eeca90daabd74934d4c94af6f07fd598abdb4ed)

 Conflicts:
hadoop-common-project/hadoop-common/CHANGES.txt
hadoop-project/src/site/site.xml

(cherry picked from commit fbdb23d2afa993f96f073b9c4208282e8a280016)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d473b8d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d473b8d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d473b8d

Branch: refs/heads/branch-2.8
Commit: 9d473b8ddcea659cf362fa0f9331b10b9b4dfb7d
Parents: b266744
Author: Allen Wittenauer 
Authored: Tue Feb 10 17:06:03 2015 -0800
Committer: Andrew Wang 
Committed: Mon Oct 17 13:32:51 2016 -0700

--
 .../site/markdown/InterfaceClassification.md| 204 +++
 .../src/site/markdown/RackAwareness.md  |  54 -
 hadoop-project/src/site/site.xml|   2 +-
 3 files changed, 204 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d473b8d/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
index 493b0dd..07abdac 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
@@ -20,80 +20,196 @@ Hadoop Interface Taxonomy: Audience and Stability Classification
 Motivation
 --
 
-The interface taxonomy classification provided here is for guidance to developers and users of interfaces. The classification guides a developer to declare the targeted audience or users of an interface and also its stability.
+The interface taxonomy classification provided here is for guidance to
+developers and users of interfaces. The classification guides a developer to
+declare the targeted audience or users of an interface and also its stability.
 
 * Benefits to the user of an interface: Knows which interfaces to use or not use and their stability.
-* Benefits to the developer: to prevent accidental changes of interfaces and hence accidental impact on users or other components or system. This is particularly useful in large systems with many developers who may not all have a shared state/history of the project.
+
+* Benefits to the developer: to prevent accidental changes of interfaces and
+  hence accidental impact on users or other components or system. This is
+  particularly useful in large systems with many developers who may not all have
+  a shared state/history of the project.
 
 Interface Classification
 
 
-Hadoop adopts the following interface classification, this classification was derived from the [OpenSolaris taxonomy](http://www.opensolaris.org/os/community/arc/policies/interface-taxonomy/#Advice) and, to some extent, from taxonomy used inside Yahoo. Interfaces have two main attributes: Audience and Stability
+Hadoop adopts the following interface classification,
+this classification was derived from the
+[OpenSolaris taxonomy](http://www.opensolaris.org/os/community/arc/policies/interface-taxonomy/#Advice)
+and, to some extent, from taxonomy used inside Yahoo.
+Interfaces have two main attributes: Audience and Stability
 
 ### Audience
 
-Audience denotes the potential consumers of the interface. While many interfaces are internal/private to the implementation, other are public/external interfaces are meant for wider consumption by applications and/or clients. For example, in posix, libc is an external or public interface, while large parts of the kernel are internal or private interfaces. Also, some interfaces are targeted towards other specific subsystems.
+Audience denotes the potential consumers of the interface. While many interfaces
+are internal/private to the implementation, other are public/external interfaces
+are meant for wider consumption by applications and/or clients. For example, in
+posix, libc is an external or public interface, while large parts of the kernel
+are internal or private interfaces. Also, some interfaces are targeted towards
+other specific subsystems.
 
-Identifying the audience of an interface helps define the impact of breaking it. For instance, it might be okay to break the compatibility of an interface whose audience is a small number of specific subsystems. On the

hadoop git commit: HADOOP-13722. Code cleanup -- ViewFileSystem and InodeTree. Contributed by Manoj Govindassamy.

2016-10-17 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 412c4c9a3 -> 0f4afc810


HADOOP-13722. Code cleanup -- ViewFileSystem and InodeTree. Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f4afc81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f4afc81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f4afc81

Branch: refs/heads/trunk
Commit: 0f4afc81009129bbee89d5b6cf22c8dda612d223
Parents: 412c4c9
Author: Andrew Wang 
Authored: Mon Oct 17 13:15:11 2016 -0700
Committer: Andrew Wang 
Committed: Mon Oct 17 13:15:11 2016 -0700

--
 .../org/apache/hadoop/fs/viewfs/InodeTree.java  | 206 +--
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |  91 
 .../hadoop/fs/viewfs/TestViewFsConfig.java  |  42 ++--
 3 files changed, 155 insertions(+), 184 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f4afc81/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index 8c42cdf..a485a3b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -36,47 +36,45 @@ import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
-
 /**
  * InodeTree implements a mount-table as a tree of inodes.
  * It is used to implement ViewFs and ViewFileSystem.
  * In order to use it the caller must subclass it and implement
  * the abstract methods {@link #getTargetFileSystem(INodeDir)}, etc.
- * 
+ *
  * The mountable is initialized from the config variables as 
  * specified in {@link ViewFs}
  *
 * @param <T> is AbstractFileSystem or FileSystem
- * 
- * The three main methods are
- * {@link #InodeTreel(Configuration)} // constructor
+ *
+ * The two main methods are
  * {@link #InodeTree(Configuration, String)} // constructor
  * {@link #resolve(String, boolean)} 
  */
 
 @InterfaceAudience.Private
-@InterfaceStability.Unstable 
+@InterfaceStability.Unstable
abstract class InodeTree<T> {
-  static enum ResultKind {isInternalDir, isExternalDir;};
+  enum ResultKind {
+INTERNAL_DIR,
+EXTERNAL_DIR
+  }
+
   static final Path SlashPath = new Path("/");
-  
-  final INodeDir<T> root; // the root of the mount table
-  
-  final String homedirPrefix; // the homedir config value for this mount table
-  
-  List<MountPoint<T>> mountPoints = new ArrayList<MountPoint<T>>();
-  
-  
+  private final INodeDir<T> root; // the root of the mount table
+  private final String homedirPrefix; // the homedir for this mount table
+  private List<MountPoint<T>> mountPoints = new ArrayList<MountPoint<T>>();
+
   static class MountPoint<T> {
 String src;
 INodeLink<T> target;
+
 MountPoint(String srcPath, INodeLink<T> mountLink) {
   src = srcPath;
   target = mountLink;
 }
-
   }
-  
+
   /**
* Breaks file path into component names.
* @param path
@@ -84,18 +82,19 @@ abstract class InodeTree {
*/
   static String[] breakIntoPathComponents(final String path) {
 return path == null ? null : path.split(Path.SEPARATOR);
-  } 
-  
+  }
+
   /**
* Internal class for inode tree
 * @param <T>
*/
   abstract static class INode<T> {
 final String fullPath; // the full path to the root
+
 public INode(String pathToNode, UserGroupInformation aUgi) {
   fullPath = pathToNode;
 }
-  };
+  }
 
   /**
* Internal class to represent an internal dir of the mount table
@@ -105,37 +104,28 @@ abstract class InodeTree {
 final Map<String, INode<T>> children = new HashMap<String, INode<T>>();
 T InodeDirFs =  null; // file system of this internal directory of mountT
 boolean isRoot = false;
-
+
 INodeDir(final String pathToNode, final UserGroupInformation aUgi) {
   super(pathToNode, aUgi);
 }
 
-

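One behavior worth noting in the InodeTree code above: breakIntoPathComponents delegates to String.split, so an absolute path yields a leading empty component. A standalone sketch of that semantics (the demo class name is ours, not Hadoop's):

    import java.util.Arrays;

    public class PathSplitDemo {
      public static void main(String[] args) {
        // Mirrors InodeTree.breakIntoPathComponents(path), i.e. path.split(Path.SEPARATOR)
        String[] components = "/user/alice".split("/");
        // Prints [, user, alice] -- index 0 is the empty string before the root slash
        System.out.println(Arrays.toString(components));
      }
    }

This is why resolve-style callers walking the mount tree typically start from component index 1 for absolute paths.
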
hadoop git commit: HDFS-9820. Improve distcp to support efficient restore to an earlier snapshot. Contributed by Yongjun Zhang.

2016-10-17 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/trunk ed9fcbec5 -> 412c4c9a3


HDFS-9820. Improve distcp to support efficient restore to an earlier snapshot. Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/412c4c9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/412c4c9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/412c4c9a

Branch: refs/heads/trunk
Commit: 412c4c9a342b73bf1c1a7f43ea91245cbf94d02d
Parents: ed9fcbe
Author: Yongjun Zhang 
Authored: Fri Oct 14 15:17:33 2016 -0700
Committer: Yongjun Zhang 
Committed: Mon Oct 17 11:04:42 2016 -0700

--
 .../java/org/apache/hadoop/tools/DiffInfo.java  |  47 +-
 .../java/org/apache/hadoop/tools/DistCp.java|  34 +-
 .../apache/hadoop/tools/DistCpConstants.java|   1 +
 .../apache/hadoop/tools/DistCpOptionSwitch.java |   5 +
 .../org/apache/hadoop/tools/DistCpOptions.java  |  79 +-
 .../org/apache/hadoop/tools/DistCpSync.java | 256 --
 .../org/apache/hadoop/tools/OptionsParser.java  |  27 +-
 .../apache/hadoop/tools/SimpleCopyListing.java  |  17 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java |   4 +-
 .../hadoop/tools/TestDistCpSyncReverseBase.java | 868 +++
 .../tools/TestDistCpSyncReverseFromSource.java  |  36 +
 .../tools/TestDistCpSyncReverseFromTarget.java  |  36 +
 .../apache/hadoop/tools/TestOptionsParser.java  |  85 +-
 13 files changed, 1340 insertions(+), 155 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/412c4c9a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
--
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
index 79bb7fe..7e56301 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
@@ -44,28 +44,49 @@ class DiffInfo {
   };
 
   /** The source file/dir of the rename or deletion op */
-  final Path source;
+  private Path source;
+  /** The target file/dir of the rename op. Null means the op is deletion. */
+  private Path target;
+
+  private SnapshotDiffReport.DiffType type;
   /**
* The intermediate file/dir for the op. For a rename or a delete op,
* we first rename the source to this tmp file/dir.
*/
   private Path tmp;
-  /** The target file/dir of the rename op. Null means the op is deletion. */
-  Path target;
-
-  private final SnapshotDiffReport.DiffType type;
-
-  public SnapshotDiffReport.DiffType getType(){
-return this.type;
-  }
 
-  DiffInfo(Path source, Path target, SnapshotDiffReport.DiffType type) {
+  DiffInfo(final Path source, final Path target,
+  SnapshotDiffReport.DiffType type) {
 assert source != null;
 this.source = source;
 this.target= target;
 this.type = type;
   }
 
+  void setSource(final Path source) {
+this.source = source;
+  }
+
+  Path getSource() {
+return source;
+  }
+
+  void setTarget(final Path target) {
+this.target = target;
+  }
+
+  Path getTarget() {
+return target;
+  }
+
+  public void setType(final SnapshotDiffReport.DiffType type){
+this.type = type;
+  }
+
+  public SnapshotDiffReport.DiffType getType(){
+return type;
+  }
+
   void setTmp(Path tmp) {
 this.tmp = tmp;
   }
@@ -73,4 +94,10 @@ class DiffInfo {
   Path getTmp() {
 return tmp;
   }
+
+  @Override
+  public String toString() {
+return type + ": src=" + String.valueOf(source) + " tgt="
++ String.valueOf(target) + " tmp=" + String.valueOf(tmp);
+  }
 }

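As the javadoc in this version of DiffInfo says, a rename carries both endpoints while a delete leaves target null. A hypothetical construction under that contract (paths invented; DiffInfo is package-private, so such code would have to live in org.apache.hadoop.tools):

    package org.apache.hadoop.tools; // DiffInfo is package-private

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

    public class DiffInfoDemo {
      public static void main(String[] args) {
        DiffInfo rename = new DiffInfo(new Path("/src/a"), new Path("/src/b"),
            SnapshotDiffReport.DiffType.RENAME);
        DiffInfo delete = new DiffInfo(new Path("/src/c"), null,
            SnapshotDiffReport.DiffType.DELETE);
        // toString() wraps each field in String.valueOf, so nulls print as "null":
        System.out.println(rename); // RENAME: src=/src/a tgt=/src/b tmp=null
        System.out.println(delete); // DELETE: src=/src/c tgt=null tmp=null
      }
    }
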
http://git-wip-us.apache.org/repos/asf/hadoop/blob/412c4c9a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
--
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
index be58f13..e9decd2 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
@@ -77,6 +77,21 @@ public class DistCp extends Configured implements Tool {
   private boolean submitted;
   private FileSystem jobFS;
 
+  private void prepareFileListing(Job job) throws Exception {
+if (inputOptions.shouldUseSnapshotDiff()) {
+  try {
+DistCpSync distCpSync = new DistCpSync(inputOptions, getConf());
+distCpSync.sync();
+createInputFileListingWithDiff(job, distCpSync);
+  } catch 

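Since DistCp implements Tool (visible in the hunk header above), the snapshot-diff path in prepareFileListing can be driven from its main entry point. A hedged sketch; the snapshot names and cluster paths below are placeholders, and -diff must be combined with -update:

    public class DistCpDiffDemo {
      public static void main(String[] args) throws Exception {
        // Shell equivalent: hadoop distcp -update -diff s1 s2 hdfs://nn1/src hdfs://nn2/dst
        // With -diff, prepareFileListing runs DistCpSync.sync() and then builds
        // the copy listing from the s1->s2 snapshot diff instead of a full scan.
        org.apache.hadoop.tools.DistCp.main(new String[] {
            "-update", "-diff", "s1", "s2",
            "hdfs://nn1/src", "hdfs://nn2/dst"});
      }
    }

Note that DistCp.main calls System.exit, so this form is only suitable as a process entry point.
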
[hadoop] Git Push Summary

2016-10-17 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/MR-6749 [created] ed9fcbec5




hadoop git commit: YARN-5145. [YARN-3368] Move new YARN UI configuration to HADOOP_CONF_DIR. (Sunil G and Kai Sasaki via wangda) [Forced Update!]

2016-10-17 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3368 754b30234 -> b133ccf44 (forced update)


YARN-5145. [YARN-3368] Move new YARN UI configuration to HADOOP_CONF_DIR. (Sunil G and Kai Sasaki via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b133ccf4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b133ccf4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b133ccf4

Branch: refs/heads/YARN-3368
Commit: b133ccf4477102075fcd178417975595c02250cf
Parents: 314bcba
Author: Wangda Tan 
Authored: Mon Oct 17 11:30:16 2016 -0700
Committer: Wangda Tan 
Committed: Mon Oct 17 11:37:26 2016 -0700

--
 .../src/main/webapp/app/initializers/loader.js  | 86 
 .../tests/unit/initializers/loader-test.js  | 40 +
 2 files changed, 126 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b133ccf4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
new file mode 100644
index 000..08e4dbd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+function getTimeLineURL(parameters) {
+  return '/conf?name=yarn.timeline-service.webapp.address';
+}
+
+function updateConfigs(application) {
+  var hostname = window.location.hostname;
+  var rmhost = hostname +
+(window.location.port ? ':' + window.location.port: '');
+
+  Ember.Logger.log("RM Address:" + rmhost);
+
+  if(!ENV.hosts.rmWebAddress) {
+ENV = {
+   hosts: {
+  rmWebAddress: rmhost,
+},
+};
+  }
+
+  if(!ENV.hosts.timelineWebAddress) {
+var result = [];
+var timelinehost = "";
+$.ajax({
+  type: 'GET',
+  dataType: 'json',
+  async: true,
+  context: this,
+  url: getTimeLineURL(),
+  success: function(data) {
+timelinehost = data.property.value;
+ENV.hosts.timelineWebAddress = timelinehost;
+
+var address = timelinehost.split(":")[0];
+var port = timelinehost.split(":")[1];
+
+Ember.Logger.log("Timeline Address from RM:" + address + ":" + port);
+
+if(address == "0.0.0.0" || address == "localhost") {
+  var updatedAddress =  hostname + ":" + port;
+
+  /* Timeline v2 is not supporting CORS, so make as default*/
+  ENV = {
+ hosts: {
+rmWebAddress: rmhost,
+timelineWebAddress: updatedAddress,
+  },
+  };
+  Ember.Logger.log("Timeline Updated Address:" + updatedAddress);
+}
+application.advanceReadiness();
+  },
+});
+  } else {
+application.advanceReadiness();
+  }
+}
+
+export function initialize( application ) {
+  application.deferReadiness();
+  updateConfigs(application);
+}
+
+export default {
+  name: 'loader',
+  before: 'env',
+  initialize
+};

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b133ccf4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/initializers/loader-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/initializers/loader-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/initializers/loader-test.js
new file mode 100644
index 000..cc32e92
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/initializers/loader-test.js
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed 

hadoop git commit: YARN-5145. [YARN-3368] Move new YARN UI configuration to HADOOP_CONF_DIR. (Sunil G and Kai Sasaki via wangda) [Forced Update!]

2016-10-17 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3368 5382eb25f -> 754b30234 (forced update)


YARN-5145. [YARN-3368] Move new YARN UI configuration to HADOOP_CONF_DIR. (Sunil G and Kai Sasaki via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/754b3023
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/754b3023
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/754b3023

Branch: refs/heads/YARN-3368
Commit: 754b302341df7145518115993092bc44483fef69
Parents: 314bcba
Author: Wangda Tan 
Authored: Mon Oct 17 11:30:16 2016 -0700
Committer: Wangda Tan 
Committed: Mon Oct 17 11:33:48 2016 -0700

--
 .../src/site/markdown/YarnUI2.md|  6 +-
 .../src/main/webapp/app/initializers/loader.js  | 86 
 .../tests/unit/initializers/loader-test.js  | 40 +
 3 files changed, 129 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/754b3023/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
index ff48183..9ebb148 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
@@ -34,8 +34,8 @@ Configurations
 
 | Configuration Property | Description |
 |: |: |
-| `yarn.resourcemanager.webapp.ui2.enable` | In the server side it indicates whether the new YARN-UI v2 is enabled or not. Defaults to `false`. |
-| `yarn.resourcemanager.webapp.ui2.address` | Specify the address of ResourceManager and port which host YARN-UI v2, defaults to `localhost:8288`. |
+| `yarn.webapp.ui2.enable` | In the server side it indicates whether the new YARN-UI v2 is enabled or not. Defaults to `false`. |
+| `yarn.webapp.ui2.war-file-path` | WAR file path for launching yarn UI2 web application. |
 
 *In $HADOOP_PREFIX/share/hadoop/yarn/webapps/rm/config/configs.env*
 
@@ -44,4 +44,4 @@ Configurations
 
 Use it
 -
-Open your browser, go to `rm-address:8288` and try it!
+Open your browser, go to `rm-address:8088/ui2` and try it!

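For completeness, the two keys in the table above can be read back like any other YARN configuration. A small sketch, assuming the keys are set in a yarn-site.xml on the classpath (the defaults below are our assumptions, not from the patch):

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class Ui2ConfigCheck {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // Key names come from the doc table in this patch; defaults here are assumed.
        boolean ui2Enabled = conf.getBoolean("yarn.webapp.ui2.enable", false);
        String warPath = conf.get("yarn.webapp.ui2.war-file-path");
        System.out.println("UI2 enabled: " + ui2Enabled + ", WAR file: " + warPath);
      }
    }
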
http://git-wip-us.apache.org/repos/asf/hadoop/blob/754b3023/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
new file mode 100644
index 000..08e4dbd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+function getTimeLineURL(parameters) {
+  return '/conf?name=yarn.timeline-service.webapp.address';
+}
+
+function updateConfigs(application) {
+  var hostname = window.location.hostname;
+  var rmhost = hostname +
+(window.location.port ? ':' + window.location.port: '');
+
+  Ember.Logger.log("RM Address:" + rmhost);
+
+  if(!ENV.hosts.rmWebAddress) {
+ENV = {
+   hosts: {
+  rmWebAddress: rmhost,
+},
+};
+  }
+
+  if(!ENV.hosts.timelineWebAddress) {
+var result = [];
+var timelinehost = "";
+$.ajax({
+  type: 'GET',
+  dataType: 'json',
+  async: true,
+  context: this,
+  url: getTimeLineURL(),
+  success: function(data) {
+timelinehost = data.property.value;
+ENV.hosts.timelineWebAddress = timelinehost;
+
+var address = timelinehost.split(":")[0];
+var port = timelinehost.split(":")[1];
+
+Ember.Logger.log("Timeline Address from RM:" + address + ":" + port);
+
+

[38/50] [abbrv] hadoop git commit: YARN-5682. [YARN-3368] Fix maven build to keep all generated or downloaded files in target folder (Wangda Tan via Sunil G)

2016-10-17 Thread wangda
YARN-5682. [YARN-3368] Fix maven build to keep all generated or downloaded files in target folder (Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9a16ce0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9a16ce0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9a16ce0

Branch: refs/heads/YARN-3368
Commit: f9a16ce0aaf8305819eb0b84e3fb12dc831c278f
Parents: 8a09f15
Author: sunilg 
Authored: Tue Oct 4 21:07:42 2016 +0530
Committer: Wangda Tan 
Committed: Mon Oct 17 11:30:30 2016 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  | 54 
 hadoop-yarn-project/hadoop-yarn/pom.xml |  2 +-
 2 files changed, 34 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9a16ce0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index b750a73..440aca9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -31,7 +31,7 @@
 
   
 war
-src/main/webapp
+${basedir}/target/src/main/webapp
 node
 v0.12.2
 2.10.0
@@ -84,10 +84,10 @@
   false
   
 
-  
${basedir}/src/main/webapp/bower_components
+  ${webappTgtDir}/bower_components
 
 
-  ${basedir}/src/main/webapp/node_modules
+  ${webappTgtDir}/node_modules
 
   
 
@@ -109,6 +109,33 @@
 
   
 
+  
+  
+org.apache.maven.plugins
+maven-antrun-plugin
+
+  
+prepare-source-code
+generate-sources
+
+  run
+
+
+  
+
+  
+
+
+
+  
+
+  
+
+  
+
+  
+
+
   
   
 exec-maven-plugin
@@ -121,7 +148,7 @@
   exec
 
 
-  ${webappDir}
+  ${webappTgtDir}
   npm
   
 install
@@ -135,7 +162,7 @@
   exec
 
 
-  ${webappDir}
+  ${webappTgtDir}
   bower
   
 --allow-root
@@ -150,7 +177,7 @@
   exec
 
 
-  ${webappDir}
+  ${webappTgtDir}
   ember
   
 build
@@ -160,21 +187,6 @@
   
 
   
-  
-cleanup tmp
-generate-sources
-
-  exec
-
-
-  ${webappDir}
-  rm
-  
--rf
-tmp
-  
-
-  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9a16ce0/hadoop-yarn-project/hadoop-yarn/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml
index ca78ef8..70b68d7 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -230,7 +230,6 @@
   
 
   
-hadoop-yarn-ui
 hadoop-yarn-api
 hadoop-yarn-common
 hadoop-yarn-server
@@ -238,5 +237,6 @@
 hadoop-yarn-site
 hadoop-yarn-client
 hadoop-yarn-registry
+hadoop-yarn-ui
   
 





[18/50] [abbrv] hadoop git commit: YARN-4733. [YARN-3368] Initial commit of new YARN web UI. (wangda)

2016-10-17 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c04d7e55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
new file mode 100644
index 000..d39885e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/cluster-metric.js
@@ -0,0 +1,29 @@
+import DS from 'ember-data';
+
+export default DS.JSONAPISerializer.extend({
+normalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  var fixedPayload = {
+id: id,
+type: primaryModelClass.modelName,
+attributes: payload
+  };
+
+  return this._super(store, primaryModelClass, fixedPayload, id,
+requestType);
+},
+
+normalizeArrayResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  // return expected is { data: [ {}, {} ] }
+  var normalizedArrayResponse = {};
+
+  // payload has apps : { app: [ {},{},{} ]  }
+  // need some error handling for ex apps or app may not be defined.
+  normalizedArrayResponse.data = [
+this.normalizeSingleResponse(store, primaryModelClass,
+  payload.clusterMetrics, 1, requestType)
+  ];
+  return normalizedArrayResponse;
+}
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c04d7e55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
new file mode 100644
index 000..c5394d0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app-attempt.js
@@ -0,0 +1,49 @@
+import DS from 'ember-data';
+import Converter from 'yarn-ui/utils/converter';
+
+export default DS.JSONAPISerializer.extend({
+internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  
+  if (payload.appAttempt) {
+payload = payload.appAttempt;  
+  }
+  
+  var fixedPayload = {
+id: payload.appAttemptId,
+type: primaryModelClass.modelName, // yarn-app
+attributes: {
+  startTime: Converter.timeStampToDate(payload.startTime),
+  finishedTime: Converter.timeStampToDate(payload.finishedTime),
+  containerId: payload.containerId,
+  nodeHttpAddress: payload.nodeHttpAddress,
+  nodeId: payload.nodeId,
+  state: payload.nodeId,
+  logsLink: payload.logsLink
+}
+  };
+
+  return fixedPayload;
+},
+
+normalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  var p = this.internalNormalizeSingleResponse(store, 
+primaryModelClass, payload, id, requestType);
+  return { data: p };
+},
+
+normalizeArrayResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  // return expected is { data: [ {}, {} ] }
+  var normalizedArrayResponse = {};
+
+  // payload has apps : { app: [ {},{},{} ]  }
+  // need some error handling for ex apps or app may not be defined.
+  normalizedArrayResponse.data = payload.appAttempts.appAttempt.map(singleApp => {
+return this.internalNormalizeSingleResponse(store, primaryModelClass,
+  singleApp, singleApp.id, requestType);
+  }, this);
+  return normalizedArrayResponse;
+}
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c04d7e55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
new file mode 100644
index 000..a038fff
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/serializers/yarn-app.js
@@ -0,0 +1,66 @@
+import DS from 'ember-data';
+import Converter from 'yarn-ui/utils/converter';
+
+export default DS.JSONAPISerializer.extend({
+internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
+  requestType) {
+  if (payload.app) {
+payload = payload.app;  
+  }
+  
+  var fixedPayload = {
+id: id,
+type: primaryModelClass.modelName, // yarn-app
+attributes: {
+  appName: payload.name,
+  user: payload.user,
+  queue: payload.queue,
+  state: payload.state,
+  startTime: Converter.timeStampToDate(payload.startedTime),
+  elapsedTime: 

[25/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-17 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb176dae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
new file mode 100644
index 000..ca80ccd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node-containers.hbs
@@ -0,0 +1,58 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+
+  
+{{node-menu path="yarnNodeContainers" nodeAddr=model.nodeInfo.addr 
nodeId=model.nodeInfo.id}}
+
+  
+
+  
+Container ID
+Container State
+User
+Logs
+  
+
+
+  {{#if model.containers}}
+{{#each model.containers as |container|}}
+  {{#if container.isDummyContainer}}
+No containers found on this node
+  {{else}}
+
+  {{container.containerId}}
+  {{container.state}}
+  {{container.user}}
+  
+{{log-files-comma nodeId=model.nodeInfo.id
+nodeAddr=model.nodeInfo.addr
+containerId=container.containerId
+logFiles=container.containerLogFiles}}
+  
+
+  {{/if}}
+{{/each}}
+  {{/if}}
+
+  
+  {{simple-table table-id="node-containers-table" bFilter=true colsOrder="0,desc" colTypes="natural" colTargets="0"}}
+
+  
+
+{{outlet}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb176dae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
new file mode 100644
index 000..a036076
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
@@ -0,0 +1,94 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+
+  
+{{node-menu path="yarnNode" nodeId=model.rmNode.id nodeAddr=model.node.id}}
+
+  
+Node Information
+  
+
+  
+Total Vmem allocated for Containers
+{{divide num=model.node.totalVmemAllocatedContainersMB den=1024}} GB
+  
+  
+Vmem enforcement enabled
+{{model.node.vmemCheckEnabled}}
+  
+  
+Total Pmem allocated for Containers
+{{divide num=model.node.totalPmemAllocatedContainersMB den=1024}} GB
+  
+  
+Pmem enforcement enabled
+{{model.node.pmemCheckEnabled}}
+  
+  
+Total VCores allocated for Containers
+{{model.node.totalVCoresAllocatedContainers}}
+  
+  
+Node Healthy Status
+

[41/50] [abbrv] hadoop git commit: YARN-5019. [YARN-3368] Change urls in new YARN ui from camel casing to hyphens. (Sunil G via wangda)

2016-10-17 Thread wangda
YARN-5019. [YARN-3368] Change urls in new YARN ui from camel casing to hyphens. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a794160
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a794160
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a794160

Branch: refs/heads/YARN-3368
Commit: 6a7941600c00758714b9260db656dc9f01303ee4
Parents: 7c786be
Author: Wangda Tan 
Authored: Mon May 9 11:29:59 2016 -0700
Committer: Wangda Tan 
Committed: Mon Oct 17 11:30:30 2016 -0700

--
 .../main/webapp/app/components/tree-selector.js |  4 +--
 .../main/webapp/app/controllers/application.js  | 16 +-
 .../main/webapp/app/helpers/log-files-comma.js  |  2 +-
 .../src/main/webapp/app/helpers/node-link.js|  2 +-
 .../src/main/webapp/app/helpers/node-menu.js| 12 
 .../main/webapp/app/models/yarn-app-attempt.js  |  2 +-
 .../src/main/webapp/app/router.js   | 32 ++--
 .../src/main/webapp/app/routes/index.js |  2 +-
 .../main/webapp/app/routes/yarn-app-attempt.js  |  6 ++--
 .../src/main/webapp/app/routes/yarn-app.js  |  4 +--
 .../src/main/webapp/app/routes/yarn-apps.js |  2 +-
 .../webapp/app/routes/yarn-container-log.js |  2 +-
 .../src/main/webapp/app/routes/yarn-node-app.js |  2 +-
 .../main/webapp/app/routes/yarn-node-apps.js|  2 +-
 .../webapp/app/routes/yarn-node-container.js|  2 +-
 .../webapp/app/routes/yarn-node-containers.js   |  2 +-
 .../src/main/webapp/app/routes/yarn-node.js |  4 +--
 .../src/main/webapp/app/routes/yarn-nodes.js|  2 +-
 .../src/main/webapp/app/routes/yarn-queue.js|  6 ++--
 .../main/webapp/app/routes/yarn-queues/index.js |  2 +-
 .../app/routes/yarn-queues/queues-selector.js   |  2 +-
 .../app/templates/components/app-table.hbs  |  4 +--
 .../webapp/app/templates/yarn-container-log.hbs |  2 +-
 .../main/webapp/app/templates/yarn-node-app.hbs |  4 +--
 .../webapp/app/templates/yarn-node-apps.hbs |  4 +--
 .../app/templates/yarn-node-container.hbs   |  2 +-
 .../app/templates/yarn-node-containers.hbs  |  4 +--
 .../src/main/webapp/app/templates/yarn-node.hbs |  2 +-
 28 files changed, 66 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a794160/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
index f7ec020..698c253 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
@@ -126,7 +126,7 @@ export default Ember.Component.extend({
   .attr("transform", function(d) { return "translate(" + source.y0 + "," + 
source.x0 + ")"; })
   .on("click", function(d,i){
 if (d.queueData.get("name") != this.get("selected")) {
-document.location.href = "yarnQueue/" + d.queueData.get("name");
+document.location.href = "yarn-queue/" + d.queueData.get("name");
 }
   }.bind(this));
   // .on("click", click);
@@ -176,7 +176,7 @@ export default Ember.Component.extend({
   .attr("r", 20)
   .attr("href", 
 function(d) {
-  return "yarnQueues/" + d.queueData.get("name");
+  return "yarn-queues/" + d.queueData.get("name");
 })
   .style("stroke", function(d) {
 if (d.queueData.get("name") == this.get("selected")) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a794160/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
index 3c68365..2effb13 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
@@ -29,25 +29,25 @@ export default Ember.Controller.extend({
   outputMainMenu: function(){
 var path = this.get('currentPath');
 var html = 'Queues' +
+html = html + '>Queues' +
 '(current)

[23/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-17 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb176dae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
deleted file mode 100644
index 5877589..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-node-test.js
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { moduleForModel, test } from 'ember-qunit';
-
-moduleForModel('yarn-node', 'Unit | Model | Node', {
-  // Specify the other units that are required for this test.
-  needs: []
-});
-
-test('Basic creation test', function(assert) {
-  let model = this.subject();
-
-  assert.ok(model);
-  assert.ok(model._notifyProperties);
-  assert.ok(model.didLoad);
-  assert.ok(model.totalVmemAllocatedContainersMB);
-  assert.ok(model.vmemCheckEnabled);
-  assert.ok(model.pmemCheckEnabled);
-  assert.ok(model.nodeHealthy);
-  assert.ok(model.lastNodeUpdateTime);
-  assert.ok(model.healthReport);
-  assert.ok(model.nmStartupTime);
-  assert.ok(model.nodeManagerBuildVersion);
-  assert.ok(model.hadoopBuildVersion);
-});
-
-test('test fields', function(assert) {
-  let model = this.subject();
-
-  assert.expect(4);
-  Ember.run(function () {
-model.set("totalVmemAllocatedContainersMB", 4096);
-model.set("totalPmemAllocatedContainersMB", 2048);
-model.set("totalVCoresAllocatedContainers", 4);
-model.set("hadoopBuildVersion", "3.0.0-SNAPSHOT");
-assert.equal(model.get("totalVmemAllocatedContainersMB"), 4096);
-assert.equal(model.get("totalPmemAllocatedContainersMB"), 2048);
-assert.equal(model.get("totalVCoresAllocatedContainers"), 4);
-assert.equal(model.get("hadoopBuildVersion"), "3.0.0-SNAPSHOT");
-  });
-});
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb176dae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
deleted file mode 100644
index 4fd2517..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tests/unit/models/yarn-rm-node-test.js
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { moduleForModel, test } from 'ember-qunit';
-
-moduleForModel('yarn-rm-node', 'Unit | Model | RMNode', {
-  // Specify the other units that are required for this test.
-  needs: []
-});
-
-test('Basic creation test', function(assert) {
-  let model = this.subject();
-
-  assert.ok(model);
-  assert.ok(model._notifyProperties);
-  assert.ok(model.didLoad);
-  assert.ok(model.rack);
-  assert.ok(model.state);
-  assert.ok(model.nodeHostName);
-  assert.ok(model.nodeHTTPAddress);
-  assert.ok(model.lastHealthUpdate);
-  assert.ok(model.healthReport);
-  assert.ok(model.numContainers);
-  assert.ok(model.usedMemoryMB);
-  assert.ok(model.availMemoryMB);
-  assert.ok(model.usedVirtualCores);
-  assert.ok(model.availableVirtualCores);
-  assert.ok(model.version);
-  assert.ok(model.nodeLabels);
-  

[36/50] [abbrv] hadoop git commit: YARN-4849. Addendum patch to remove unwanted files from rat exclusions. (Wangda Tan via Sunil G)

2016-10-17 Thread wangda
YARN-4849. Addendum patch to remove unwanted files from rat exclusions. (Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/314bcba9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/314bcba9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/314bcba9

Branch: refs/heads/YARN-3368
Commit: 314bcba98e9d392f458770cac5590af9505e1c5c
Parents: e8a62af
Author: sunilg 
Authored: Fri Oct 14 18:23:04 2016 +0530
Committer: Wangda Tan 
Committed: Mon Oct 17 11:30:30 2016 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  | 14 
 .../src/main/webapp/.editorconfig   | 34 
 2 files changed, 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/314bcba9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 440aca9..b427713 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -46,32 +46,18 @@
 apache-rat-plugin
 
   
-src/main/webapp/node_modules/**/*
-src/main/webapp/bower_components/**/*
 src/main/webapp/jsconfig.json
 src/main/webapp/bower.json
 src/main/webapp/package.json
 src/main/webapp/testem.json
-
-src/main/webapp/dist/**/*
-src/main/webapp/tmp/**/*
 src/main/webapp/public/assets/images/**/*
 src/main/webapp/public/assets/images/*
 src/main/webapp/public/robots.txt
-
-public/assets/images/**/*
 public/crossdomain.xml
-
-src/main/webapp/.tmp/**/*
 src/main/webapp/.bowerrc
-src/main/webapp/.editorconfig
 src/main/webapp/.ember-cli
-src/main/webapp/.gitignore
 src/main/webapp/.jshintrc
-src/main/webapp/.travis.yml
 src/main/webapp/.watchmanconfig
-src/main/webapp/tests/.jshintrc
-src/main/webapp/blueprints/.jshintrc
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/314bcba9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
deleted file mode 100644
index 47c5438..000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
+++ /dev/null
@@ -1,34 +0,0 @@
-# EditorConfig helps developers define and maintain consistent
-# coding styles between different editors and IDEs
-# editorconfig.org
-
-root = true
-
-
-[*]
-end_of_line = lf
-charset = utf-8
-trim_trailing_whitespace = true
-insert_final_newline = true
-indent_style = space
-indent_size = 2
-
-[*.js]
-indent_style = space
-indent_size = 2
-
-[*.hbs]
-insert_final_newline = false
-indent_style = space
-indent_size = 2
-
-[*.css]
-indent_style = space
-indent_size = 2
-
-[*.html]
-indent_style = space
-indent_size = 2
-
-[*.{diff,md}]
-trim_trailing_whitespace = false





[39/50] [abbrv] hadoop git commit: YARN-5488. [YARN-3368] Applications table overflows beyond the page boundary (Harish Jaiprakash via Sunil G)

2016-10-17 Thread wangda
YARN-5488. [YARN-3368] Applications table overflows beyond the page boundary (Harish Jaiprakash via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48b43965
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48b43965
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48b43965

Branch: refs/heads/YARN-3368
Commit: 48b43965698851bd03ea5874e3a80c2bd57bef0a
Parents: f30037d
Author: sunilg 
Authored: Fri Aug 12 14:51:03 2016 +0530
Committer: Wangda Tan 
Committed: Mon Oct 17 11:30:30 2016 -0700

--
 .../src/main/webapp/app/styles/app.css  |  4 +
 .../src/main/webapp/app/templates/yarn-app.hbs  | 98 ++--
 2 files changed, 54 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48b43965/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
index a68a0ac..da5b4bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
@@ -273,3 +273,7 @@ li a.navigation-link.ember-view {
   right: 20px;
   top: 3px;
 }
+
+.x-scroll {
+  overflow-x: scroll;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48b43965/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
index 49c4bfd..9e92fc1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
@@ -49,55 +49,57 @@
 
   
 Basic Info
-
-  
-
-  Application ID
-  Name
-  User
-  Queue
-  State
-  Final Status
-  Start Time
-  Elapsed Time
-  Finished Time
-  Priority
-  Progress
-  Is Unmanaged AM
-
-  
+
+  
+
+  
+Application ID
+Name
+User
+Queue
+State
+Final Status
+Start Time
+Elapsed Time
+Finished Time
+Priority
+Progress
+Is Unmanaged AM
+  
+
 
-  
-
-  {{model.app.id}}
-  {{model.app.appName}}
-  {{model.app.user}}
-  {{model.app.queue}}
-  {{model.app.state}}
-  
-
-  {{model.app.finalStatus}}
-
-  
-  {{model.app.startTime}}
-  {{model.app.elapsedTime}}
-  {{model.app.validatedFinishedTs}}
-  {{model.app.priority}}
-  
-
-  
-{{model.app.progress}}%
+
+  
+{{model.app.id}}
+{{model.app.appName}}
+{{model.app.user}}
+{{model.app.queue}}
+{{model.app.state}}
+
+  
+{{model.app.finalStatus}}
+  
+
+{{model.app.startTime}}
+{{model.app.elapsedTime}}
+{{model.app.validatedFinishedTs}}
+{{model.app.priority}}
+
+  
+
+  {{model.app.progress}}%
+
   
-
-  
-  {{model.app.unmanagedApplication}}
-
-  
-
+
+{{model.app.unmanagedApplication}}
+  
+
+  
+ 

[24/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-17 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb176dae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
new file mode 100644
index 000..4e68da0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+import Constants from 'yarn-ui/constants';
+
+moduleFor('route:yarn-container-log', 'Unit | Route | ContainerLog', {
+});
+
+test('Basic creation test', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+  assert.ok(route.model);
+});
+
+test('Test getting container log', function(assert) {
+  var response = {
+  logs: "This is syslog",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve) {
+resolve(response);
+  }
+)}
+  };
+  assert.expect(6);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.containerLog);
+ assert.deepEqual(value.containerLog, response);
+ assert.ok(value.nodeInfo);
+ assert.equal(value.nodeInfo.addr, 'localhost:8042');
+ assert.equal(value.nodeInfo.id, 'localhost:64318');
+   });
+});
+
+/**
+ * This can happen when an empty response is sent from server
+ */
+test('Test non HTTP error while getting container log', function(assert) {
+  var error = {};
+  var response = {
+  logs: "",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve, reject) {
+reject(error);
+  }
+)}
+  };
+  assert.expect(6);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.containerLog);
+ assert.deepEqual(value.containerLog, response);
+ assert.ok(value.nodeInfo);
+ assert.equal(value.nodeInfo.addr, 'localhost:8042');
+ assert.equal(value.nodeInfo.id, 'localhost:64318');
+   });
+});
+
+test('Test HTTP error while getting container log', function(assert) {
+  var error = {errors: [{status: 404, responseText: 'Not Found'}]};
+  var response = {
+  logs: "",
+  containerID: "container_e32_1456000363780_0002_01_01",
+  logFileName: "syslog"};
+  var store = {
+findRecord: function(type) {
+  return new Ember.RSVP.Promise(function(resolve, reject) {
+reject(error);
+  }
+)}
+  };
+  assert.expect(5);
+  var route = this.subject();
+  route.set('store', store);
+  var model = route.model({node_id: "localhost:64318",
+  node_addr: "localhost:8042",
+  container_id: "container_e32_1456000363780_0002_01_01",
+  filename: "syslog"});
+   model.then(function(value) {
+ assert.ok(value);
+ assert.ok(value.errors);
+ assert.equal(value.errors.length, 1);
+ assert.equal(value.errors[0].status, 404);
+ assert.equal(value.errors[0].responseText, 'Not Found');
+   });
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb176dae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-app-test.js
--
diff 

[46/50] [abbrv] hadoop git commit: YARN-4849. Addendum patch to fix license. (Wangda Tan via Sunil G)

2016-10-17 Thread wangda
YARN-4849. Addendum patch to fix license. (Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/182f65a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/182f65a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/182f65a6

Branch: refs/heads/YARN-3368
Commit: 182f65a661c0e7be1c775545200c9f5aa0d68695
Parents: 20e5519
Author: sunilg 
Authored: Wed Aug 24 16:28:34 2016 +0530
Committer: Wangda Tan 
Committed: Mon Oct 17 11:30:30 2016 -0700

--
 LICENSE.txt | 84 ++--
 1 file changed, 51 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/182f65a6/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 5efbd14..05743fe 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1869,35 +1869,53 @@ be bound by any additional provisions that may appear in any communication from
 You. This License may not be modified without the mutual written agreement of
 the Licensor and You.
 
-For Apache Hadoop YARN Web UI component: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/
--
-The Apache Hadoop YARN Web UI component bundles the following files under the MIT License:
-
 - ember v2.2.0 (http://emberjs.com/) - Copyright (c) 2014 Yehuda Katz, Tom Dale and Ember.js contributors
 - ember-data v2.1.0 (https://github.com/emberjs/data) - Copyright (C) 2011-2014 Tilde, Inc. and contributors, Portions Copyright (C) 2011 LivingSocial Inc.
 - ember-resolver v2.0.3 (https://github.com/ember-cli/ember-resolver) - Copyright (c) 2013 Stefan Penner and Ember App Kit Contributors
 - bootstrap v3.3.6 (http://getbootstrap.com) - Copyright (c) 2011-2014 Twitter, Inc
 - jquery v2.1.4 (http://jquery.org) - Copyright 2005, 2014 jQuery Foundation and other contributors
 - jquery-ui v1.11.4 (http://jqueryui.com/) - Copyright 2014 jQuery Foundation and other contributors
 - datatables v1.10.8 (https://datatables.net/)
 - moment v2.10.6 (http://momentjs.com/) - Copyright (c) 2011-2015 Tim Wood, Iskren Chernev, Moment.js contributors
 - em-helpers v0.5.8 (https://github.com/sreenaths/em-helpers)
 - ember-array-contains-helper v1.0.2 (https://github.com/bmeurant/ember-array-contains-helper)
 - ember-cli-app-version v0.5.8 (https://github.com/EmberSherpa/ember-cli-app-version) - Authored by Taras Mankovski
 - ember-cli-babel v5.1.6 (https://github.com/babel/ember-cli-babel) - Authored by Stefan Penner
 - ember-cli-content-security-policy v0.4.0 (https://github.com/rwjblue/ember-cli-content-security-policy)
 - ember-cli-dependency-checker v1.2.0 (https://github.com/quaertym/ember-cli-dependency-checker) - Authored by Emre Unal
 - ember-cli-htmlbars v1.0.2 (https://github.com/ember-cli/ember-cli-htmlbars) - Authored by Robert Jackson
 - ember-cli-htmlbars-inline-precompile v0.3.1 (https://github.com/pangratz/ember-cli-htmlbars-inline-precompile) - Authored by Clemens Müller
 - ember-cli-ic-ajax v0.2.1 (https://github.com/rwjblue/ember-cli-ic-ajax) - Authored by Robert Jackson
 - ember-cli-inject-live-reload v1.4.0 (https://github.com/rwjblue/ember-cli-inject-live-reload) - Authored by Robert Jackson
 - ember-cli-qunit v1.2.1 (https://github.com/ember-cli/ember-cli-qunit) - Authored by Robert Jackson
 - ember-cli-release v0.2.8 (https://github.com/lytics/ember-cli-release) - Authored by Robert Jackson
 - ember-cli-sri v1.2.1 (https://github.com/jonathanKingston/ember-cli-sri) - Authored by Jonathan Kingston
 - ember-cli-uglify v1.2.0 (github.com/ember-cli/ember-cli-uglify) - Authored by Robert Jackson
 - ember-d3 v0.1.0 (https://github.com/brzpegasus/ember-d3) - Authored by Estelle DeBlois
 - ember-truth-helpers v1.2.0 (https://github.com/jmurphyau/ember-truth-helpers)
 - select2 v4.0.0 (https://select2.github.io/)
+The binary distribution of this product bundles these dependencies under the
+following license:
+bootstrap v3.3.6
+broccoli-asset-rev v2.4.2
+broccoli-funnel v1.0.1
+datatables v1.10.8
+em-helpers v0.5.13
+em-table v0.1.6
+ember v2.2.0
+ember-array-contains-helper v1.0.2
+ember-bootstrap v0.5.1
+ember-cli v1.13.13
+ember-cli-app-version v1.0.0
+ember-cli-babel v5.1.6
+ember-cli-content-security-policy v0.4.0
+ember-cli-dependency-checker v1.2.0
+ember-cli-htmlbars v1.0.2
+ember-cli-htmlbars-inline-precompile v0.3.1
+ember-cli-ic-ajax v0.2.1
+ember-cli-inject-live-reload v1.4.0
+ember-cli-jquery-ui v0.0.20

[22/50] [abbrv] hadoop git commit: YARN-5321. [YARN-3368] Add resource usage for application by node managers (Wangda Tan via Sunil G) YARN-5320. [YARN-3368] Add resource usage by applications and que

2016-10-17 Thread wangda
YARN-5321. [YARN-3368] Add resource usage for application by node managers (Wangda Tan via Sunil G)
YARN-5320. [YARN-3368] Add resource usage by applications and queues to cluster overview page (Wangda Tan via Sunil G)
YARN-5322. [YARN-3368] Add a node heat chart map (Wangda Tan via Sunil G)
YARN-5347. [YARN-3368] Applications page improvements (Sreenath Somarajapuram via Sunil G)
YARN-5348. [YARN-3368] Node details page improvements (Sreenath Somarajapuram via Sunil G)
YARN-5346. [YARN-3368] Queues page improvements (Sreenath Somarajapuram via Sunil G)
YARN-5345. [YARN-3368] Cluster overview page improvements (Sreenath Somarajapuram via Sunil G)
YARN-5344. [YARN-3368] Generic UI improvements (Sreenath Somarajapuram via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87f22ffb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87f22ffb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87f22ffb

Branch: refs/heads/YARN-3368
Commit: 87f22ffb3b42b7eb4b9e8df1b5dc352f7801eb18
Parents: 8fe2daf
Author: Sunil 
Authored: Fri Jul 15 21:16:06 2016 +0530
Committer: Wangda Tan 
Committed: Mon Oct 17 11:30:30 2016 -0700

--
 .../src/main/webapp/app/adapters/yarn-app.js|  14 +
 .../app/components/app-usage-donut-chart.js |  67 
 .../src/main/webapp/app/components/bar-chart.js |   5 +
 .../app/components/base-chart-component.js  |  55 ++-
 .../app/components/base-usage-donut-chart.js|  43 +++
 .../main/webapp/app/components/donut-chart.js   |  55 ++-
 .../main/webapp/app/components/nodes-heatmap.js | 209 +++
 ...er-app-memusage-by-nodes-stacked-barchart.js |  88 +
 ...app-ncontainers-by-nodes-stacked-barchart.js |  67 
 .../app/components/queue-usage-donut-chart.js   |  69 
 .../main/webapp/app/components/queue-view.js|   3 +-
 .../main/webapp/app/components/simple-table.js  |   9 +-
 .../webapp/app/components/stacked-barchart.js   | 198 +++
 .../main/webapp/app/components/timeline-view.js |   2 +-
 .../main/webapp/app/components/tree-selector.js |  43 ++-
 .../webapp/app/controllers/cluster-overview.js  |   9 +
 .../webapp/app/controllers/yarn-app-attempt.js  |  40 +++
 .../webapp/app/controllers/yarn-app-attempts.js |  40 +++
 .../src/main/webapp/app/controllers/yarn-app.js |  38 ++
 .../main/webapp/app/controllers/yarn-apps.js|   9 +
 .../webapp/app/controllers/yarn-node-apps.js|  39 +++
 .../app/controllers/yarn-node-containers.js |  39 +++
 .../main/webapp/app/controllers/yarn-node.js|  37 ++
 .../app/controllers/yarn-nodes-heatmap.js   |  36 ++
 .../main/webapp/app/controllers/yarn-nodes.js   |  33 ++
 .../webapp/app/controllers/yarn-queue-apps.js   |  46 +++
 .../main/webapp/app/controllers/yarn-queue.js   |  20 ++
 .../main/webapp/app/controllers/yarn-queues.js  |  34 ++
 .../webapp/app/controllers/yarn-services.js |  34 ++
 .../main/webapp/app/models/cluster-metric.js|   2 +-
 .../main/webapp/app/models/yarn-app-attempt.js  |  11 +
 .../src/main/webapp/app/models/yarn-app.js  |   4 +
 .../src/main/webapp/app/models/yarn-rm-node.js  |   7 +
 .../src/main/webapp/app/router.js   |  15 +-
 .../src/main/webapp/app/routes/application.js   |   2 +
 .../main/webapp/app/routes/cluster-overview.js  |   9 +-
 .../main/webapp/app/routes/yarn-app-attempts.js |  30 ++
 .../src/main/webapp/app/routes/yarn-app.js  |  17 +-
 .../src/main/webapp/app/routes/yarn-apps.js |   6 +-
 .../main/webapp/app/routes/yarn-apps/apps.js|  22 ++
 .../webapp/app/routes/yarn-apps/services.js |  22 ++
 .../src/main/webapp/app/routes/yarn-node.js |   1 +
 .../src/main/webapp/app/routes/yarn-nodes.js|   5 +-
 .../webapp/app/routes/yarn-nodes/heatmap.js |  22 ++
 .../main/webapp/app/routes/yarn-nodes/table.js  |  22 ++
 .../main/webapp/app/routes/yarn-queue-apps.js   |  36 ++
 .../src/main/webapp/app/routes/yarn-queues.js   |  38 ++
 .../webapp/app/serializers/yarn-app-attempt.js  |  19 +-
 .../src/main/webapp/app/serializers/yarn-app.js |   8 +-
 .../webapp/app/serializers/yarn-container.js|  20 +-
 .../src/main/webapp/app/styles/app.css  | 139 ++--
 .../main/webapp/app/templates/application.hbs   |  99 --
 .../webapp/app/templates/cluster-overview.hbs   | 168 ++---
 .../app/templates/components/app-table.hbs  |  10 +-
 .../templates/components/node-menu-panel.hbs|   2 +-
 .../app/templates/components/nodes-heatmap.hbs  |  27 ++
 .../components/queue-configuration-table.hbs|   4 -
 .../templates/components/queue-navigator.hbs|  14 +-
 .../app/templates/components/timeline-view.hbs  |   3 +-
 .../webapp/app/templates/yarn-app-attempt.hbs   |  13 +-
 .../webapp/app/templates/yarn-app-attempts.hbs  |  57 +++
 .../src/main/webapp/app/templates/yarn-app.hbs  | 346 ---
 

[44/50] [abbrv] hadoop git commit: YARN-5145. [YARN-3368] Move new YARN UI configuration to HADOOP_CONF_DIR. (Sunil G via wangda)

2016-10-17 Thread wangda
YARN-5145. [YARN-3368] Move new YARN UI configuration to HADOOP_CONF_DIR. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5382eb25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5382eb25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5382eb25

Branch: refs/heads/YARN-3368
Commit: 5382eb25f07676a418bc539509eb9e1a4306a04b
Parents: 314bcba
Author: Wangda Tan 
Authored: Mon Oct 17 11:30:16 2016 -0700
Committer: Wangda Tan 
Committed: Mon Oct 17 11:30:30 2016 -0700

--
 .../src/main/webapp/app/initializers/loader.js  | 86 
 .../tests/unit/initializers/loader-test.js  | 40 +
 2 files changed, 126 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5382eb25/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
new file mode 100644
index 000..08e4dbd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+function getTimeLineURL() {
+  return '/conf?name=yarn.timeline-service.webapp.address';
+}
+
+function updateConfigs(application) {
+  var hostname = window.location.hostname;
+  var rmhost = hostname +
+    (window.location.port ? ':' + window.location.port : '');
+
+  Ember.Logger.log("RM Address:" + rmhost);
+
+  if (!ENV.hosts.rmWebAddress) {
+    ENV = {
+      hosts: {
+        rmWebAddress: rmhost,
+      },
+    };
+  }
+
+  if (!ENV.hosts.timelineWebAddress) {
+    var result = [];
+    var timelinehost = "";
+    $.ajax({
+      type: 'GET',
+      dataType: 'json',
+      async: true,
+      context: this,
+      url: getTimeLineURL(),
+      success: function(data) {
+        timelinehost = data.property.value;
+        ENV.hosts.timelineWebAddress = timelinehost;
+
+        var address = timelinehost.split(":")[0];
+        var port = timelinehost.split(":")[1];
+
+        Ember.Logger.log("Timeline Address from RM:" + address + ":" + port);
+
+        if (address == "0.0.0.0" || address == "localhost") {
+          var updatedAddress = hostname + ":" + port;
+
+          /* Timeline v2 does not support CORS, so use this as the default. */
+          ENV = {
+            hosts: {
+              rmWebAddress: rmhost,
+              timelineWebAddress: updatedAddress,
+            },
+          };
+          Ember.Logger.log("Timeline Updated Address:" + updatedAddress);
+        }
+        application.advanceReadiness();
+      },
+    });
+  } else {
+    application.advanceReadiness();
+  }
+}
+
+export function initialize( application ) {
+  application.deferReadiness();
+  updateConfigs(application);
+}
+
+export default {
+  name: 'loader',
+  before: 'env',
+  initialize
+};
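
The values that updateConfigs() stores in ENV.hosts are intended for the UI's
data adapters to consume when building request URLs. A minimal, hypothetical
sketch of such an adapter follows (illustrative only; the adapter shape and
use of DS.JSONAPIAdapter are assumptions, not part of this commit):

import Ember from 'ember';
import DS from 'ember-data';

// Illustrative adapter: points Ember Data at the RM web address that
// loader.js resolved above. ENV is the same global the loader populates.
export default DS.JSONAPIAdapter.extend({
  host: Ember.computed(function() {
    return 'http://' + ENV.hosts.rmWebAddress;
  }),
  namespace: 'ws/v1/cluster' // standard RM REST API prefix
});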

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5382eb25/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/initializers/loader-test.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/initializers/loader-test.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/initializers/loader-test.js
new file mode 100644
index 000..cc32e92
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/initializers/loader-test.js
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you 

[26/50] [abbrv] hadoop git commit: YARN-4849. [YARN-3368] cleanup code base, integrate web UI related build to mvn, and fix licenses. (wangda)

2016-10-17 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb176dae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
new file mode 100644
index 000..89858bf
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+  model(param) {
+    return Ember.RSVP.hash({
+      selected: param.queue_name,
+      queues: this.store.findAll('yarnQueue'),
+      selectedQueue: undefined,
+      apps: undefined, // apps of selected queue
+    });
+  },
+
+  afterModel(model) {
+    model.selectedQueue = this.store.peekRecord('yarnQueue', model.selected);
+    model.apps = this.store.findAll('yarnApp');
+    model.apps.forEach(function(o) {
+      console.log(o);
+    });
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb176dae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
new file mode 100644
index 000..7da6f6d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+  beforeModel() {
+    this.transitionTo('yarnQueues.root');
+  }
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb176dae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
new file mode 100644
index 000..3686c83
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+

[11/50] [abbrv] hadoop git commit: HDFS-11003. Expose XmitsInProgress through DataNodeMXBean. Contributed By Brahma Reddy Battula

2016-10-17 Thread wangda
HDFS-11003. Expose XmitsInProgress through DataNodeMXBean. Contributed By Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f4ae85b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f4ae85b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f4ae85b

Branch: refs/heads/YARN-3368
Commit: 5f4ae85bd8a20510948696467873498723b06477
Parents: 5ad037d
Author: Brahma Reddy Battula 
Authored: Sat Oct 15 22:28:33 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Sat Oct 15 22:28:33 2016 +0530

--
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java  | 5 +++--
 .../org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java | 6 ++
 .../apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java | 6 +-
 3 files changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f4ae85b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index cb8e308..8f65efe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2101,8 +2101,9 @@ public class DataNode extends ReconfigurableBase
   }
 }
   }
-  
-  int getXmitsInProgress() {
+
+  @Override //DataNodeMXBean
+  public int getXmitsInProgress() {
 return xmitsInProgress.get();
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f4ae85b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
index 5ec4cda..5d4c218 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
@@ -101,6 +101,12 @@ public interface DataNodeMXBean {
   public int getXceiverCount();
 
   /**
+   * Returns an estimate of the number of data replication/reconstruction tasks
+   * running currently.
+   */
+  public int getXmitsInProgress();
+
+  /**
   * Gets the network error counts on a per-Datanode basis.
   */
  public Map<String, Map<String, Long>> getDatanodeNetworkCounts();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f4ae85b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index 8b0d5cb..a77c943 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -96,7 +96,11 @@ public class TestDataNodeMXBean {
   int xceiverCount = (Integer)mbs.getAttribute(mxbeanName,
   "XceiverCount");
   Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
-
+  // Ensure mxbean's XmitsInProgress is same as the DataNode's
+  // live value.
+  int xmitsInProgress =
+  (Integer) mbs.getAttribute(mxbeanName, "XmitsInProgress");
+  Assert.assertEquals(datanode.getXmitsInProgress(), xmitsInProgress);
   String bpActorInfo = (String)mbs.getAttribute(mxbeanName,
   "BPServiceActorInfo");
   Assert.assertEquals(datanode.getBPServiceActorInfo(), bpActorInfo);





[50/50] [abbrv] hadoop git commit: YARN-4515. [YARN-3368] Support hosting web UI framework inside YARN RM. (Sunil G via wangda) YARN-5000. [YARN-3368] App attempt page is not loading when timeline ser

2016-10-17 Thread wangda
YARN-4515. [YARN-3368] Support hosting web UI framework inside YARN RM. (Sunil G via wangda)
YARN-5000. [YARN-3368] App attempt page is not loading when timeline server is not started (Sunil G via wangda)
YARN-5038. [YARN-3368] Application and Container pages shows wrong values when RM is stopped. (Sunil G via wangda)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31baeb7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31baeb7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31baeb7d

Branch: refs/heads/YARN-3368
Commit: 31baeb7d08d504bec4792557ba68506ec95fd137
Parents: 6a79416
Author: Wangda Tan 
Authored: Tue May 17 22:28:24 2016 -0700
Committer: Wangda Tan 
Committed: Mon Oct 17 11:30:30 2016 -0700

--
 LICENSE.txt |  2 +
 .../resources/assemblies/hadoop-yarn-dist.xml   |  7 ++
 .../hadoop/yarn/conf/YarnConfiguration.java | 23 ++
 .../src/main/resources/yarn-default.xml | 26 +++
 .../server/resourcemanager/ResourceManager.java | 76 +---
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  |  4 +-
 .../webapp/app/adapters/yarn-app-attempt.js |  4 +-
 .../webapp/app/adapters/yarn-container-log.js   |  2 +-
 .../main/webapp/app/adapters/yarn-node-app.js   | 10 ++-
 .../webapp/app/adapters/yarn-node-container.js  | 10 ++-
 .../src/main/webapp/app/adapters/yarn-node.js   |  5 +-
 .../main/webapp/app/components/timeline-view.js | 17 +++--
 .../main/webapp/app/components/tree-selector.js |  4 +-
 .../main/webapp/app/helpers/log-files-comma.js  |  2 +-
 .../src/main/webapp/app/helpers/node-link.js|  2 +-
 .../src/main/webapp/app/helpers/node-menu.js|  6 +-
 .../src/main/webapp/app/helpers/node-name.js| 46 
 .../main/webapp/app/models/yarn-app-attempt.js  | 72 ++-
 .../src/main/webapp/app/models/yarn-app.js  | 14 
 .../main/webapp/app/models/yarn-container.js|  7 ++
 .../main/webapp/app/routes/yarn-app-attempt.js  |  6 +-
 .../webapp/app/serializers/yarn-app-attempt.js  |  5 +-
 .../src/main/webapp/app/serializers/yarn-app.js | 11 ++-
 .../webapp/app/serializers/yarn-container.js|  3 +-
 .../webapp/app/serializers/yarn-node-app.js |  5 +-
 .../app/serializers/yarn-node-container.js  |  5 +-
 .../main/webapp/app/serializers/yarn-rm-node.js |  5 +-
 .../main/webapp/app/templates/application.hbs   | 21 +-
 .../templates/components/app-attempt-table.hbs  | 22 +-
 .../app/templates/components/app-table.hbs  |  8 +--
 .../templates/components/container-table.hbs|  4 +-
 .../templates/components/node-menu-panel.hbs| 44 
 .../app/templates/components/timeline-view.hbs  |  2 +-
 .../src/main/webapp/app/templates/error.hbs |  2 +-
 .../webapp/app/templates/yarn-app-attempt.hbs   |  4 ++
 .../src/main/webapp/app/templates/yarn-app.hbs  |  2 +-
 .../src/main/webapp/app/templates/yarn-apps.hbs |  9 ++-
 .../main/webapp/app/templates/yarn-node-app.hbs |  4 +-
 .../webapp/app/templates/yarn-node-apps.hbs | 12 ++--
 .../app/templates/yarn-node-container.hbs   |  2 +-
 .../app/templates/yarn-node-containers.hbs  | 12 ++--
 .../src/main/webapp/app/templates/yarn-node.hbs |  2 +-
 .../main/webapp/app/templates/yarn-nodes.hbs| 10 ++-
 .../main/webapp/app/templates/yarn-queue.hbs|  8 ++-
 .../src/main/webapp/config/environment.js   |  2 +-
 .../hadoop-yarn-ui/src/main/webapp/package.json |  2 +
 .../webapp/tests/unit/helpers/node-name-test.js | 28 
 47 files changed, 486 insertions(+), 93 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31baeb7d/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 45b6cdf..5efbd14 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1882,6 +1882,7 @@ The Apache Hadoop YARN Web UI component bundles the following files under the MI
  - datatables v1.10.8 (https://datatables.net/)
  - moment v2.10.6 (http://momentjs.com/) - Copyright (c) 2011-2015 Tim Wood, Iskren Chernev, Moment.js contributors
  - em-helpers v0.5.8 (https://github.com/sreenaths/em-helpers)
+ - ember-array-contains-helper v1.0.2 (https://github.com/bmeurant/ember-array-contains-helper)
  - ember-cli-app-version v0.5.8 (https://github.com/EmberSherpa/ember-cli-app-version) - Authored by Taras Mankovski
  - ember-cli-babel v5.1.6 (https://github.com/babel/ember-cli-babel) - Authored by Stefan Penner
  - ember-cli-content-security-policy v0.4.0 (https://github.com/rwjblue/ember-cli-content-security-policy)
@@ -1895,6 +1896,7 @@ The Apache Hadoop YARN Web UI component bundles the following files under the MI
  - ember-cli-sri 

[40/50] [abbrv] hadoop git commit: YARN-4849. Addendum patch to fix document. (Wangda Tan via Sunil G)

2016-10-17 Thread wangda
YARN-4849. Addendum patch to fix document. (Wangda Tan via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20e55199
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20e55199
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20e55199

Branch: refs/heads/YARN-3368
Commit: 20e55199799cca21247acf93b5ec0afbe180a70b
Parents: 48b4396
Author: sunilg 
Authored: Wed Aug 24 16:10:19 2016 +0530
Committer: Wangda Tan 
Committed: Mon Oct 17 11:30:30 2016 -0700

--
 BUILDING.txt|  2 +-
 .../src/site/markdown/YarnUI2.md| 36 +++-
 2 files changed, 21 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20e55199/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 4424579..908c366 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -130,7 +130,7 @@ Maven build goals:
   * Use -Psrc to create a project source TAR.GZ
   * Use -Dtar to create a TAR with the distribution (using -Pdist)
   * Use -Preleasedocs to include the changelog and release docs (requires Internet connectivity)
-  * Use -Pyarn-ui to build YARN UI v2. (Requires Internet connectivity, and it is for dev use only)
+  * Use -Pyarn-ui to build YARN UI v2. (Requires Internet connectivity)
 
  Snappy build options:
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/20e55199/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
index 575ebc7..ff48183 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnUI2.md
@@ -17,27 +17,31 @@
 
 Hadoop: YARN-UI V2
 =
-*This is a WIP project, nobody should use it in production.*
 
 Prerequisites
 -
 
-You will need the following things properly installed on your computer.
+If you run the RM locally on your computer for testing, you need the following properly installed.
 
-* Install Node.js with NPM: https://nodejs.org/download/
-* After Node.js installed, install bower: `npm install -g bower`.
-* Install Ember-cli: `npm install -g ember-cli`
+- Install Node.js with NPM: https://nodejs.org/download
+- After Node.js is installed, install `corsproxy`: `npm install -g corsproxy`.
 
-BUILD
-
-* Please refer to BUILDING.txt in the top directory and pass -Pyarn-ui to build UI-related code
-* Execute `mvn test -Pyarn-ui` to run unit tests
 
-Try it
---
+Configurations
+-
+
+*In yarn-site.xml*
+
+| Configuration Property | Description |
+|:---- |:---- |
+| `yarn.resourcemanager.webapp.ui2.enable` | Server-side flag indicating whether the new YARN-UI v2 is enabled. Defaults to `false`. |
+| `yarn.resourcemanager.webapp.ui2.address` | The ResourceManager address and port that host YARN-UI v2. Defaults to `localhost:8288`. |
 
-* Packaging and deploying Hadoop in this branch
-* In `hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/config.js`, change `timelineWebUrl` and `rmWebUrl` to your YARN RM/Timeline server web address.
-* If you are running YARN RM in your localhost, you should update `localBaseUrl` to `localhost:1337/`, install `npm install -g corsproxy` and run `corsproxy` to avoid CORS errors. More details: `https://www.npmjs.com/package/corsproxy`.
-* Run `ember serve` under `hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/`
-* Visit your app at [http://localhost:4200](http://localhost:4200).
+*In $HADOOP_PREFIX/share/hadoop/yarn/webapps/rm/config/configs.env*
+
+- Update timelineWebAddress and rmWebAddress to the actual addresses of the resource manager and timeline server (see the sketch below).
+- If you run the RM locally on your computer just for testing, you need to keep `corsproxy` running. Otherwise, set `localBaseAddress` to empty.
+
+Use it
+-
+Open your browser, go to `rm-address:8288` and try it!
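
To make the configs.env bullets above concrete: assuming the file carries a
JS-style ENV object (as the loader initializer earlier in this series
suggests), a hypothetical local-test version could look like the following;
all addresses are placeholders:

ENV = {
  hosts: {
    // Keep `corsproxy` running (it defaults to port 1337) while testing
    // locally; otherwise set this to the empty string.
    localBaseAddress: "localhost:1337",
    // Actual addresses of the timeline server and the resource manager.
    timelineWebAddress: "timeline-host:8188",
    rmWebAddress: "rm-host:8088",
  },
};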





[45/50] [abbrv] hadoop git commit: YARN-5161. [YARN-3368] Add Apache Hadoop logo in YarnUI home page. (Kai Sasaki via Sunil G)

2016-10-17 Thread wangda
YARN-5161. [YARN-3368] Add Apache Hadoop logo in YarnUI home page. (Kai Sasaki via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fe2dafc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fe2dafc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fe2dafc

Branch: refs/heads/YARN-3368
Commit: 8fe2dafc67e9a67b6dfeb5df51186d1c8e59ef8d
Parents: 80b88be
Author: Sunil 
Authored: Mon Jul 11 14:31:25 2016 +0530
Committer: Wangda Tan 
Committed: Mon Oct 17 11:30:30 2016 -0700

--
 .../src/main/webapp/app/styles/app.css |  11 +++
 .../src/main/webapp/app/templates/application.hbs  |  12 +++-
 .../webapp/public/assets/images/hadoop_logo.png| Bin 0 -> 26495 bytes
 3 files changed, 18 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fe2dafc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
index bcb6aab..e2d09dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
@@ -157,3 +157,14 @@ table.dataTable thead .sorting_desc_disabled {
   stroke: #ccc;  
   stroke-width: 2px;
 }
+
+.hadoop-brand-image {
+  margin-top: -10px;
+  width: auto;
+  height: 45px;
+}
+
+li a.navigation-link.ember-view {
+  color: #2196f3;
+  font-weight: bold;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fe2dafc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
index b45ec6b..03b2c4a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
@@ -20,35 +20,37 @@
   
 
 
+  
+
+  
   
 Toggle navigation
 
 
 
   
-  Apache Hadoop YARN
 
 
 
 
   
 {{#link-to 'yarn-queue' 'root' tagName="li"}}
-  {{#link-to 'yarn-queue' 'root'}}Queues
+  {{#link-to 'yarn-queue' 'root' class="navigation-link"}}Queues
 (current)
   {{/link-to}}
 {{/link-to}}
 {{#link-to 'yarn-apps' tagName="li"}}
-  {{#link-to 'yarn-apps'}}Applications
+  {{#link-to 'yarn-apps' class="navigation-link"}}Applications
 (current)
   {{/link-to}}
 {{/link-to}}
 {{#link-to 'cluster-overview' tagName="li"}}
-  {{#link-to 'cluster-overview'}}Cluster Overview
+  {{#link-to 'cluster-overview' class="navigation-link"}}Cluster Overview
 (current)
   {{/link-to}}
 {{/link-to}}
 {{#link-to 'yarn-nodes' tagName="li"}}
-  {{#link-to 'yarn-nodes'}}Nodes
+  {{#link-to 'yarn-nodes' class="navigation-link"}}Nodes
 (current)
   {{/link-to}}
 {{/link-to}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fe2dafc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
new file mode 100644
index 000..275d39e
Binary files /dev/null and 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/public/assets/images/hadoop_logo.png
 differ





[37/50] [abbrv] hadoop git commit: YARN-5509. Build error due to preparing 3.0.0-alpha2 deployment. (Kai Sasaki via wangda)

2016-10-17 Thread wangda
YARN-5509. Build error due to preparing 3.0.0-alpha2 deployment. (Kai Sasaki via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f30037de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f30037de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f30037de

Branch: refs/heads/YARN-3368
Commit: f30037deee7571efa28333c0678ad2fb9ddc3e26
Parents: 22c4b49
Author: Wangda Tan 
Authored: Thu Aug 11 14:59:14 2016 -0700
Committer: Wangda Tan 
Committed: Mon Oct 17 11:30:30 2016 -0700

--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f30037de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 6d46fda..2933a76 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -20,12 +20,12 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha1-SNAPSHOT</version>
+    <version>3.0.0-alpha2-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-ui</artifactId>
-  <version>3.0.0-alpha1-SNAPSHOT</version>
+  <version>3.0.0-alpha2-SNAPSHOT</version>
   <name>Apache Hadoop YARN UI</name>
   <packaging>${packaging.type}</packaging>
 





[48/50] [abbrv] hadoop git commit: YARN-4517. Add nodes page and fix bunch of license issues. (Varun Saxena via wangda)

2016-10-17 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f115a1a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs
new file mode 100644
index 000..c546bf7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/error.hbs
@@ -0,0 +1,19 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+Sorry, Error Occurred.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f115a1a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs
new file mode 100644
index 000..588ea44
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/notfound.hbs
@@ -0,0 +1,20 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}
+
+404, Not Found
+Please Check your URL

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f115a1a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
index e58d6bd..3a79080 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-apps.hbs
@@ -1,3 +1,3 @@
 {{app-table table-id="apps-table" arr=model}}
-{{simple-table table-id="apps-table" bFilter=true colTypes="elapsed-time" colTargets="7"}}
-{{outlet}}
\ No newline at end of file
+{{simple-table table-id="apps-table" bFilter=true colsOrder="0,desc" colTypes="natural elapsed-time" colTargets="0 7"}}
+{{outlet}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f115a1a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
new file mode 100644
index 000..9cc3b0f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/app/templates/yarn-container-log.hbs
@@ -0,0 +1,36 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+--}}

[32/50] [abbrv] hadoop git commit: YARN-5504. [YARN-3368] Fix YARN UI build pom.xml (Sreenath Somarajapuram via Sunil G)

2016-10-17 Thread wangda
YARN-5504. [YARN-3368] Fix YARN UI build pom.xml (Sreenath Somarajapuram via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ef1c371
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ef1c371
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ef1c371

Branch: refs/heads/YARN-3368
Commit: 9ef1c37146ce06838ccc7168d3e0bc73aaf0b21b
Parents: 182f65a
Author: sunilg 
Authored: Thu Aug 25 23:21:29 2016 +0530
Committer: Wangda Tan 
Committed: Mon Oct 17 11:30:30 2016 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  | 59 +---
 .../src/main/webapp/ember-cli-build.js  |  2 +-
 .../hadoop-yarn-ui/src/main/webapp/package.json |  3 +-
 3 files changed, 17 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ef1c371/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 2933a76..fca8d30 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -35,7 +35,7 @@
 node
 v0.12.2
 2.10.0
-false
+false
   
 
   
@@ -60,19 +60,20 @@
   
 
   
- maven-clean-plugin
- 3.0.0
- 
-false
-
-   
-  
${basedir}/src/main/webapp/bower_components
-   
-   
-  
${basedir}/src/main/webapp/node_modules
-   
-
- 
+maven-clean-plugin
+3.0.0
+
+  ${keep-ui-build-cache}
+  false
+  
+
+  
${basedir}/src/main/webapp/bower_components
+
+
+  ${basedir}/src/main/webapp/node_modules
+
+  
+
   
 
   
@@ -126,21 +127,6 @@
 
   
   
-generate-sources
-bower --allow-root install
-
-  exec
-
-
-  ${webappDir}
-  bower
-  
---allow-root
-install
-  
-
-  
-  
 ember build
 generate-sources
 
@@ -158,21 +144,6 @@
 
   
   
-ember test
-generate-resources
-
-  exec
-
-
-  ${skipTests}
-  ${webappDir}
-  ember
-  
-test
-  
-
-  
-  
 cleanup tmp
 generate-sources
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ef1c371/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
index d21cc3e..7736c75 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
@@ -22,7 +22,7 @@ var EmberApp = require('ember-cli/lib/broccoli/ember-app');
 
 module.exports = function(defaults) {
   var app = new EmberApp(defaults, {
-// Add options here
+hinting: false
   });
 
   
app.import("bower_components/datatables/media/css/jquery.dataTables.min.css");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ef1c371/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
index baa473a..6a4eb16 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
@@ -9,8 +9,7 @@
   },
   "scripts": {
 "build": "ember build",
-"start": "ember server",
-"test": "ember test"
+"start": "ember server"
   },
   "repository": "",
   "engines": {



[34/50] [abbrv] hadoop git commit: YARN-5503. [YARN-3368] Add missing hidden files in webapp folder for deployment (Sreenath Somarajapuram via Sunil G)

2016-10-17 Thread wangda
YARN-5503. [YARN-3368] Add missing hidden files in webapp folder for deployment (Sreenath Somarajapuram via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d5de848
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d5de848
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d5de848

Branch: refs/heads/YARN-3368
Commit: 3d5de84860bec3f448f9d71b5848b2cdadc7cb6e
Parents: a1a34a3
Author: sunilg 
Authored: Tue Aug 30 20:58:35 2016 +0530
Committer: Wangda Tan 
Committed: Mon Oct 17 11:30:30 2016 -0700

--
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  | 19 ++-
 .../hadoop-yarn-ui/src/main/webapp/.bowerrc |  4 +++
 .../src/main/webapp/.editorconfig   | 34 
 .../hadoop-yarn-ui/src/main/webapp/.ember-cli   |  9 ++
 .../hadoop-yarn-ui/src/main/webapp/.jshintrc| 32 ++
 .../src/main/webapp/.watchmanconfig |  3 ++
 6 files changed, 100 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d5de848/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index fca8d30..b750a73 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -30,7 +30,7 @@
   ${packaging.type}
 
   
-jar
+war
 src/main/webapp
 node
 v0.12.2
@@ -52,9 +52,26 @@
 src/main/webapp/bower.json
 src/main/webapp/package.json
 src/main/webapp/testem.json
+
+src/main/webapp/dist/**/*
+src/main/webapp/tmp/**/*
 src/main/webapp/public/assets/images/**/*
+src/main/webapp/public/assets/images/*
 src/main/webapp/public/robots.txt
+
+public/assets/images/**/*
 public/crossdomain.xml
+
+src/main/webapp/.tmp/**/*
+src/main/webapp/.bowerrc
+src/main/webapp/.editorconfig
+src/main/webapp/.ember-cli
+src/main/webapp/.gitignore
+src/main/webapp/.jshintrc
+src/main/webapp/.travis.yml
+src/main/webapp/.watchmanconfig
+src/main/webapp/tests/.jshintrc
+src/main/webapp/blueprints/.jshintrc
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d5de848/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
new file mode 100644
index 000..959e169
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -0,0 +1,4 @@
+{
+  "directory": "bower_components",
+  "analytics": false
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d5de848/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
new file mode 100644
index 000..47c5438
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.editorconfig
@@ -0,0 +1,34 @@
+# EditorConfig helps developers define and maintain consistent
+# coding styles between different editors and IDEs
+# editorconfig.org
+
+root = true
+
+
+[*]
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+
+[*.js]
+indent_style = space
+indent_size = 2
+
+[*.hbs]
+insert_final_newline = false
+indent_style = space
+indent_size = 2
+
+[*.css]
+indent_style = space
+indent_size = 2
+
+[*.html]
+indent_style = space
+indent_size = 2
+
+[*.{diff,md}]
+trim_trailing_whitespace = false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d5de848/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.ember-cli
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.ember-cli 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.ember-cli
new file mode 100644
index 000..ee64cfe
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.ember-cli
@@ -0,0 +1,9 @@
+{
+  /**
+Ember CLI sends analytics information by default. The data is 

[43/50] [abbrv] hadoop git commit: YARN-5583. [YARN-3368] Fix wrong paths in .gitignore (Sreenath Somarajapuram via Sunil G)

2016-10-17 Thread wangda
YARN-5583. [YARN-3368] Fix wrong paths in .gitignore (Sreenath Somarajapuram via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1a34a32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1a34a32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1a34a32

Branch: refs/heads/YARN-3368
Commit: a1a34a323630dfc53703592f3338a043f28032f9
Parents: 9ef1c37
Author: sunilg 
Authored: Tue Aug 30 20:27:59 2016 +0530
Committer: Wangda Tan 
Committed: Mon Oct 17 11:30:30 2016 -0700

--
 .gitignore | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1a34a32/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 677bde6..f9a7163 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,8 +35,8 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.sass-cache
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/connect.lock
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/coverage/*
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/libpeerconnection.log
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webappnpm-debug.log
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapptestem.log
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/npm-debug.log
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/testem.log
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/dist
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tmp
 yarnregistry.pdf





[21/50] [abbrv] hadoop git commit: YARN-5321. [YARN-3368] Add resource usage for application by node managers (Wangda Tan via Sunil G) YARN-5320. [YARN-3368] Add resource usage by applications and que

2016-10-17 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/87f22ffb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
index ff49403..b945451 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps.js
@@ -20,7 +20,9 @@ import Ember from 'ember';
 
 export default Ember.Route.extend({
   model() {
-    var apps = this.store.findAll('yarn-app');
-    return apps;
+    return Ember.RSVP.hash({
+      apps: this.store.findAll('yarn-app'),
+      clusterMetrics: this.store.findAll('ClusterMetric'),
+    });
   }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87f22ffb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
new file mode 100644
index 000..8719170
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/apps.js
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87f22ffb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
new file mode 100644
index 000..8719170
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87f22ffb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
index 6e57388..64a1b3e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-node.js
@@ -22,6 +22,7 @@ export default Ember.Route.extend({
   model(param) {
 // Fetches data from both NM and RM. RM is queried to get node usage info.
 return Ember.RSVP.hash({
+  nodeInfo: { id: param.node_id, addr: param.node_addr },
   node: this.store.findRecord('yarn-node', param.node_addr),
   rmNode: this.store.findRecord('yarn-rm-node', param.node_id)
 });

