hadoop git commit: YARN-5183. [YARN-3368] Support for responsive navbar when window is resized. (Kai Sasaki via Sunil G)

2016-06-09 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3368 72151c244 -> b775df675


YARN-5183. [YARN-3368] Support for responsive navbar when window is resized. 
(Kai Sasaki via Sunil G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b775df67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b775df67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b775df67

Branch: refs/heads/YARN-3368
Commit: b775df675432399142c5b552c36de50a475ba236
Parents: 72151c2
Author: Sunil 
Authored: Fri Jun 10 10:33:41 2016 +0530
Committer: Sunil 
Committed: Fri Jun 10 10:33:41 2016 +0530

--
 .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b775df67/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
index bce18ce..d21cc3e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
@@ -32,6 +32,9 @@ module.exports = function(defaults) {
   app.import("bower_components/select2/dist/js/select2.min.js");
   app.import('bower_components/jquery-ui/jquery-ui.js');
   app.import('bower_components/more-js/dist/more.js');
+  app.import('bower_components/bootstrap/dist/css/bootstrap.css');
+  app.import('bower_components/bootstrap/dist/css/bootstrap-theme.css');
+  app.import('bower_components/bootstrap/dist/js/bootstrap.min.js');
 
   // Use `app.import` to add additional libraries to the generated
   // output files.





[06/50] [abbrv] hadoop git commit: Revert "HDFS-10390. Implement asynchronous setAcl/getAclStatus for DistributedFileSystem. Contributed by Xiaobing Zhou"

2016-06-09 Thread aengineer
Revert "HDFS-10390. Implement asynchronous setAcl/getAclStatus for 
DistributedFileSystem.  Contributed by Xiaobing Zhou"

This reverts commit 02d4e478a398c24a5e5e8ea2b0822a5b9d4a97ae.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b82c74b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b82c74b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b82c74b9

Branch: refs/heads/HDFS-7240
Commit: b82c74b9102ba95eae776501ed4484be9edd8c96
Parents: 5ee5912
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:14 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:14 2016 -0700

--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  59 
 .../hadoop/hdfs/DistributedFileSystem.java  |   3 -
 .../ClientNamenodeProtocolTranslatorPB.java |  30 +-
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 310 ---
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  |  15 +-
 .../hdfs/server/namenode/FSAclBaseTest.java |  12 +-
 6 files changed, 18 insertions(+), 411 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b82c74b9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
index 29bac2a..6bfd71d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
@@ -19,16 +19,12 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
-import java.util.List;
 import java.util.concurrent.Future;
 
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.util.concurrent.AsyncGetFuture;
 import org.apache.hadoop.ipc.Client;
@@ -87,7 +83,6 @@ public class AsyncDistributedFileSystem {
  public Future<Void> rename(Path src, Path dst,
   final Options.Rename... options) throws IOException {
 dfs.getFsStatistics().incrementWriteOps(1);
-dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.RENAME);
 
 final Path absSrc = dfs.fixRelativePart(src);
 final Path absDst = dfs.fixRelativePart(dst);
@@ -116,7 +111,6 @@ public class AsyncDistributedFileSystem {
  public Future<Void> setPermission(Path p, final FsPermission permission)
   throws IOException {
 dfs.getFsStatistics().incrementWriteOps(1);
-dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_PERMISSION);
 final Path absPath = dfs.fixRelativePart(p);
 final boolean isAsync = Client.isAsynchronousMode();
 Client.setAsynchronousMode(true);
@@ -148,7 +142,6 @@ public class AsyncDistributedFileSystem {
 }
 
 dfs.getFsStatistics().incrementWriteOps(1);
-dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_OWNER);
 final Path absPath = dfs.fixRelativePart(p);
 final boolean isAsync = Client.isAsynchronousMode();
 Client.setAsynchronousMode(true);
@@ -159,56 +152,4 @@ public class AsyncDistributedFileSystem {
   Client.setAsynchronousMode(isAsync);
 }
   }
-
-  /**
-   * Fully replaces ACL of files and directories, discarding all existing
-   * entries.
-   *
-   * @param p
-   *  Path to modify
-   * @param aclSpec
-   *  List describing modifications, must include entries for
-   *  user, group, and others for compatibility with permission bits.
-   * @throws IOException
-   *   if an ACL could not be modified
-   * @return an instance of Future, #get of which is invoked to wait for
-   * asynchronous call being finished.
-   */
-  public Future<Void> setAcl(Path p, final List<AclEntry> aclSpec)
-  throws IOException {
-dfs.getFsStatistics().incrementWriteOps(1);
-dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_ACL);
-final Path absPath = dfs.fixRelativePart(p);
-final boolean isAsync = Client.isAsynchronousMode();
-Client.setAsynchronousMode(true);
-try {
-  dfs.getClient().setAcl(dfs.getPathName(absPath), aclSpec);
-  return getReturnValue();
-} finally {
-  
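The methods removed above (and re-applied later in this series) all wrap their RPC in the same idiom: remember the client's async-mode flag, force it on, issue the call, and restore the flag in a finally block. A minimal sketch of that pattern (simplified names; not the exact Hadoop code) looks like:

```java
import org.apache.hadoop.ipc.Client;

public class AsyncModeScope {
  // Run an RPC-issuing action with the IPC layer flipped to asynchronous
  // mode, restoring whatever mode the caller had on the way out.
  static void inAsyncMode(Runnable rpcCall) {
    final boolean wasAsync = Client.isAsynchronousMode();
    Client.setAsynchronousMode(true);
    try {
      rpcCall.run();
    } finally {
      Client.setAsynchronousMode(wasAsync);
    }
  }
}
```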

[21/50] [abbrv] hadoop git commit: HDFS-10458. getFileEncryptionInfo should return quickly for non-encrypted cluster.

2016-06-09 Thread aengineer
HDFS-10458. getFileEncryptionInfo should return quickly for non-encrypted 
cluster.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6de9213d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6de9213d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6de9213d

Branch: refs/heads/HDFS-7240
Commit: 6de9213df111a9a4ed875db995d67af72d08a798
Parents: a3f78d8
Author: Zhe Zhang 
Authored: Mon Jun 6 15:52:39 2016 -0700
Committer: Zhe Zhang 
Committed: Mon Jun 6 15:52:39 2016 -0700

--
 .../server/namenode/EncryptionZoneManager.java  | 35 +---
 .../server/namenode/FSDirEncryptionZoneOp.java  |  2 +-
 2 files changed, 31 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6de9213d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 8454c04..41dbb59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -95,7 +95,7 @@ public class EncryptionZoneManager {
 }
   }
 
-  private final TreeMap<Long, EncryptionZoneInt> encryptionZones;
+  private TreeMap<Long, EncryptionZoneInt> encryptionZones = null;
   private final FSDirectory dir;
   private final int maxListEncryptionZonesResponses;
 
@@ -106,7 +106,6 @@ public class EncryptionZoneManager {
*/
   public EncryptionZoneManager(FSDirectory dir, Configuration conf) {
 this.dir = dir;
-    encryptionZones = new TreeMap<Long, EncryptionZoneInt>();
 maxListEncryptionZonesResponses = conf.getInt(
 DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
 DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT
@@ -143,6 +142,9 @@ public class EncryptionZoneManager {
   CipherSuite suite, CryptoProtocolVersion version, String keyName) {
 final EncryptionZoneInt ez = new EncryptionZoneInt(
 inodeId, suite, version, keyName);
+if (encryptionZones == null) {
+  encryptionZones = new TreeMap<>();
+}
 encryptionZones.put(inodeId, ez);
   }
 
@@ -153,7 +155,9 @@ public class EncryptionZoneManager {
*/
   void removeEncryptionZone(Long inodeId) {
 assert dir.hasWriteLock();
-encryptionZones.remove(inodeId);
+if (hasCreatedEncryptionZone()) {
+  encryptionZones.remove(inodeId);
+}
   }
 
   /**
@@ -201,6 +205,9 @@ public class EncryptionZoneManager {
   private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) {
 assert dir.hasReadLock();
 Preconditions.checkNotNull(iip);
+if (!hasCreatedEncryptionZone()) {
+  return null;
+}
    List<INode> inodes = iip.getReadOnlyINodes();
 for (int i = inodes.size() - 1; i >= 0; i--) {
   final INode inode = inodes.get(i);
@@ -313,7 +320,8 @@ public class EncryptionZoneManager {
   throw new IOException("Attempt to create an encryption zone for a 
file.");
 }
 
-if (encryptionZones.get(srcINode.getId()) != null) {
+if (hasCreatedEncryptionZone() && encryptionZones.
+get(srcINode.getId()) != null) {
   throw new IOException("Directory " + src + " is already an encryption " +
   "zone.");
 }
@@ -340,6 +348,9 @@ public class EncryptionZoneManager {
  BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
   throws IOException {
 assert dir.hasReadLock();
+if (!hasCreatedEncryptionZone()) {
+      return new BatchedListEntries<EncryptionZone>(
+          Lists.<EncryptionZone>newArrayList(), false);
+}
    NavigableMap<Long, EncryptionZoneInt> tailMap = encryptionZones.tailMap
        (prevId, false);
 final int numResponses = Math.min(maxListEncryptionZonesResponses,
@@ -379,7 +390,18 @@ public class EncryptionZoneManager {
* @return number of encryption zones.
*/
   public int getNumEncryptionZones() {
-return encryptionZones.size();
+return hasCreatedEncryptionZone() ?
+encryptionZones.size() : 0;
+  }
+
+  /**
+   * @return Whether there has been any attempt to create an encryption zone in
+   * the cluster at all. If not, it is safe to quickly return null when
+   * checking the encryption information of any file or directory in the
+   * cluster.
+   */
+  public boolean hasCreatedEncryptionZone() {
+return encryptionZones != null;
   }
 
   /**
@@ -387,6 +409,9 @@ public class 
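The change boils down to a lazy-initialization pattern: the TreeMap stays null until the first encryption zone is created, so non-encrypted clusters pay only a null check on every lookup. A minimal standalone sketch of the pattern (simplified types; not the full EncryptionZoneManager):

```java
import java.util.TreeMap;

class LazyZoneMap {
  // Null until the first zone is created; non-encrypted clusters never
  // allocate the map at all.
  private TreeMap<Long, String> zones = null;

  boolean hasCreatedEncryptionZone() {
    return zones != null;
  }

  void addZone(long inodeId, String keyName) {
    if (zones == null) {
      zones = new TreeMap<>();  // allocated on first use only
    }
    zones.put(inodeId, keyName);
  }

  String getZone(long inodeId) {
    if (!hasCreatedEncryptionZone()) {
      return null;  // fast path: no zone ever existed
    }
    return zones.get(inodeId);
  }
}
```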

[15/50] [abbrv] hadoop git commit: Revert "Revert "HDFS-10390. Implement asynchronous setAcl/getAclStatus for DistributedFileSystem. Contributed by Xiaobing Zhou""

2016-06-09 Thread aengineer
Revert "Revert "HDFS-10390. Implement asynchronous setAcl/getAclStatus for 
DistributedFileSystem.  Contributed by Xiaobing Zhou""

This reverts commit b82c74b9102ba95eae776501ed4484be9edd8c96.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3d81f38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3d81f38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3d81f38

Branch: refs/heads/HDFS-7240
Commit: b3d81f38da5d3d913e7b7ed498198c899c1e68b7
Parents: 574dcd3
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:30 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:30 2016 +0800

--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  59 
 .../hadoop/hdfs/DistributedFileSystem.java  |   3 +
 .../ClientNamenodeProtocolTranslatorPB.java |  30 +-
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 310 +++
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  |  15 +-
 .../hdfs/server/namenode/FSAclBaseTest.java |  12 +-
 6 files changed, 411 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3d81f38/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
index 6bfd71d..29bac2a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
@@ -19,12 +19,16 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.concurrent.Future;
 
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.util.concurrent.AsyncGetFuture;
 import org.apache.hadoop.ipc.Client;
@@ -83,6 +87,7 @@ public class AsyncDistributedFileSystem {
  public Future<Void> rename(Path src, Path dst,
   final Options.Rename... options) throws IOException {
 dfs.getFsStatistics().incrementWriteOps(1);
+dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.RENAME);
 
 final Path absSrc = dfs.fixRelativePart(src);
 final Path absDst = dfs.fixRelativePart(dst);
@@ -111,6 +116,7 @@ public class AsyncDistributedFileSystem {
  public Future<Void> setPermission(Path p, final FsPermission permission)
   throws IOException {
 dfs.getFsStatistics().incrementWriteOps(1);
+dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_PERMISSION);
 final Path absPath = dfs.fixRelativePart(p);
 final boolean isAsync = Client.isAsynchronousMode();
 Client.setAsynchronousMode(true);
@@ -142,6 +148,7 @@ public class AsyncDistributedFileSystem {
 }
 
 dfs.getFsStatistics().incrementWriteOps(1);
+dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_OWNER);
 final Path absPath = dfs.fixRelativePart(p);
 final boolean isAsync = Client.isAsynchronousMode();
 Client.setAsynchronousMode(true);
@@ -152,4 +159,56 @@ public class AsyncDistributedFileSystem {
   Client.setAsynchronousMode(isAsync);
 }
   }
+
+  /**
+   * Fully replaces ACL of files and directories, discarding all existing
+   * entries.
+   *
+   * @param p
+   *  Path to modify
+   * @param aclSpec
+   *  List describing modifications, must include entries for
+   *  user, group, and others for compatibility with permission bits.
+   * @throws IOException
+   *   if an ACL could not be modified
+   * @return an instance of Future, #get of which is invoked to wait for
+   * asynchronous call being finished.
+   */
+  public Future<Void> setAcl(Path p, final List<AclEntry> aclSpec)
+  throws IOException {
+dfs.getFsStatistics().incrementWriteOps(1);
+dfs.getDFSOpsCountStatistics().incrementOpCounter(OpType.SET_ACL);
+final Path absPath = dfs.fixRelativePart(p);
+final boolean isAsync = Client.isAsynchronousMode();
+Client.setAsynchronousMode(true);
+try {
+  dfs.getClient().setAcl(dfs.getPathName(absPath), aclSpec);
+  return getReturnValue();
+} 

[04/50] [abbrv] hadoop git commit: Revert "HDFS-10430. Reuse FileSystem#access in TestAsyncDFS. Contributed by Xiaobing Zhou."

2016-06-09 Thread aengineer
Revert "HDFS-10430. Reuse FileSystem#access in TestAsyncDFS. Contributed by 
Xiaobing Zhou."

This reverts commit 21890c4239b6a82fd6aab3454ce396efe7b5.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8cf47d85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8cf47d85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8cf47d85

Branch: refs/heads/HDFS-7240
Commit: 8cf47d8589badfc07ef4bca3328a420c7c68abbd
Parents: 5360da8
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:12 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:12 2016 -0700

--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 36 +++-
 1 file changed, 35 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cf47d85/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
index c7615a9..ddcf492 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
@@ -34,6 +34,7 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -45,16 +46,19 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.TestDFSPermission.PermissionGenerator;
 import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
 import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
 import org.apache.hadoop.ipc.AsyncCallLimitExceededException;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -441,7 +445,7 @@ public class TestAsyncDFS {
 for (int i = 0; i < NUM_TESTS; i++) {
   assertTrue(fs.exists(dsts[i]));
   FsPermission fsPerm = new FsPermission(permissions[i]);
-  fs.access(dsts[i], fsPerm.getUserAction());
+  checkAccessPermissions(fs.getFileStatus(dsts[i]), 
fsPerm.getUserAction());
 }
 
 // test setOwner
@@ -470,4 +474,34 @@ public class TestAsyncDFS {
   assertTrue("group2".equals(fs.getFileStatus(dsts[i]).getGroup()));
 }
   }
+
+  static void checkAccessPermissions(FileStatus stat, FsAction mode)
+  throws IOException {
+checkAccessPermissions(UserGroupInformation.getCurrentUser(), stat, mode);
+  }
+
+  static void checkAccessPermissions(final UserGroupInformation ugi,
+  FileStatus stat, FsAction mode) throws IOException {
+FsPermission perm = stat.getPermission();
+String user = ugi.getShortUserName();
+    List<String> groups = Arrays.asList(ugi.getGroupNames());
+
+if (user.equals(stat.getOwner())) {
+  if (perm.getUserAction().implies(mode)) {
+return;
+  }
+} else if (groups.contains(stat.getGroup())) {
+  if (perm.getGroupAction().implies(mode)) {
+return;
+  }
+} else {
+  if (perm.getOtherAction().implies(mode)) {
+return;
+  }
+}
+throw new AccessControlException(String.format(
+"Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat
+.getPath(), stat.getOwner(), stat.getGroup(),
+stat.isDirectory() ? "d" : "-", perm));
+  }
 }





[39/50] [abbrv] hadoop git commit: HDFS-10220. A large number of expired leases can make namenode unresponsive and cause failover (Nicolas Fraison via raviprak)

2016-06-09 Thread aengineer
HDFS-10220. A large number of expired leases can make namenode unresponsive and 
cause failover (Nicolas Fraison via raviprak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae047655
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae047655
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae047655

Branch: refs/heads/HDFS-7240
Commit: ae047655f4355288406cd5396fb4e3ea7c307b14
Parents: 0af96a1
Author: Ravi Prakash 
Authored: Wed Jun 8 13:44:22 2016 -0700
Committer: Ravi Prakash 
Committed: Wed Jun 8 13:44:22 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 10 +
 .../hdfs/server/common/HdfsServerConstants.java |  1 -
 .../hdfs/server/namenode/FSNamesystem.java  | 42 
 .../hdfs/server/namenode/LeaseManager.java  | 21 --
 .../src/main/resources/hdfs-default.xml | 18 +
 .../hdfs/server/namenode/TestLeaseManager.java  | 24 ++-
 6 files changed, 94 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae047655/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 19e1791..f18a6c6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -397,6 +397,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;
   public static final int DFS_NAMENODE_MAX_XATTR_SIZE_HARD_LIMIT = 32768;
 
+  public static final String  DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY =
+  "dfs.namenode.lease-recheck-interval-ms";
+  public static final long    DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_DEFAULT =
+      2000;
+  public static final String
+  DFS_NAMENODE_MAX_LOCK_HOLD_TO_RELEASE_LEASE_MS_KEY =
+  "dfs.namenode.max-lock-hold-to-release-lease-ms";
+  public static final long
+  DFS_NAMENODE_MAX_LOCK_HOLD_TO_RELEASE_LEASE_MS_DEFAULT = 25;
+
   public static final String  DFS_UPGRADE_DOMAIN_FACTOR = 
"dfs.namenode.upgrade.domain.factor";
   public static final int DFS_UPGRADE_DOMAIN_FACTOR_DEFAULT = 
DFS_REPLICATION_DEFAULT;
 
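For operators, the two new keys can be tuned like any other HDFS setting; a minimal sketch (the values shown are the defaults introduced above, and the standalone class is hypothetical):

```java
import org.apache.hadoop.conf.Configuration;

public class LeaseTuning {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // How often the lease monitor wakes up to look for expired leases.
    conf.setLong("dfs.namenode.lease-recheck-interval-ms", 2000L);
    // Upper bound on how long lease release may hold the namesystem write
    // lock per pass, so a flood of expired leases cannot starve other RPCs.
    conf.setLong("dfs.namenode.max-lock-hold-to-release-lease-ms", 25L);
    System.out.println(conf.get("dfs.namenode.lease-recheck-interval-ms"));
  }
}
```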

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae047655/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index b2dda3c..3798394 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -361,7 +361,6 @@ public interface HdfsServerConstants {
   }
   
   String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";
-  long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;
 
   String CRYPTO_XATTR_ENCRYPTION_ZONE =
   "raw.hdfs.crypto.encryption.zone";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae047655/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index c9f2487..915ae97 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -76,6 +76,10 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPI
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY;
+import static 

[09/50] [abbrv] hadoop git commit: Revert "HADOOP-12957. Limit the number of outstanding async calls. Contributed by Xiaobing Zhou"

2016-06-09 Thread aengineer
Revert "HADOOP-12957. Limit the number of outstanding async calls.  Contributed 
by Xiaobing Zhou"

This reverts commit 1b9f18623ab55507bea94888317c7d63d0f4a6f2.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d36b221
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d36b221
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d36b221

Branch: refs/heads/HDFS-7240
Commit: 4d36b221a24e3b626bb91093b0bb0fd377061cae
Parents: f23d5df
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:18 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:18 2016 -0700

--
 .../hadoop/fs/CommonConfigurationKeys.java  |   3 -
 .../ipc/AsyncCallLimitExceededException.java|  36 ---
 .../main/java/org/apache/hadoop/ipc/Client.java |  66 +
 .../org/apache/hadoop/ipc/TestAsyncIPC.java | 199 ++--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  12 +-
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 238 ++-
 6 files changed, 109 insertions(+), 445 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d36b221/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 06614db..86e1b43 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -324,9 +324,6 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   public static final long HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT =
 4*60*60; // 4 hours
   
-  public static final String  IPC_CLIENT_ASYNC_CALLS_MAX_KEY =
-  "ipc.client.async.calls.max";
-  public static final int IPC_CLIENT_ASYNC_CALLS_MAX_DEFAULT = 100;
   public static final String  IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY = 
"ipc.client.fallback-to-simple-auth-allowed";
   public static final boolean 
IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT = false;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d36b221/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
deleted file mode 100644
index db97b6c..0000000
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ipc;
-
-import java.io.IOException;
-
-/**
- * Signals that an AsyncCallLimitExceededException has occurred. This class is
- * used to make application code using async RPC aware that limit of max async
- * calls is reached, application code need to retrieve results from response of
- * established async calls to avoid buffer overflow in order for follow-on 
async
- * calls going correctly.
- */
-public class AsyncCallLimitExceededException extends IOException {
-  private static final long serialVersionUID = 1L;
-
-  public AsyncCallLimitExceededException(String message) {
-super(message);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d36b221/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 

[13/50] [abbrv] hadoop git commit: Revert "Revert "HDFS-10346. Implement asynchronous setPermission/setOwner for DistributedFileSystem. Contributed by Xiaobing Zhou""

2016-06-09 Thread aengineer
Revert "Revert "HDFS-10346. Implement asynchronous setPermission/setOwner for 
DistributedFileSystem.  Contributed by  Xiaobing Zhou""

This reverts commit f23d5dfc60a017187ae57f3667ac0e688877c2dd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cba9a018
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cba9a018
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cba9a018

Branch: refs/heads/HDFS-7240
Commit: cba9a0188970cb33dcb95e9c49168ac4a83446d9
Parents: aa20fa1
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:29:38 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:29:38 2016 +0800

--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  59 
 .../ClientNamenodeProtocolTranslatorPB.java |  39 ++-
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 267 +--
 .../apache/hadoop/hdfs/TestDFSPermission.java   |  29 +-
 4 files changed, 351 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cba9a018/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
index 356ae3f..4fe0861 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
@@ -27,6 +27,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.ipc.Client;
 
@@ -37,6 +38,9 @@ import com.google.common.util.concurrent.AbstractFuture;
  * This instance of this class is the way end-user code interacts
  * with a Hadoop DistributedFileSystem in an asynchronous manner.
  *
+ * This class is unstable, so no guarantee is provided as to reliability,
+ * stability or compatibility across any level of release granularity.
+ *
  */
 @Unstable
 public class AsyncDistributedFileSystem {
@@ -111,4 +115,59 @@ public class AsyncDistributedFileSystem {
   Client.setAsynchronousMode(isAsync);
 }
   }
+
+  /**
+   * Set permission of a path.
+   *
+   * @param p
+   *  the path the permission is set to
+   * @param permission
+   *  the permission that is set to a path.
+   * @return an instance of Future, #get of which is invoked to wait for
+   * asynchronous call being finished.
+   */
+  public Future<Void> setPermission(Path p, final FsPermission permission)
+  throws IOException {
+dfs.getFsStatistics().incrementWriteOps(1);
+final Path absPath = dfs.fixRelativePart(p);
+final boolean isAsync = Client.isAsynchronousMode();
+Client.setAsynchronousMode(true);
+try {
+  dfs.getClient().setPermission(dfs.getPathName(absPath), permission);
+  return getReturnValue();
+} finally {
+  Client.setAsynchronousMode(isAsync);
+}
+  }
+
+  /**
+   * Set owner of a path (i.e. a file or a directory). The parameters username
+   * and groupname cannot both be null.
+   *
+   * @param p
+   *  The path
+   * @param username
+   *  If it is null, the original username remains unchanged.
+   * @param groupname
+   *  If it is null, the original groupname remains unchanged.
+   * @return an instance of Future, #get of which is invoked to wait for
+   * asynchronous call being finished.
+   */
+  public Future<Void> setOwner(Path p, String username, String groupname)
+  throws IOException {
+if (username == null && groupname == null) {
+  throw new IOException("username == null && groupname == null");
+}
+
+dfs.getFsStatistics().incrementWriteOps(1);
+final Path absPath = dfs.fixRelativePart(p);
+final boolean isAsync = Client.isAsynchronousMode();
+Client.setAsynchronousMode(true);
+try {
+  dfs.getClient().setOwner(dfs.getPathName(absPath), username, groupname);
+  return getReturnValue();
+} finally {
+  Client.setAsynchronousMode(isAsync);
+}
+  }
 }


[20/50] [abbrv] hadoop git commit: HADOOP-12807 S3AFileSystem should read AWS credentials from environment variables. Contributed by Tobin Baker.

2016-06-09 Thread aengineer
HADOOP-12807 S3AFileSystem should read AWS credentials from environment 
variables. Contributed by Tobin Baker.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3f78d8f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3f78d8f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3f78d8f

Branch: refs/heads/HDFS-7240
Commit: a3f78d8fa83f07f9183f3546203a191fcf50008c
Parents: 4a1cedc
Author: Steve Loughran 
Authored: Mon Jun 6 23:40:49 2016 +0200
Committer: Steve Loughran 
Committed: Mon Jun 6 23:42:36 2016 +0200

--
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java  |  2 ++
 .../src/site/markdown/tools/hadoop-aws/index.md  | 19 +++
 2 files changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3f78d8f/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index c028544..0281a3a 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 
+import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
 import com.amazonaws.AmazonClientException;
 import com.amazonaws.AmazonServiceException;
 import com.amazonaws.ClientConfiguration;
@@ -464,6 +465,7 @@ public class S3AFileSystem extends FileSystem {
   new BasicAWSCredentialsProvider(
   creds.getAccessKey(), creds.getAccessSecret()),
   new InstanceProfileCredentialsProvider(),
+  new EnvironmentVariableCredentialsProvider(),
   new AnonymousAWSCredentialsProvider()
   );
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3f78d8f/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 7a5e455..7d63a86 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -202,6 +202,25 @@ credentials in S3AFileSystem.
 For additional reading on the credential provider API see:
 [Credential Provider 
API](../../../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html).
 
+#### Authenticating via environment variables
+
+S3A supports configuration via [the standard AWS environment 
variables](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-environment).
+
+The core environment variables are for the access key and associated secret:
+
+```
+export AWS_ACCESS_KEY_ID=my.aws.key
+export AWS_SECRET_ACCESS_KEY=my.secret.key
+```
+
+These environment variables can be used to set the authentication credentials
+instead of properties in the Hadoop configuration. *Important:* these
+environment variables are not propagated from client to server when
+YARN applications are launched. That is: having the AWS environment variables
+set when an application is launched will not permit the launched application
+to access S3 resources. The environment variables must (somehow) be set
+on the hosts/processes where the work is executed.
+
 # End to End Steps for Distcp and S3 with Credential Providers
 
 ## provision
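The ordering in the patched provider list means the AWS SDK's environment-variable provider is consulted after instance-profile credentials. A minimal sketch of that sub-chain using only AWS SDK classes (the Hadoop-internal basic and anonymous providers are omitted here):

```java
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProviderChain;
import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;

public class S3ACredentialChainSketch {
  // First provider that can supply credentials wins; the AWS_* environment
  // variables are tried only if instance-profile credentials are unavailable.
  static AWSCredentials resolve() {
    return new AWSCredentialsProviderChain(
        new InstanceProfileCredentialsProvider(),
        new EnvironmentVariableCredentialsProvider()).getCredentials();
  }
}
```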





[37/50] [abbrv] hadoop git commit: Remove redundant TestMiniDFSCluster.testDualClusters. Contributed by Jiayi Zhou.

2016-06-09 Thread aengineer
Remove redundant TestMiniDFSCluster.testDualClusters. Contributed by Jiayi Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ee9ea00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ee9ea00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ee9ea00

Branch: refs/heads/HDFS-7240
Commit: 1ee9ea002609971ad58082bf525d57fca8a37035
Parents: 5a43583
Author: Andrew Wang 
Authored: Wed Jun 8 12:58:56 2016 -0700
Committer: Andrew Wang 
Committed: Wed Jun 8 12:58:56 2016 -0700

--
 .../apache/hadoop/hdfs/TestMiniDFSCluster.java  | 31 
 1 file changed, 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ee9ea00/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index 78ae8b1..ec72d87 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assume.assumeTrue;
 
 import java.io.File;
@@ -84,36 +83,6 @@ public class TestMiniDFSCluster {
 }
   }
 
-  /**
-   * Bring up two clusters and assert that they are in different directories.
-   * @throws Throwable on a failure
-   */
-  @Test(timeout=100000)
-  public void testDualClusters() throws Throwable {
-File testDataCluster2 = new File(testDataPath, CLUSTER_2);
-File testDataCluster3 = new File(testDataPath, CLUSTER_3);
-Configuration conf = new HdfsConfiguration();
-String c2Path = testDataCluster2.getAbsolutePath();
-conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c2Path);
-MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).build();
-MiniDFSCluster cluster3 = null;
-try {
-  String dataDir2 = cluster2.getDataDirectory();
-  assertEquals(new File(c2Path + "/data"), new File(dataDir2));
-  //change the data dir
-  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
-   testDataCluster3.getAbsolutePath());
-  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
-  cluster3 = builder.build();
-  String dataDir3 = cluster3.getDataDirectory();
-  assertTrue("Clusters are bound to the same directory: " + dataDir2,
-!dataDir2.equals(dataDir3));
-} finally {
-  MiniDFSCluster.shutdownCluster(cluster3);
-  MiniDFSCluster.shutdownCluster(cluster2);
-}
-  }
-
    @Test(timeout=100000)
   public void testIsClusterUpAfterShutdown() throws Throwable {
 Configuration conf = new HdfsConfiguration();





[40/50] [abbrv] hadoop git commit: YARN-4308. ContainersAggregated CPU resource utilization reports negative usage in first few heartbeats. Contributed by Sunil G

2016-06-09 Thread aengineer
YARN-4308. ContainersAggregated CPU resource utilization reports negative usage 
in first few heartbeats. Contributed by Sunil G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1500a0a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1500a0a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1500a0a3

Branch: refs/heads/HDFS-7240
Commit: 1500a0a3009e453c9f05a93df7a78b4e185eef30
Parents: ae04765
Author: Naganarasimha 
Authored: Thu Jun 9 05:41:09 2016 +0530
Committer: Naganarasimha 
Committed: Thu Jun 9 05:41:09 2016 +0530

--
 .../yarn/util/ProcfsBasedProcessTree.java   |  8 +++
 .../util/ResourceCalculatorProcessTree.java |  4 +-
 .../yarn/util/WindowsBasedProcessTree.java  |  8 +++
 .../monitor/ContainersMonitorImpl.java  |  9 +++
 .../MockCPUResourceCalculatorProcessTree.java   | 70 
 .../MockResourceCalculatorProcessTree.java  |  5 ++
 .../TestContainersMonitorResourceChange.java| 62 -
 7 files changed, 163 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1500a0a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index bb9c183..80d49c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -467,6 +467,14 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
 return totalStime.add(BigInteger.valueOf(totalUtime));
   }
 
+  /**
+   * Get the CPU usage by all the processes in the process-tree in Unix.
+   * Note: UNAVAILABLE will be returned in case when CPU usage is not
+   * available. It is NOT advised to return any other error code.
+   *
+   * @return percentage CPU usage since the process-tree was created,
+   * {@link #UNAVAILABLE} if CPU usage cannot be calculated or not available.
+   */
   @Override
   public float getCpuUsagePercent() {
 BigInteger processTotalJiffies = getTotalProcessJiffies();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1500a0a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
index 7214c75..771ec86 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
@@ -187,9 +187,11 @@ public abstract class ResourceCalculatorProcessTree 
extends Configured {
* Get the CPU usage by all the processes in the process-tree based on
* average between samples as a ratio of overall CPU cycles similar to top.
* Thus, if 2 out of 4 cores are used this should return 200.0.
+   * Note: UNAVAILABLE will be returned in case when CPU usage is not
+   * available. It is NOT advised to return any other error code.
*
* @return percentage CPU usage since the process-tree was created,
-   * {@link #UNAVAILABLE} if it cannot be calculated.
+   * {@link #UNAVAILABLE} if CPU usage cannot be calculated or not available.
*/
   public float getCpuUsagePercent() {
 return UNAVAILABLE;
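Callers aggregating these samples are expected to filter the sentinel rather than mix a negative value into totals; a minimal sketch (assumed caller-side handling, not code from the patch):

```java
import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;

public class CpuSampleGuard {
  // During the first heartbeats there is no baseline sample yet, so the
  // tree reports UNAVAILABLE; treat that as "no usage" instead of -1.
  static float usableCpu(ResourceCalculatorProcessTree tree) {
    float cpu = tree.getCpuUsagePercent();
    return (cpu == ResourceCalculatorProcessTree.UNAVAILABLE) ? 0f : cpu;
  }
}
```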

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1500a0a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
index 7858292..1c7eaf7 100644
--- 

[28/50] [abbrv] hadoop git commit: YARN-4837. User facing aspects of 'AM blacklisting' feature need fixing. (vinodkv via wangda)

2016-06-09 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/620325e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
index 60b728e..e8c8bca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
@@ -42,7 +42,7 @@ public class AppAttemptInfo {
   protected String nodeId;
   protected String logsLink;
   protected String blacklistedNodes;
-  protected String rmBlacklistedNodesForAMLaunches;
+  private String nodesBlacklistedBySystem;
   protected String appAttemptId;
 
   public AppAttemptInfo() {
@@ -69,9 +69,9 @@ public class AppAttemptInfo {
 + masterContainer.getNodeHttpAddress(),
 ConverterUtils.toString(masterContainer.getId()), user);
 
-rmBlacklistedNodesForAMLaunches = StringUtils.join(
-attempt.getAMBlacklist().getBlacklistUpdates().getAdditions(),
-", ");
+nodesBlacklistedBySystem =
+StringUtils.join(attempt.getAMBlacklistManager()
+  .getBlacklistUpdates().getBlacklistAdditions(), ", ");
 if (rm.getResourceScheduler() instanceof AbstractYarnScheduler) {
   AbstractYarnScheduler ayScheduler =
   (AbstractYarnScheduler) rm.getResourceScheduler();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/620325e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
index 4cbe7a8..3d95ca1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
@@ -87,9 +87,6 @@ public class ApplicationSubmissionContextInfo {
   @XmlElement(name = "reservation-id")
   String reservationId;
 
-  @XmlElement(name = "am-black-listing-requests")
-  AMBlackListingRequestInfo amBlackListingRequestInfo;
-
   public ApplicationSubmissionContextInfo() {
 applicationId = "";
 applicationName = "";
@@ -106,7 +103,6 @@ public class ApplicationSubmissionContextInfo {
 logAggregationContextInfo = null;
 attemptFailuresValidityInterval = -1;
 reservationId = "";
-amBlackListingRequestInfo = null;
   }
 
   public String getApplicationId() {
@@ -173,10 +169,6 @@ public class ApplicationSubmissionContextInfo {
 return attemptFailuresValidityInterval;
   }
 
-  public AMBlackListingRequestInfo getAMBlackListingRequestInfo() {
-return amBlackListingRequestInfo;
-  }
-
   public String getReservationId() {
 return reservationId;
   }
@@ -252,9 +244,4 @@ public class ApplicationSubmissionContextInfo {
   public void setReservationId(String reservationId) {
 this.reservationId = reservationId;
   }
-
-  public void setAMBlackListingRequestInfo(
-  AMBlackListingRequestInfo amBlackListingRequestInfo) {
-this.amBlackListingRequestInfo = amBlackListingRequestInfo;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/620325e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
--
diff --git 

[14/50] [abbrv] hadoop git commit: Revert "Revert "HADOOP-13168. Support Future.get with timeout in ipc async calls.""

2016-06-09 Thread aengineer
Revert "Revert "HADOOP-13168. Support Future.get with timeout in ipc async 
calls.""

This reverts commit e4450d47f19131818e1c040b6bd8d85ae8250475.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/574dcd34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/574dcd34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/574dcd34

Branch: refs/heads/HDFS-7240
Commit: 574dcd34c0da1903d25e37dc5757642a584dc3d0
Parents: cba9a01
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:23 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:23 2016 +0800

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 119 --
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  62 +-
 .../apache/hadoop/util/concurrent/AsyncGet.java |  60 +
 .../hadoop/util/concurrent/AsyncGetFuture.java  |  73 +++
 .../org/apache/hadoop/ipc/TestAsyncIPC.java | 124 +++
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  24 +---
 .../ClientNamenodeProtocolTranslatorPB.java |  33 ++---
 7 files changed, 310 insertions(+), 185 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/574dcd34/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 9be4649..d1d5b17 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -18,46 +18,10 @@
 
 package org.apache.hadoop.ipc;
 
-import static org.apache.hadoop.ipc.RpcConstants.*;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.EOFException;
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InterruptedIOException;
-import java.io.OutputStream;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
-import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-import java.util.Hashtable;
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import javax.net.SocketFactory;
-import javax.security.sasl.Sasl;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.protobuf.CodedOutputStream;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -93,14 +57,25 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.AsyncGet;
+import org.apache.hadoop.util.concurrent.AsyncGetFuture;
 import org.apache.htrace.core.Span;
 import org.apache.htrace.core.Tracer;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.AbstractFuture;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.CodedOutputStream;
+import javax.net.SocketFactory;
+import javax.security.sasl.Sasl;
+import java.io.*;
+import java.net.*;
+import java.security.PrivilegedExceptionAction;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.apache.hadoop.ipc.RpcConstants.CONNECTION_CONTEXT_CALL_ID;
+import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
 
 /** A client for an IPC 
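What this re-applied change enables on the caller side is a bounded wait on an async RPC instead of blocking forever; a minimal sketch (hypothetical helper, assuming a Future returned by one of the async HDFS calls):

```java
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BoundedWait {
  // Wait up to timeoutMs for an async call to finish; true if it completed,
  // false if it is still in flight and the caller should poll again.
  static boolean await(Future<Void> call, long timeoutMs)
      throws InterruptedException, ExecutionException {
    try {
      call.get(timeoutMs, TimeUnit.MILLISECONDS);
      return true;
    } catch (TimeoutException e) {
      return false;
    }
  }
}
```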

[17/50] [abbrv] hadoop git commit: Revert "Revert "HDFS-10430. Reuse FileSystem#access in TestAsyncDFS. Contributed by Xiaobing Zhou.""

2016-06-09 Thread aengineer
Revert "Revert "HDFS-10430. Reuse FileSystem#access in TestAsyncDFS. 
Contributed by Xiaobing Zhou.""

This reverts commit 8cf47d8589badfc07ef4bca3328a420c7c68abbd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e7b1ae0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e7b1ae0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e7b1ae0

Branch: refs/heads/HDFS-7240
Commit: 7e7b1ae03759da0becfef677e1d5f7a2ed9041c3
Parents: db41e6d
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:38 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:38 2016 +0800

--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 36 +---
 1 file changed, 1 insertion(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e7b1ae0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
index ddcf492..c7615a9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
@@ -34,7 +34,6 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -46,19 +45,16 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.TestDFSPermission.PermissionGenerator;
 import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
 import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
 import org.apache.hadoop.ipc.AsyncCallLimitExceededException;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -445,7 +441,7 @@ public class TestAsyncDFS {
 for (int i = 0; i < NUM_TESTS; i++) {
   assertTrue(fs.exists(dsts[i]));
   FsPermission fsPerm = new FsPermission(permissions[i]);
-  checkAccessPermissions(fs.getFileStatus(dsts[i]), 
fsPerm.getUserAction());
+  fs.access(dsts[i], fsPerm.getUserAction());
 }
 
 // test setOwner
@@ -474,34 +470,4 @@ public class TestAsyncDFS {
   assertTrue("group2".equals(fs.getFileStatus(dsts[i]).getGroup()));
 }
   }
-
-  static void checkAccessPermissions(FileStatus stat, FsAction mode)
-  throws IOException {
-checkAccessPermissions(UserGroupInformation.getCurrentUser(), stat, mode);
-  }
-
-  static void checkAccessPermissions(final UserGroupInformation ugi,
-  FileStatus stat, FsAction mode) throws IOException {
-FsPermission perm = stat.getPermission();
-String user = ugi.getShortUserName();
-List groups = Arrays.asList(ugi.getGroupNames());
-
-if (user.equals(stat.getOwner())) {
-  if (perm.getUserAction().implies(mode)) {
-return;
-  }
-} else if (groups.contains(stat.getGroup())) {
-  if (perm.getGroupAction().implies(mode)) {
-return;
-  }
-} else {
-  if (perm.getOtherAction().implies(mode)) {
-return;
-  }
-}
-throw new AccessControlException(String.format(
-"Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat
-.getPath(), stat.getOwner(), stat.getGroup(),
-stat.isDirectory() ? "d" : "-", perm));
-  }
 }
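
For context: FileSystem#access delegates the permission check to the
filesystem itself (for HDFS, the NameNode) and throws AccessControlException
when the caller lacks the requested FsAction, which is why the hand-rolled
owner/group/other walk above could be deleted. A minimal sketch of the call,
assuming an already-initialized FileSystem; the helper name is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    static boolean canAccess(FileSystem fs, Path path, FsAction action)
        throws IOException {
      try {
        fs.access(path, action);   // e.g. FsAction.READ, checked as the caller
        return true;
      } catch (AccessControlException denied) {
        return false;
      }
    }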





[42/50] [abbrv] hadoop git commit: HADOOP-13220. Follow-on fixups after upgrading mini-kdc to use Kerby. Contributed by Jiajia Li

2016-06-09 Thread aengineer
HADOOP-13220. Follow-on fixups after upgrading mini-kdc to use Kerby. Contributed 
by Jiajia Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/723432b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/723432b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/723432b3

Branch: refs/heads/HDFS-7240
Commit: 723432b3387fe69e6bf2b56d2ea1a7b1bda16b97
Parents: 76f0800
Author: Kai Zheng 
Authored: Thu Jun 9 15:56:12 2016 +0800
Committer: Kai Zheng 
Committed: Thu Jun 9 15:56:12 2016 +0800

--
 hadoop-common-project/hadoop-auth/pom.xml   |  1 -
 hadoop-common-project/hadoop-common/pom.xml |  1 -
 .../dev-support/findbugsExcludeFile.xml | 28 
 hadoop-common-project/hadoop-minikdc/pom.xml| 14 +-
 hadoop-project/pom.xml  |  6 +
 5 files changed, 47 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/723432b3/hadoop-common-project/hadoop-auth/pom.xml
--
diff --git a/hadoop-common-project/hadoop-auth/pom.xml 
b/hadoop-common-project/hadoop-auth/pom.xml
index 27e4547..93dceb3 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -134,7 +134,6 @@
 
   org.apache.kerby
   kerb-simplekdc
-  1.0.0-RC2
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/723432b3/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 8bf052c..059986f 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -298,7 +298,6 @@
 
   org.apache.kerby
   kerb-simplekdc
-  1.0.0-RC2
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/723432b3/hadoop-common-project/hadoop-minikdc/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-common-project/hadoop-minikdc/dev-support/findbugsExcludeFile.xml 
b/hadoop-common-project/hadoop-minikdc/dev-support/findbugsExcludeFile.xml
new file mode 100644
index 000..9a1c4a7
--- /dev/null
+++ b/hadoop-common-project/hadoop-minikdc/dev-support/findbugsExcludeFile.xml
@@ -0,0 +1,28 @@
+
+
+  
+  
+
+
+
+  
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/723432b3/hadoop-common-project/hadoop-minikdc/pom.xml
--
diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml 
b/hadoop-common-project/hadoop-minikdc/pom.xml
index 2e22ad0..3075cad 100644
--- a/hadoop-common-project/hadoop-minikdc/pom.xml
+++ b/hadoop-common-project/hadoop-minikdc/pom.xml
@@ -38,7 +38,6 @@
 
   org.apache.kerby
   kerb-simplekdc
-  1.0.0-RC2
 
 
   org.slf4j
@@ -51,4 +50,17 @@
   compile
 
   
+
+  
+
+  
+org.codehaus.mojo
+findbugs-maven-plugin
+
+  ${basedir}/dev-support/findbugsExcludeFile.xml
+  
+
+  
+
+  
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/723432b3/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index aa47f6c..2b6b162 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1011,6 +1011,12 @@
 1.3.0
 
 
+
+  org.apache.kerby
+  kerb-simplekdc
+  1.0.0-RC2
+
+
 
   
 





[23/50] [abbrv] hadoop git commit: YARN-4525. Fix bug in RLESparseResourceAllocation.getRangeOverlapping(). (Ishai Menache and Carlo Curino via asuresh)

2016-06-09 Thread aengineer
YARN-4525. Fix bug in RLESparseResourceAllocation.getRangeOverlapping(). (Ishai 
Menache and Carlo Curino via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a154f75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a154f75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a154f75

Branch: refs/heads/HDFS-7240
Commit: 3a154f75ed85d864b3ffd35818992418f2b6aa59
Parents: 7a9b737
Author: Arun Suresh 
Authored: Mon Jun 6 21:18:32 2016 -0700
Committer: Arun Suresh 
Committed: Mon Jun 6 21:18:32 2016 -0700

--
 .../RLESparseResourceAllocation.java|  6 +-
 .../TestRLESparseResourceAllocation.java| 22 
 2 files changed, 27 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a154f75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
index 63defb5..c18a93e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
@@ -510,7 +510,11 @@ public class RLESparseResourceAllocation {
   long previous = a.floorKey(start);
   a = a.tailMap(previous, true);
 }
-a = a.headMap(end, true);
+
+if (end < a.lastKey()) {
+  a = a.headMap(end, true);
+}
+
   }
   RLESparseResourceAllocation ret =
   new RLESparseResourceAllocation(a, resourceCalculator);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a154f75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
index b526484..f8d2a4a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
@@ -283,6 +283,28 @@ public class TestRLESparseResourceAllocation {
   }
 
   @Test
+  public void testRangeOverlapping() {
+ResourceCalculator resCalc = new DefaultResourceCalculator();
+
+RLESparseResourceAllocation r =
+new RLESparseResourceAllocation(resCalc);
+int[] alloc = {10, 10, 10, 10, 10, 10};
+int start = 100;
+Set<Entry<ReservationInterval, Resource>> inputs =
+generateAllocation(start, alloc, false).entrySet();
+for (Entry<ReservationInterval, Resource> ip : inputs) {
+  r.addInterval(ip.getKey(), ip.getValue());
+}
+long s = r.getEarliestStartTime();
+long d = r.getLatestNonNullTime();
+
+// tries to trigger "out-of-range" bug
+r =  r.getRangeOverlapping(s, d);
+r = r.getRangeOverlapping(s-1, d-1);
+r = r.getRangeOverlapping(s+1, d+1);
+  }
+
+  @Test
   public void testBlocks() {
 ResourceCalculator resCalc = new DefaultResourceCalculator();
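
The guard added in getRangeOverlapping() above matters because the map it
receives can itself be a sub-map view produced by an earlier call; the new
test chains three calls for exactly this reason. On such a view, headMap()
with a toKey beyond the view's upper bound throws IllegalArgumentException
("toKey out of range") rather than clamping. A standalone sketch of the
failure mode and of the guard, with illustrative keys:

    import java.util.NavigableMap;
    import java.util.TreeMap;

    public class SubMapRangeSketch {
      public static void main(String[] args) {
        NavigableMap<Long, Integer> full = new TreeMap<>();
        full.put(100L, 10);
        full.put(200L, 10);

        // A view capped at 200, as left behind by an earlier range query.
        NavigableMap<Long, Integer> view = full.headMap(200L, true);

        long end = 300L;
        // view.headMap(end, true) would throw here; the patch's guard
        // only narrows the view when end falls inside its range.
        NavigableMap<Long, Integer> result =
            (end < view.lastKey()) ? view.headMap(end, true) : view;
        System.out.println(result);   // {100=10, 200=10}
      }
    }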
 





[03/50] [abbrv] hadoop git commit: Revert "HADOOP-13226 Support async call retry and failover."

2016-06-09 Thread aengineer
Revert "HADOOP-13226 Support async call retry and failover."

This reverts commit 83f2f78c118a7e52aba5104bd97b0acedc96be7b.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5360da8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5360da8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5360da8b

Branch: refs/heads/HDFS-7240
Commit: 5360da8bd9f720384860f411bee081aef13b4bd4
Parents: 47e0321
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:09 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:09 2016 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   8 +-
 .../hadoop/io/retry/AsyncCallHandler.java   | 321 ---
 .../org/apache/hadoop/io/retry/CallReturn.java  |  75 -
 .../hadoop/io/retry/RetryInvocationHandler.java | 134 ++--
 .../apache/hadoop/io/retry/RetryPolicies.java   |   4 +-
 .../main/java/org/apache/hadoop/ipc/Client.java |  25 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  13 +-
 .../apache/hadoop/util/concurrent/AsyncGet.java |  17 +-
 .../org/apache/hadoop/ipc/TestAsyncIPC.java |  10 +-
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |   7 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  42 +--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java|  43 ++-
 .../apache/hadoop/hdfs/TestAsyncHDFSWithHA.java | 181 ---
 .../hdfs/server/namenode/ha/HATestUtil.java |   9 +-
 14 files changed, 114 insertions(+), 775 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5360da8b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml 
b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index a644aa5..ab8673b 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -345,13 +345,7 @@

  
 
- 
- 
-   
-   
-   
- 
-
+ 
  



http://git-wip-us.apache.org/repos/asf/hadoop/blob/5360da8b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
deleted file mode 100644
index 5a03b03..000
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.retry;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.concurrent.AsyncGet;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.reflect.Method;
-import java.util.LinkedList;
-import java.util.Queue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicReference;
-
-/** Handle async calls. */
-@InterfaceAudience.Private
-public class AsyncCallHandler {
-  static final Logger LOG = LoggerFactory.getLogger(AsyncCallHandler.class);
-
-  private static final ThreadLocal
-  LOWER_LAYER_ASYNC_RETURN = new ThreadLocal<>();
-  private static final ThreadLocal>
-  ASYNC_RETURN = new ThreadLocal<>();
-
-  /** @return the async return value from {@link AsyncCallHandler}. */
-  @InterfaceStability.Unstable
-  

[31/50] [abbrv] hadoop git commit: Addendum patch for YARN-5180 updating findbugs-exclude.xml

2016-06-09 Thread aengineer
Addendum patch for YARN-5180 updating findbugs-exclude.xml


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8554aee1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8554aee1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8554aee1

Branch: refs/heads/HDFS-7240
Commit: 8554aee1bef5aff9e49e5e9119d6a7a4abf1c432
Parents: 733f3f1
Author: Arun Suresh 
Authored: Tue Jun 7 15:59:13 2016 -0700
Committer: Arun Suresh 
Committed: Tue Jun 7 15:59:43 2016 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml| 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8554aee1/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 81c7e6a..6998d75 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -517,6 +517,11 @@
   
 
   
+
+
+
+  
+  
 
 
   





[26/50] [abbrv] hadoop git commit: HADOOP-10048. LocalDirAllocator should avoid holding locks while accessing the filesystem. Contributed by Jason Lowe.

2016-06-09 Thread aengineer
HADOOP-10048. LocalDirAllocator should avoid holding locks while accessing the 
filesystem. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c14c1b29
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c14c1b29
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c14c1b29

Branch: refs/heads/HDFS-7240
Commit: c14c1b298e29e799f7c8f15ff24d7eba6e0cd39b
Parents: e620530
Author: Junping Du 
Authored: Tue Jun 7 09:18:58 2016 -0700
Committer: Junping Du 
Committed: Tue Jun 7 09:18:58 2016 -0700

--
 .../org/apache/hadoop/fs/LocalDirAllocator.java | 153 ---
 1 file changed, 94 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c14c1b29/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
index 70cf87d..b14e1f0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
@@ -20,9 +20,10 @@ package org.apache.hadoop.fs;
 
 import java.io.*;
 import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.commons.logging.*;
-
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -247,74 +248,101 @@ public class LocalDirAllocator {
 private final Log LOG =
   LogFactory.getLog(AllocatorPerContext.class);
 
-private int dirNumLastAccessed;
 private Random dirIndexRandomizer = new Random();
-private FileSystem localFS;
-private DF[] dirDF = new DF[0];
 private String contextCfgItemName;
-private String[] localDirs = new String[0];
-private String savedLocalDirs = "";
+
+// NOTE: the context must be accessed via a local reference as it
+//   may be updated at any time to reference a different context
+private AtomicReference<Context> currentContext;
+
+private static class Context {
+  private AtomicInteger dirNumLastAccessed = new AtomicInteger(0);
+  private FileSystem localFS;
+  private DF[] dirDF;
+  private Path[] localDirs;
+  private String savedLocalDirs;
+
+  public int getAndIncrDirNumLastAccessed() {
+return getAndIncrDirNumLastAccessed(1);
+  }
+
+  public int getAndIncrDirNumLastAccessed(int delta) {
+if (localDirs.length < 2 || delta == 0) {
+  return dirNumLastAccessed.get();
+}
+int oldval, newval;
+do {
+  oldval = dirNumLastAccessed.get();
+  newval = (oldval + delta) % localDirs.length;
+} while (!dirNumLastAccessed.compareAndSet(oldval, newval));
+return oldval;
+  }
+}
 
 public AllocatorPerContext(String contextCfgItemName) {
   this.contextCfgItemName = contextCfgItemName;
+  this.currentContext = new AtomicReference<Context>(new Context());
 }
 
 /** This method gets called everytime before any read/write to make sure
  * that any change to localDirs is reflected immediately.
  */
-private synchronized void confChanged(Configuration conf) 
+private Context confChanged(Configuration conf)
 throws IOException {
+  Context ctx = currentContext.get();
   String newLocalDirs = conf.get(contextCfgItemName);
   if (null == newLocalDirs) {
 throw new IOException(contextCfgItemName + " not configured");
   }
-  if (!newLocalDirs.equals(savedLocalDirs)) {
-localDirs = StringUtils.getTrimmedStrings(newLocalDirs);
-localFS = FileSystem.getLocal(conf);
-int numDirs = localDirs.length;
-ArrayList<String> dirs = new ArrayList<String>(numDirs);
+  if (!newLocalDirs.equals(ctx.savedLocalDirs)) {
+ctx = new Context();
+String[] dirStrings = StringUtils.getTrimmedStrings(newLocalDirs);
+ctx.localFS = FileSystem.getLocal(conf);
+int numDirs = dirStrings.length;
+ArrayList<Path> dirs = new ArrayList<Path>(numDirs);
 ArrayList<DF> dfList = new ArrayList<DF>(numDirs);
 for (int i = 0; i < numDirs; i++) {
   try {
 // filter problematic directories
-Path tmpDir = new Path(localDirs[i]);
-if(localFS.mkdirs(tmpDir)|| localFS.exists(tmpDir)) {
+Path tmpDir = new Path(dirStrings[i]);
+if(ctx.localFS.mkdirs(tmpDir)|| ctx.localFS.exists(tmpDir)) 
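
The heart of the change is visible in getAndIncrDirNumLastAccessed() above:
the round-robin cursor now advances with a compare-and-set loop instead of a
synchronized method, and the mutable state lives in an immutable Context
snapshot published through an AtomicReference. A minimal standalone rendering
of that CAS loop (generic names, not Hadoop API):

    import java.util.concurrent.atomic.AtomicInteger;

    /** Advance a shared round-robin cursor over n slots without locking,
     *  returning the pre-increment value, like the patch's helper. */
    static int nextIndex(AtomicInteger cursor, int n, int delta) {
      if (n < 2 || delta == 0) {
        return cursor.get();          // nothing to rotate over
      }
      int oldVal, newVal;
      do {
        oldVal = cursor.get();
        newVal = (oldVal + delta) % n;
      } while (!cursor.compareAndSet(oldVal, newVal));
      return oldVal;
    }

The payoff is that a slow mkdirs() or disk-usage probe on one thread no
longer blocks every other thread that merely needs the next local directory.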

[34/50] [abbrv] hadoop git commit: YARN-5206. RegistrySecurity includes id:pass in exception text if considered invalid. Contributed by Steve Loughran

2016-06-09 Thread aengineer
YARN-5206. RegistrySecurity includes id:pass in exception text if considered 
invalid. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c8a377c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c8a377c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c8a377c

Branch: refs/heads/HDFS-7240
Commit: 8c8a377cac10b086a7ff37ee366b79e6b04d2738
Parents: 723432b
Author: Jason Lowe 
Authored: Wed Jun 8 14:11:25 2016 +
Committer: Jason Lowe 
Committed: Wed Jun 8 14:11:25 2016 +

--
 .../apache/hadoop/registry/client/impl/zk/RegistrySecurity.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c8a377c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
index fc61460..49673fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
@@ -443,7 +443,7 @@ public class RegistrySecurity extends AbstractService {
*/
   public String digest(String idPasswordPair) throws IOException {
 if (StringUtils.isEmpty(idPasswordPair) || !isValid(idPasswordPair)) {
-  throw new IOException("Invalid id:password: " + idPasswordPair);
+  throw new IOException("Invalid id:password");
 }
 try {
   return DigestAuthenticationProvider.generateDigest(idPasswordPair);





[35/50] [abbrv] hadoop git commit: YARN-5204. Properly report status of killed/stopped queued containers. (Konstantinos Karanasos via asuresh)

2016-06-09 Thread aengineer
YARN-5204. Properly report status of killed/stopped queued containers. 
(Konstantinos Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3344ba70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3344ba70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3344ba70

Branch: refs/heads/HDFS-7240
Commit: 3344ba70e027c929e07bad5e6877c796d41181e9
Parents: 8c8a377
Author: Arun Suresh 
Authored: Wed Jun 8 08:31:32 2016 -0700
Committer: Arun Suresh 
Committed: Wed Jun 8 08:31:32 2016 -0700

--
 .../queuing/QueuingContainerManagerImpl.java|  15 ++-
 .../queuing/TestQueuingContainerManager.java| 129 +++
 2 files changed, 115 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3344ba70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
index a1e3bdb..38b1b07 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
@@ -175,8 +175,9 @@ public class QueuingContainerManagerImpl extends 
ContainerManagerImpl {
   }
 
   nodeStatusUpdater.sendOutofBandHeartBeat();
+} else {
+  super.stopContainerInternal(containerID);
 }
-super.stopContainerInternal(containerID);
   }
 
   /**
@@ -456,6 +457,18 @@ public class QueuingContainerManagerImpl extends 
ContainerManagerImpl {
 ContainerExitStatus.INVALID, this.context.getQueuingContext()
 .getQueuedContainers().get(containerID).getResource(),
 executionType);
+  } else {
+// Check if part of the stopped/killed queued containers.
+for (ContainerTokenIdentifier cTokenId : this.context
+.getQueuingContext().getKilledQueuedContainers().keySet()) {
+  if (cTokenId.getContainerID().equals(containerID)) {
+return BuilderUtils.newContainerStatus(containerID,
+org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE,
+this.context.getQueuingContext().getKilledQueuedContainers()
+.get(cTokenId), ContainerExitStatus.ABORTED, cTokenId
+.getResource(), cTokenId.getExecutionType());
+  }
+}
   }
 }
 return super.getContainerStatusInternal(containerID, nmTokenIdentifier);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3344ba70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/TestQueuingContainerManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/TestQueuingContainerManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/TestQueuingContainerManager.java
index 4d44d8d..caebef7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/TestQueuingContainerManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/TestQueuingContainerManager.java
@@ -24,13 +24,13 @@ import java.util.Arrays;
 import java.util.List;
 
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.UserGroupInformation;
 import 
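
The linear scan added to getContainerStatusInternal() above is needed because
killedQueuedContainers is keyed by ContainerTokenIdentifier rather than by
ContainerId, so there is no direct get() for an id. A sketch of the lookup
reduced to its essentials; the String value type for the diagnostics is
inferred from how the map is used in the hunk, so treat it as an assumption:

    import java.util.Map;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;

    /** Find the diagnostics recorded for a killed queued container,
     *  or null if the id is not among the killed queued set. */
    static String killedDiagnostics(
        Map<ContainerTokenIdentifier, String> killedQueuedContainers,
        ContainerId containerID) {
      for (Map.Entry<ContainerTokenIdentifier, String> e
          : killedQueuedContainers.entrySet()) {
        if (e.getKey().getContainerID().equals(containerID)) {
          return e.getValue();
        }
      }
      return null;
    }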

[19/50] [abbrv] hadoop git commit: MAPREDUCE-5044. Have AM trigger jstack on task attempts that time out before killing them. (Eric Payne and Gera Shegalov via mingma)

2016-06-09 Thread aengineer
MAPREDUCE-5044. Have AM trigger jstack on task attempts that time out before 
killing them. (Eric Payne and Gera Shegalov via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a1cedc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a1cedc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a1cedc0

Branch: refs/heads/HDFS-7240
Commit: 4a1cedc010d3fa1d8ef3f2773ca12acadfee5ba5
Parents: 35f255b
Author: Ming Ma 
Authored: Mon Jun 6 14:30:51 2016 -0700
Committer: Ming Ma 
Committed: Mon Jun 6 14:30:51 2016 -0700

--
 .../hadoop/mapred/LocalContainerLauncher.java   |  28 +
 .../v2/app/job/impl/TaskAttemptImpl.java|   5 +-
 .../v2/app/launcher/ContainerLauncherEvent.java |  21 +++-
 .../v2/app/launcher/ContainerLauncherImpl.java  |  19 ++-
 .../v2/app/launcher/TestContainerLauncher.java  |  10 +-
 .../app/launcher/TestContainerLauncherImpl.java |   8 ++
 .../hadoop/mapred/ResourceMgrDelegate.java  |   5 +-
 .../hadoop/mapred/TestClientRedirect.java   |   2 +-
 .../apache/hadoop/mapreduce/v2/TestMRJobs.java  | 119 +++
 .../yarn/api/ApplicationClientProtocol.java |   2 +-
 .../yarn/api/ContainerManagementProtocol.java   |   5 +
 .../SignalContainerResponse.java|   2 +-
 .../main/proto/applicationclient_protocol.proto |   2 +-
 .../proto/containermanagement_protocol.proto|   1 +
 .../hadoop/yarn/client/api/YarnClient.java  |   2 +-
 .../yarn/client/api/impl/YarnClientImpl.java|   4 +-
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |   6 +-
 .../yarn/client/api/impl/TestYarnClient.java|   4 +-
 .../yarn/api/ContainerManagementProtocolPB.java |   7 ++
 .../ApplicationClientProtocolPBClientImpl.java  |   4 +-
 ...ContainerManagementProtocolPBClientImpl.java |  19 +++
 .../ApplicationClientProtocolPBServiceImpl.java |   5 +-
 ...ontainerManagementProtocolPBServiceImpl.java |  20 
 .../hadoop/yarn/TestContainerLaunchRPC.java |  10 ++
 .../yarn/TestContainerResourceIncreaseRPC.java  |   8 ++
 .../java/org/apache/hadoop/yarn/TestRPC.java|  10 ++
 .../containermanager/ContainerManagerImpl.java  |  38 --
 .../amrmproxy/MockResourceManagerFacade.java|   2 +-
 .../server/resourcemanager/ClientRMService.java |   2 +-
 .../yarn/server/resourcemanager/MockRM.java |   6 +-
 .../server/resourcemanager/NodeManager.java |   9 +-
 .../resourcemanager/TestAMAuthorization.java|   8 ++
 .../TestApplicationMasterLauncher.java  |   8 ++
 .../resourcemanager/TestSignalContainer.java|   2 +-
 34 files changed, 361 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a1cedc0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
index da118c5..190d988 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -20,6 +20,10 @@ package org.apache.hadoop.mapred;
 
 import java.io.File;
 import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -255,6 +259,30 @@ public class LocalContainerLauncher extends 
AbstractService implements
 
 } else if (event.getType() == EventType.CONTAINER_REMOTE_CLEANUP) {
 
+  if (event.getDumpContainerThreads()) {
+try {
+  // Construct full thread dump header
+  System.out.println(new java.util.Date());
+  RuntimeMXBean rtBean = ManagementFactory.getRuntimeMXBean();
+  System.out.println("Full thread dump " + rtBean.getVmName()
+  + " (" + rtBean.getVmVersion()
+  + " " + rtBean.getSystemProperties().get("java.vm.info")
+  + "):\n");
+  // Dump threads' states and stacks
+  ThreadMXBean tmxBean = ManagementFactory.getThreadMXBean();
+  ThreadInfo[] tInfos = tmxBean.dumpAllThreads(
+  

[24/50] [abbrv] hadoop git commit: YARN-5118. Tests fails with localizer port bind exception. Contributed by Brahma Reddy Battula.

2016-06-09 Thread aengineer
YARN-5118. Tests fails with localizer port bind exception. Contributed by 
Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bddea5fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bddea5fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bddea5fe

Branch: refs/heads/HDFS-7240
Commit: bddea5fe5fe72eee8e2ecfcec616bd8ceb4d72e7
Parents: 3a154f7
Author: Rohith Sharma K S 
Authored: Tue Jun 7 11:20:15 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Jun 7 11:20:15 2016 +0530

--
 .../apache/hadoop/yarn/server/nodemanager/TestEventFlow.java  | 3 +++
 .../server/nodemanager/TestNodeStatusUpdaterForLabels.java| 7 +++
 .../containermanager/BaseContainerManagerTest.java| 3 +++
 3 files changed, 13 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddea5fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
index f126080..a9ff83c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
@@ -25,6 +25,7 @@ import java.util.List;
 
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
@@ -91,6 +92,8 @@ public class TestEventFlow {
 conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
 conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, 
 remoteLogDir.getAbsolutePath());
+conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "0.0.0.0:"
++ ServerSocketUtil.getPort(8040, 10));
 
 ContainerExecutor exec = new DefaultContainerExecutor();
 exec.setConf(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddea5fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
index 563104e..257e18c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
@@ -28,6 +28,7 @@ import java.nio.ByteBuffer;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.service.ServiceOperations;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -233,6 +234,9 @@ public class TestNodeStatusUpdaterForLabels extends 
NodeLabelTestBase {
 
 YarnConfiguration conf = createNMConfigForDistributeNodeLabels();
 conf.setLong(YarnConfiguration.NM_NODE_LABELS_RESYNC_INTERVAL, 2000);
+conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "0.0.0.0:"
++ ServerSocketUtil.getPort(8040, 10));
+
 nm.init(conf);
 resourceTracker.resetNMHeartbeatReceiveFlag();
 nm.start();
@@ -329,6 +333,9 @@ public class TestNodeStatusUpdaterForLabels extends 
NodeLabelTestBase {
 };
 dummyLabelsProviderRef.setNodeLabels(toNodeLabelSet("P"));
 YarnConfiguration conf = createNMConfigForDistributeNodeLabels();
+conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "0.0.0.0:"
++ 
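
ServerSocketUtil.getPort(8040, 10) asks for the default localizer port but
falls back to random ports when it is taken, which is what keeps parallel
test JVMs from colliding. A hedged sketch of that shape; the real utility is
org.apache.hadoop.net.ServerSocketUtil and may differ in detail, so this
standalone version only illustrates the probe-and-retry idea:

    import java.io.IOException;
    import java.net.ServerSocket;
    import java.util.Random;

    public final class PortProbe {
      /** Try the preferred port, then up to retries random ports,
       *  returning the first one that can actually be bound. */
      public static int getPort(int preferred, int retries)
          throws IOException {
        Random rnd = new Random();
        int candidate = preferred;
        for (int attempt = 0; attempt <= retries; attempt++) {
          try (ServerSocket probe = new ServerSocket(candidate)) {
            return probe.getLocalPort();
          } catch (IOException bindFailed) {
            candidate = 1024 + rnd.nextInt(65535 - 1024);
          }
        }
        throw new IOException("no free port after " + retries + " retries");
      }
    }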

[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2016-06-09 Thread aengineer
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23923086
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23923086
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23923086

Branch: refs/heads/HDFS-7240
Commit: 23923086d1aff381fcf3a0d0e1dd19ed029bda18
Parents: 54f7975 9581fb7
Author: Anu Engineer 
Authored: Thu Jun 9 20:34:39 2016 -0700
Committer: Anu Engineer 
Committed: Thu Jun 9 20:34:39 2016 -0700

--
 LICENSE.txt | 1017 +++-
 NOTICE.txt  |  266 +
 dev-support/bin/qbt |   18 +
 dev-support/bin/yetus-wrapper   |2 +-
 dev-support/checkstyle/suppressions.xml |   21 +
 hadoop-build-tools/pom.xml  |   41 +
 hadoop-common-project/hadoop-auth/pom.xml   |   27 +-
 .../authentication/util/KerberosUtil.java   |   18 +-
 .../TestKerberosAuthenticationHandler.java  |1 -
 .../authentication/util/TestKerberosUtil.java   |   26 +-
 .../hadoop-common/HadoopCommon.cmake|9 +-
 .../dev-support/findbugsExcludeFile.xml |8 +-
 hadoop-common-project/hadoop-common/pom.xml |4 +
 .../KeyProviderDelegationTokenExtension.java|   30 +-
 .../org/apache/hadoop/crypto/key/KeyShell.java  |  182 +--
 .../crypto/key/kms/KMSClientProvider.java   |  158 ++-
 .../key/kms/LoadBalancingKMSClientProvider.java |   41 +-
 .../hadoop/crypto/key/kms/ValueQueue.java   |2 +-
 .../hadoop/fs/CommonConfigurationKeys.java  |3 +
 .../hadoop/fs/InvalidRequestException.java  |4 +
 .../org/apache/hadoop/fs/LocalDirAllocator.java |  153 ++-
 .../hadoop/fs/PathAccessDeniedException.java|   12 +-
 .../apache/hadoop/fs/PathNotFoundException.java |   20 +-
 .../hadoop/fs/PathPermissionException.java  |   16 +-
 .../apache/hadoop/io/FastByteComparisons.java   |2 +-
 .../apache/hadoop/io/erasurecode/CodecUtil.java |  113 +-
 .../io/erasurecode/ErasureCoderOptions.java |   89 ++
 .../erasurecode/coder/HHXORErasureDecoder.java  |   18 +-
 .../erasurecode/coder/HHXORErasureEncoder.java  |   15 +-
 .../io/erasurecode/coder/RSErasureDecoder.java  |6 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |6 +-
 .../io/erasurecode/coder/XORErasureDecoder.java |6 +-
 .../io/erasurecode/coder/XORErasureEncoder.java |6 +-
 .../rawcoder/AbstractRawErasureCoder.java   |  220 
 .../rawcoder/AbstractRawErasureDecoder.java |  181 ---
 .../rawcoder/AbstractRawErasureEncoder.java |  146 ---
 .../rawcoder/ByteArrayDecodingState.java|  111 ++
 .../rawcoder/ByteArrayEncodingState.java|   81 ++
 .../rawcoder/ByteBufferDecodingState.java   |  134 +++
 .../rawcoder/ByteBufferEncodingState.java   |   98 ++
 .../io/erasurecode/rawcoder/CoderOption.java|   43 -
 .../io/erasurecode/rawcoder/CoderUtil.java  |  199 
 .../io/erasurecode/rawcoder/DecodingState.java  |   55 +
 .../erasurecode/rawcoder/DummyRawDecoder.java   |   16 +-
 .../erasurecode/rawcoder/DummyRawEncoder.java   |   15 +-
 .../rawcoder/DummyRawErasureCoderFactory.java   |   10 +-
 .../io/erasurecode/rawcoder/EncodingState.java  |   44 +
 .../io/erasurecode/rawcoder/RSRawDecoder.java   |   48 +-
 .../rawcoder/RSRawDecoderLegacy.java|   66 +-
 .../io/erasurecode/rawcoder/RSRawEncoder.java   |   45 +-
 .../rawcoder/RSRawEncoderLegacy.java|   82 +-
 .../rawcoder/RSRawErasureCoderFactory.java  |9 +-
 .../RSRawErasureCoderFactoryLegacy.java |9 +-
 .../erasurecode/rawcoder/RawErasureCoder.java   |   73 --
 .../rawcoder/RawErasureCoderFactory.java|   11 +-
 .../erasurecode/rawcoder/RawErasureDecoder.java |  137 ++-
 .../erasurecode/rawcoder/RawErasureEncoder.java |  135 ++-
 .../io/erasurecode/rawcoder/XORRawDecoder.java  |   51 +-
 .../io/erasurecode/rawcoder/XORRawEncoder.java  |   57 +-
 .../rawcoder/XORRawErasureCoderFactory.java |9 +-
 .../io/erasurecode/rawcoder/package-info.java   |   38 +
 .../io/erasurecode/rawcoder/util/CoderUtil.java |   83 --
 .../erasurecode/rawcoder/util/GaloisField.java  |4 +-
 .../hadoop/io/retry/AsyncCallHandler.java   |  321 +
 .../org/apache/hadoop/io/retry/CallReturn.java  |   75 ++
 .../hadoop/io/retry/RetryInvocationHandler.java |  134 ++-
 .../apache/hadoop/io/retry/RetryPolicies.java   |4 +-
 .../main/java/org/apache/hadoop/ipc/Client.java |  124 +-
 .../apache/hadoop/ipc/DecayRpcScheduler.java|  130 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|   65 +-
 .../java/org/apache/hadoop/log/LogLevel.java|  285 -
 .../hadoop/metrics2/impl/MetricsSystemImpl.java |1 +
 .../metrics2/lib/DefaultMetricsSystem.java  |9 +
 

[36/50] [abbrv] hadoop git commit: YARN-5080. Addendum fix to the original patch to fix YARN logs CLI. Contributed by Xuan Gong

2016-06-09 Thread aengineer
YARN-5080. Addendum fix to the original patch to fix YARN logs CLI. Contributed 
by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a43583c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a43583c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a43583c

Branch: refs/heads/HDFS-7240
Commit: 5a43583c0bbb9650ea6a9f48d9544ec3ec24b580
Parents: 3344ba7
Author: Vinod Kumar Vavilapalli 
Authored: Wed Jun 8 09:49:55 2016 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Wed Jun 8 09:49:55 2016 -0700

--
 .../src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a43583c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index bbe636f..d62ee5e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -278,7 +278,9 @@ public class LogsCLI extends Configured implements Tool {
   Configuration conf, String appId) throws ClientHandlerException,
   UniformInterfaceException, JSONException {
 Client webServiceClient = Client.create();
-String webAppAddress = WebAppUtils.getRMWebAppURLWithScheme(conf);
+String webAppAddress = WebAppUtils.getHttpSchemePrefix(conf) +
+WebAppUtils.getWebAppBindURL(conf, YarnConfiguration.RM_BIND_HOST,
+WebAppUtils.getRMWebAppURLWithoutScheme(conf));
 WebResource webResource = webServiceClient.resource(webAppAddress);
 
 ClientResponse response =





[45/50] [abbrv] hadoop git commit: HADOOP-12537 S3A to support Amazon STS temporary credentials. Contributed by Sean Mackrory.

2016-06-09 Thread aengineer
HADOOP-12537 S3A to support Amazon STS temporary credentials. Contributed by 
Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31ffaf76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31ffaf76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31ffaf76

Branch: refs/heads/HDFS-7240
Commit: 31ffaf76f2b6e1fd2a141daa4daaebdfecefe727
Parents: 9378d94
Author: Steve Loughran 
Authored: Thu Jun 9 20:58:30 2016 +0100
Committer: Steve Loughran 
Committed: Thu Jun 9 21:00:47 2016 +0100

--
 .../src/main/resources/core-default.xml |   5 +
 hadoop-project/pom.xml  |   8 +-
 hadoop-tools/hadoop-aws/pom.xml |   5 +
 .../fs/s3a/BasicAWSCredentialsProvider.java |   3 +-
 .../org/apache/hadoop/fs/s3a/Constants.java |   3 +
 .../s3a/CredentialInitializationException.java  |  46 ++
 .../fs/s3a/TemporaryAWSCredentialsProvider.java |  70 +
 .../src/site/markdown/tools/hadoop-aws/index.md |  71 -
 .../fs/s3a/TestS3ATemporaryCredentials.java | 150 +++
 9 files changed, 357 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 8bb27ea..39b7132 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -806,6 +806,11 @@
 
 
 
+  fs.s3a.session.token
+  The session token used with temporary credentials. Used only 
with provider 
org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider.
+
+
+
   fs.s3a.connection.maximum
   15
   Controls the maximum number of simultaneous connections to 
S3.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 2b6b162..4c618a1 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -116,6 +116,7 @@
 1.0-beta-1
 1.0-alpha-8
 900
+1.10.6
   
 
   
@@ -690,7 +691,12 @@
   
 com.amazonaws
 aws-java-sdk-s3
-1.10.6
+${aws-java-sdk.version}
+  
+  
+com.amazonaws
+aws-java-sdk-sts
+${aws-java-sdk.version}
   
   
 org.apache.mina

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index c95f1e6..7c25e60 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -231,6 +231,11 @@
   compile
 
 
+  com.amazonaws
+  aws-java-sdk-sts
+  test
+
+
   junit
   junit
   test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
index 3a5ee8c..61be43f 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.fs.s3a;
 
-import com.amazonaws.AmazonClientException;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.BasicAWSCredentials;
 import com.amazonaws.auth.AWSCredentials;
@@ -49,7 +48,7 @@ public class BasicAWSCredentialsProvider implements 
AWSCredentialsProvider {
 if (!StringUtils.isEmpty(accessKey) && !StringUtils.isEmpty(secretKey)) {
   return new BasicAWSCredentials(accessKey, secretKey);
 }
-throw new AmazonClientException(
+throw new CredentialInitializationException(
 "Access key or secret key is null");
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 
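
To put the new provider to work, a client points the S3A credential-provider
setting at TemporaryAWSCredentialsProvider and supplies the three STS-issued
values. A sketch with placeholder credentials: fs.s3a.session.token is the
property this patch adds, while the other key names follow the S3A
documentation of the period and should be checked against your release:

    import org.apache.hadoop.conf.Configuration;

    static Configuration stsConfiguration() {
      Configuration conf = new Configuration();
      conf.set("fs.s3a.aws.credentials.provider",
          "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider");
      // All three values come from an AWS STS session-token request;
      // the strings below are placeholders, not working credentials.
      conf.set("fs.s3a.access.key", "ASIA-placeholder");
      conf.set("fs.s3a.secret.key", "secret-placeholder");
      conf.set("fs.s3a.session.token", "token-placeholder");
      return conf;
    }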

[10/50] [abbrv] hadoop git commit: Revert "HDFS-10224. Implement asynchronous rename for DistributedFileSystem. Contributed by Xiaobing Zhou"

2016-06-09 Thread aengineer
Revert "HDFS-10224. Implement asynchronous rename for DistributedFileSystem.  
Contributed by Xiaobing Zhou"

This reverts commit fc94810d3f537e51e826fc21ade7867892b9d8dc.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/106234d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/106234d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/106234d8

Branch: refs/heads/HDFS-7240
Commit: 106234d873c60fa52cd0d812fb1cdc0c6b998a6d
Parents: 4d36b22
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:55 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:55 2016 -0700

--
 .../java/org/apache/hadoop/fs/FileSystem.java   |   1 +
 .../main/java/org/apache/hadoop/ipc/Client.java |  11 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  34 +--
 .../org/apache/hadoop/ipc/TestAsyncIPC.java |   2 +-
 .../hadoop/hdfs/AsyncDistributedFileSystem.java | 110 
 .../hadoop/hdfs/DistributedFileSystem.java  |  22 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  45 +---
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 258 ---
 8 files changed, 20 insertions(+), 463 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/106234d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 9e13a7a..0ecd8b7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1252,6 +1252,7 @@ public abstract class FileSystem extends Configured 
implements Closeable {
   /**
* Renames Path src to Path dst
* 
+   * Fails if src is a file and dst is a directory.
* Fails if src is a directory and dst is a file.
* Fails if the parent of dst does not exist or is a file.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/106234d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index d59aeb89..f206861 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -119,8 +119,7 @@ public class Client implements AutoCloseable {
 
   private static final ThreadLocal<Integer> callId = new 
ThreadLocal<Integer>();
   private static final ThreadLocal<Integer> retryCount = new 
ThreadLocal<Integer>();
-  private static final ThreadLocal<Future<?>>
-  RETURN_RPC_RESPONSE = new ThreadLocal<>();
+  private static final ThreadLocal<Future<?>> returnValue = new 
ThreadLocal<>();
   private static final ThreadLocal<Boolean> asynchronousMode =
   new ThreadLocal<Boolean>() {
 @Override
@@ -131,8 +130,8 @@ public class Client implements AutoCloseable {
 
   @SuppressWarnings("unchecked")
   @Unstable
-  public static <T> Future<T> getReturnRpcResponse() {
-return (Future<T>) RETURN_RPC_RESPONSE.get();
+  public static <T> Future<T> getReturnValue() {
+return (Future<T>) returnValue.get();
   }
 
   /** Set call id and retry count for the next call. */
@@ -1397,7 +1396,7 @@ public class Client implements AutoCloseable {
 }
   };
 
-  RETURN_RPC_RESPONSE.set(returnFuture);
+  returnValue.set(returnFuture);
   return null;
 } else {
   return getRpcResponse(call, connection);
@@ -1411,7 +1410,7 @@ public class Client implements AutoCloseable {
*  synchronous mode.
*/
   @Unstable
-  public static boolean isAsynchronousMode() {
+  static boolean isAsynchronousMode() {
 return asynchronousMode.get();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/106234d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 8fcdb78..071e2e8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -26,9 +26,7 @@ import 

[11/50] [abbrv] hadoop git commit: Revert "Revert "HDFS-10224. Implement asynchronous rename for DistributedFileSystem. Contributed by Xiaobing Zhou""

2016-06-09 Thread aengineer
Revert "Revert "HDFS-10224. Implement asynchronous rename for 
DistributedFileSystem.  Contributed by Xiaobing Zhou""

This reverts commit 106234d873c60fa52cd0d812fb1cdc0c6b998a6d.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eded3d10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eded3d10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eded3d10

Branch: refs/heads/HDFS-7240
Commit: eded3d109e4c5225d8c5cd3c2d82e7ac93841263
Parents: 106234d
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:28:21 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:28:21 2016 +0800

--
 .../java/org/apache/hadoop/fs/FileSystem.java   |   1 -
 .../main/java/org/apache/hadoop/ipc/Client.java |  11 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  34 ++-
 .../org/apache/hadoop/ipc/TestAsyncIPC.java |   2 +-
 .../hadoop/hdfs/AsyncDistributedFileSystem.java | 110 
 .../hadoop/hdfs/DistributedFileSystem.java  |  22 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  45 +++-
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 258 +++
 8 files changed, 463 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eded3d10/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 0ecd8b7..9e13a7a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1252,7 +1252,6 @@ public abstract class FileSystem extends Configured 
implements Closeable {
   /**
* Renames Path src to Path dst
* 
-   * Fails if src is a file and dst is a directory.
* Fails if src is a directory and dst is a file.
* Fails if the parent of dst does not exist or is a file.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eded3d10/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index f206861..d59aeb89 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -119,7 +119,8 @@ public class Client implements AutoCloseable {
 
   private static final ThreadLocal<Integer> callId = new 
ThreadLocal<Integer>();
   private static final ThreadLocal<Integer> retryCount = new 
ThreadLocal<Integer>();
-  private static final ThreadLocal<Future<?>> returnValue = new 
ThreadLocal<>();
+  private static final ThreadLocal<Future<?>>
+  RETURN_RPC_RESPONSE = new ThreadLocal<>();
   private static final ThreadLocal<Boolean> asynchronousMode =
   new ThreadLocal<Boolean>() {
 @Override
@@ -130,8 +131,8 @@ public class Client implements AutoCloseable {
 
   @SuppressWarnings("unchecked")
   @Unstable
-  public static <T> Future<T> getReturnValue() {
-return (Future<T>) returnValue.get();
+  public static <T> Future<T> getReturnRpcResponse() {
+return (Future<T>) RETURN_RPC_RESPONSE.get();
   }
 
   /** Set call id and retry count for the next call. */
@@ -1396,7 +1397,7 @@ public class Client implements AutoCloseable {
 }
   };
 
-  returnValue.set(returnFuture);
+  RETURN_RPC_RESPONSE.set(returnFuture);
   return null;
 } else {
   return getRpcResponse(call, connection);
@@ -1410,7 +1411,7 @@ public class Client implements AutoCloseable {
*  synchronous mode.
*/
   @Unstable
-  static boolean isAsynchronousMode() {
+  public static boolean isAsynchronousMode() {
 return asynchronousMode.get();
   }
 

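For context, a minimal sketch of how a caller was expected to drive this
thread-local API (the translator handle and paths are assumptions for
illustration; names follow the reverted code above):

    // Hedged sketch: flip the thread-local async mode, fire the RPC,
    // then collect the Future parked by the Client for this thread.
    final boolean wasAsync = Client.isAsynchronousMode();
    Client.setAsynchronousMode(true);
    try {
      namenode.rename(src, dst);            // returns immediately in async mode
      Future<Void> reply = Client.getReturnRpcResponse();
      reply.get();                          // block only when the result is needed
    } finally {
      Client.setAsynchronousMode(wasAsync); // restore the caller's mode
    }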
http://git-wip-us.apache.org/repos/asf/hadoop/blob/eded3d10/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 071e2e8..8fcdb78 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 

[07/50] [abbrv] hadoop git commit: Revert "HADOOP-13168. Support Future.get with timeout in ipc async calls."

2016-06-09 Thread aengineer
Revert "HADOOP-13168. Support Future.get with timeout in ipc async calls."

This reverts commit 42c22f7e3d6e88bf1115f617f6e80326d1ac.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4450d47
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4450d47
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4450d47

Branch: refs/heads/HDFS-7240
Commit: e4450d47f19131818e1c040b6bd8d85ae8250475
Parents: b82c74b
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:16 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:16 2016 -0700

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 119 ++
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  62 +-
 .../apache/hadoop/util/concurrent/AsyncGet.java |  60 -
 .../hadoop/util/concurrent/AsyncGetFuture.java  |  73 ---
 .../org/apache/hadoop/ipc/TestAsyncIPC.java | 124 ---
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  24 +++-
 .../ClientNamenodeProtocolTranslatorPB.java |  33 +++--
 7 files changed, 185 insertions(+), 310 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4450d47/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index d1d5b17..9be4649 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -18,10 +18,46 @@
 
 package org.apache.hadoop.ipc;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.CodedOutputStream;
+import static org.apache.hadoop.ipc.RpcConstants.*;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.EOFException;
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InterruptedIOException;
+import java.io.OutputStream;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Hashtable;
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import javax.net.SocketFactory;
+import javax.security.sasl.Sasl;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -57,25 +93,14 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.concurrent.AsyncGet;
-import org.apache.hadoop.util.concurrent.AsyncGetFuture;
 import org.apache.htrace.core.Span;
 import org.apache.htrace.core.Tracer;
 
-import javax.net.SocketFactory;
-import javax.security.sasl.Sasl;
-import java.io.*;
-import java.net.*;
-import java.security.PrivilegedExceptionAction;
-import java.util.*;
-import java.util.Map.Entry;
-import java.util.concurrent.*;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import static org.apache.hadoop.ipc.RpcConstants.CONNECTION_CONTEXT_CALL_ID;
-import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.AbstractFuture;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.protobuf.CodedOutputStream;
 
 /** A client for an IPC service.  IPC calls take a single {@link 

[30/50] [abbrv] hadoop git commit: MAPREDUCE-6702. TestMiniMRChildTask.testTaskEnv and TestMiniMRChildTask.testTaskOldEnv are failing (ajisakaa via rkanter)

2016-06-09 Thread aengineer
MAPREDUCE-6702. TestMiniMRChildTask.testTaskEnv and 
TestMiniMRChildTask.testTaskOldEnv are failing (ajisakaa via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/733f3f18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/733f3f18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/733f3f18

Branch: refs/heads/HDFS-7240
Commit: 733f3f18d5cf38cbae35146fbef8e16e35fdf5e1
Parents: 620325e
Author: Robert Kanter 
Authored: Tue Jun 7 15:46:06 2016 -0700
Committer: Robert Kanter 
Committed: Tue Jun 7 15:46:06 2016 -0700

--
 .../src/site/markdown/SingleCluster.md.vm   |  12 +-
 .../java/org/apache/hadoop/mapred/JobConf.java  |   6 -
 .../hadoop/mapred/TestMiniMRChildTask.java  | 233 +++
 3 files changed, 41 insertions(+), 210 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/733f3f18/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm 
b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
index 573ca32..4825e00 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
@@ -181,13 +181,23 @@ You can run a MapReduce job on YARN in a 
pseudo-distributed mode by setting a fe
 
 The following instructions assume that 1. ~ 4. steps of [the above 
instructions](#Execution) are already executed.
 
-1.  Configure parameters as follows:`etc/hadoop/mapred-site.xml`:
+1.  Configure parameters as follows:
+
+`etc/hadoop/mapred-site.xml`:
 
         <configuration>
             <property>
                 <name>mapreduce.framework.name</name>
                 <value>yarn</value>
             </property>
+            <property>
+                <name>mapreduce.admin.user.env</name>
+                <value>HADOOP_MAPRED_HOME=$HADOOP_COMMON_HOME</value>
+            </property>
+            <property>
+                <name>yarn.app.mapreduce.am.env</name>
+                <value>HADOOP_MAPRED_HOME=$HADOOP_COMMON_HOME</value>
+            </property>
         </configuration>
 
 `etc/hadoop/yarn-site.xml`:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/733f3f18/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
index 2cfce1f..f2b0aae 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
@@ -294,8 +294,6 @@ public class JobConf extends Configuration {
* Example:
* 
*A=foo - This will set the env variable A to foo. 
-   *B=$X:c This is inherit tasktracker's X env variable on Linux. 
-   *B=%X%;c This is inherit tasktracker's X env variable on Windows. 

* 
* 
* @deprecated Use {@link #MAPRED_MAP_TASK_ENV} or 
@@ -314,8 +312,6 @@ public class JobConf extends Configuration {
* Example:
* 
*A=foo - This will set the env variable A to foo. 
-   *B=$X:c This is inherit tasktracker's X env variable on Linux. 
-   *B=%X%;c This is inherit tasktracker's X env variable on Windows. 

* 
*/
   public static final String MAPRED_MAP_TASK_ENV = JobContext.MAP_ENV;
@@ -330,8 +326,6 @@ public class JobConf extends Configuration {
* Example:
* 
*A=foo - This will set the env variable A to foo. 
-   *B=$X:c This is inherit tasktracker's X env variable on Linux. 
-   *B=%X%;c This is inherit tasktracker's X env variable on Windows. 

* 
*/
   public static final String MAPRED_REDUCE_TASK_ENV = JobContext.REDUCE_ENV;

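The javadoc hunks above drop the "B=$X:c" / "B=%X%;c" inheritance examples;
what remains documented is plain NAME=VALUE pairs. A small sketch of setting
them (the property constants are real, the values illustrative):

    JobConf conf = new JobConf();
    // Comma-separated NAME=VALUE pairs become the task's environment.
    conf.set(JobConf.MAPRED_MAP_TASK_ENV, "A=foo,B=bar");
    conf.set(JobConf.MAPRED_REDUCE_TASK_ENV, "A=foo");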
http://git-wip-us.apache.org/repos/asf/hadoop/blob/733f3f18/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
index cbeeccf..f690118 100644
--- 

[18/50] [abbrv] hadoop git commit: Revert "Revert "HADOOP-13226 Support async call retry and failover.""

2016-06-09 Thread aengineer
Revert "Revert "HADOOP-13226 Support async call retry and failover.""

This reverts commit 5360da8bd9f720384860f411bee081aef13b4bd4.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35f255b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35f255b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35f255b0

Branch: refs/heads/HDFS-7240
Commit: 35f255b03b1bb5c94063ec1818af1d253ceee991
Parents: 7e7b1ae
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:43 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:43 2016 +0800

--
 .../dev-support/findbugsExcludeFile.xml |   8 +-
 .../hadoop/io/retry/AsyncCallHandler.java   | 321 +++
 .../org/apache/hadoop/io/retry/CallReturn.java  |  75 +
 .../hadoop/io/retry/RetryInvocationHandler.java | 134 ++--
 .../apache/hadoop/io/retry/RetryPolicies.java   |   4 +-
 .../main/java/org/apache/hadoop/ipc/Client.java |  25 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  13 +-
 .../apache/hadoop/util/concurrent/AsyncGet.java |  17 +-
 .../org/apache/hadoop/ipc/TestAsyncIPC.java |  10 +-
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |   7 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  42 +--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java|  43 +--
 .../apache/hadoop/hdfs/TestAsyncHDFSWithHA.java | 181 +++
 .../hdfs/server/namenode/ha/HATestUtil.java |   9 +-
 14 files changed, 775 insertions(+), 114 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35f255b0/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml 
b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index ab8673b..a644aa5 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -345,7 +345,13 @@

  
 
- 
+ 
+ 
+   
+   
+   
+ 
+
  



http://git-wip-us.apache.org/repos/asf/hadoop/blob/35f255b0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
new file mode 100644
index 000..5a03b03
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
@@ -0,0 +1,321 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.retry;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.AsyncGet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.Method;
+import java.util.LinkedList;
+import java.util.Queue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
+
+/** Handle async calls. */
+@InterfaceAudience.Private
+public class AsyncCallHandler {
+  static final Logger LOG = LoggerFactory.getLogger(AsyncCallHandler.class);
+
+  private static final ThreadLocal<AsyncGet<?, Exception>>
+      LOWER_LAYER_ASYNC_RETURN = new ThreadLocal<>();
+  private static final ThreadLocal<AsyncGet<Object, Throwable>>
+      ASYNC_RETURN = new ThreadLocal<>();
+
+  /** @return the async return value from {@link AsyncCallHandler}. */
+  

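The handler above parks results in AsyncGet slots; a minimal sketch of the
consuming side (the two-argument get(timeout, unit) signature is an
assumption based on this excerpt):

    // May throw TimeoutException if the response is not ready in time.
    Long awaitSketch(AsyncGet<Long, Exception> asyncGet) throws Exception {
      return asyncGet.get(500, TimeUnit.MILLISECONDS);
    }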
[02/50] [abbrv] hadoop git commit: HDFS-10481. HTTPFS server should correctly impersonate as end user to open file. Contributed by Xiao Chen.

2016-06-09 Thread aengineer
HDFS-10481. HTTPFS server should correctly impersonate as end user to open 
file. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47e0321e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47e0321e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47e0321e

Branch: refs/heads/HDFS-7240
Commit: 47e0321ee91149331e6ae72e7caa41d1de078b6c
Parents: 99a771c
Author: Andrew Wang 
Authored: Fri Jun 3 17:21:17 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 17:21:17 2016 -0700

--
 .../hadoop/fs/http/server/HttpFSServer.java | 218 ++-
 1 file changed, 114 insertions(+), 104 deletions(-)
--

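The substance of the change, in miniature: run the filesystem call inside a
doAs so it executes as the end user rather than as the HttpFS service
principal (a sketch; the open() body stands in for the FSOperations command):

    InputStream openAsEndUser(final FileSystem fs, final Path path, String user)
        throws Exception {
      UserGroupInformation ugi = UserGroupInformation.createProxyUser(
          user, UserGroupInformation.getLoginUser());
      return ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
        @Override
        public InputStream run() throws Exception {
          return fs.open(path);  // executes under the proxied user's identity
        }
      });
    }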

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47e0321e/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index b7b63fa..db4692a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -79,6 +79,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 import java.security.AccessControlException;
+import java.security.PrivilegedExceptionAction;
 import java.text.MessageFormat;
 import java.util.EnumSet;
 import java.util.List;
@@ -94,6 +95,7 @@ import java.util.Map;
 @InterfaceAudience.Private
 public class HttpFSServer {
   private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
+  private static final Logger LOG = 
LoggerFactory.getLogger(HttpFSServer.class);
 
   /**
* Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem 
for the effective
@@ -205,115 +207,123 @@ public class HttpFSServer {
 MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
 MDC.put("hostname", request.getRemoteAddr());
 switch (op.value()) {
-  case OPEN: {
-//Invoking the command directly using an unmanaged FileSystem that is
-// released by the FileSystemReleaseFilter
-FSOperations.FSOpen command = new FSOperations.FSOpen(path);
-FileSystem fs = createFileSystem(user);
-InputStream is = command.execute(fs);
-Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
-Long len = params.get(LenParam.NAME, LenParam.class);
-AUDIT_LOG.info("[{}] offset [{}] len [{}]",
-   new Object[]{path, offset, len});
-InputStreamEntity entity = new InputStreamEntity(is, offset, len);
-response =
+case OPEN: {
+  //Invoking the command directly using an unmanaged FileSystem that is
+  // released by the FileSystemReleaseFilter
+  final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
+  final FileSystem fs = createFileSystem(user);
+  InputStream is = null;
+  UserGroupInformation ugi = UserGroupInformation
+  .createProxyUser(user.getShortUserName(),
+  UserGroupInformation.getLoginUser());
+  try {
+is = ugi.doAs(new PrivilegedExceptionAction() {
+  @Override
+  public InputStream run() throws Exception {
+return command.execute(fs);
+  }
+});
+  } catch (InterruptedException ie) {
+LOG.info("Open interrupted.", ie);
+Thread.currentThread().interrupt();
+  }
+  Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
+  Long len = params.get(LenParam.NAME, LenParam.class);
+  AUDIT_LOG.info("[{}] offset [{}] len [{}]",
+  new Object[] { path, offset, len });
+  InputStreamEntity entity = new InputStreamEntity(is, offset, len);
+  response =
   Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
-break;
-  }
-  case GETFILESTATUS: {
-FSOperations.FSFileStatus command =
-  new FSOperations.FSFileStatus(path);
-Map json = fsExecute(user, command);
-AUDIT_LOG.info("[{}]", path);
-response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-break;
-  }
-  case LISTSTATUS: {
-String filter = params.get(FilterParam.NAME, FilterParam.class);
-FSOperations.FSListStatus command = new FSOperations.FSListStatus(
-  path, filter);
-Map json = fsExecute(user, command);
-AUDIT_LOG.info("[{}] filter [{}]", path,
-   (filter != 

[08/50] [abbrv] hadoop git commit: Revert "HDFS-10346. Implement asynchronous setPermission/setOwner for DistributedFileSystem. Contributed by Xiaobing Zhou"

2016-06-09 Thread aengineer
Revert "HDFS-10346. Implement asynchronous setPermission/setOwner for 
DistributedFileSystem.  Contributed by  Xiaobing Zhou"

This reverts commit 7251bb922b20dae49c8c6854864095fb16d8cbd5.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f23d5dfc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f23d5dfc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f23d5dfc

Branch: refs/heads/HDFS-7240
Commit: f23d5dfc60a017187ae57f3667ac0e688877c2dd
Parents: e4450d4
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:17 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:17 2016 -0700

--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  59 
 .../ClientNamenodeProtocolTranslatorPB.java |  39 +--
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 267 ++-
 .../apache/hadoop/hdfs/TestDFSPermission.java   |  29 +-
 4 files changed, 43 insertions(+), 351 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f23d5dfc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
index 4fe0861..356ae3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AsyncDistributedFileSystem.java
@@ -27,7 +27,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.ipc.Client;
 
@@ -38,9 +37,6 @@ import com.google.common.util.concurrent.AbstractFuture;
  * This instance of this class is the way end-user code interacts
  * with a Hadoop DistributedFileSystem in an asynchronous manner.
  *
- * This class is unstable, so no guarantee is provided as to reliability,
- * stability or compatibility across any level of release granularity.
- *
  */
 @Unstable
 public class AsyncDistributedFileSystem {
@@ -115,59 +111,4 @@ public class AsyncDistributedFileSystem {
   Client.setAsynchronousMode(isAsync);
 }
   }
-
-  /**
-   * Set permission of a path.
-   *
-   * @param p
-   *  the path the permission is set to
-   * @param permission
-   *  the permission that is set to a path.
-   * @return an instance of Future, #get of which is invoked to wait for
-   * asynchronous call being finished.
-   */
-  public Future setPermission(Path p, final FsPermission permission)
-  throws IOException {
-dfs.getFsStatistics().incrementWriteOps(1);
-final Path absPath = dfs.fixRelativePart(p);
-final boolean isAsync = Client.isAsynchronousMode();
-Client.setAsynchronousMode(true);
-try {
-  dfs.getClient().setPermission(dfs.getPathName(absPath), permission);
-  return getReturnValue();
-} finally {
-  Client.setAsynchronousMode(isAsync);
-}
-  }
-
-  /**
-   * Set owner of a path (i.e. a file or a directory). The parameters username
-   * and groupname cannot both be null.
-   *
-   * @param p
-   *  The path
-   * @param username
-   *  If it is null, the original username remains unchanged.
-   * @param groupname
-   *  If it is null, the original groupname remains unchanged.
-   * @return an instance of Future, #get of which is invoked to wait for
-   * asynchronous call being finished.
-   */
-  public Future setOwner(Path p, String username, String groupname)
-  throws IOException {
-if (username == null && groupname == null) {
-  throw new IOException("username == null && groupname == null");
-}
-
-dfs.getFsStatistics().incrementWriteOps(1);
-final Path absPath = dfs.fixRelativePart(p);
-final boolean isAsync = Client.isAsynchronousMode();
-Client.setAsynchronousMode(true);
-try {
-  dfs.getClient().setOwner(dfs.getPathName(absPath), username, groupname);
-  return getReturnValue();
-} finally {
-  Client.setAsynchronousMode(isAsync);
-}
-  }
 }

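For reference, the removed methods were meant to be consumed as the deleted
javadoc describes, with get() waiting out the asynchronous call (a sketch;
the adfs handle and the path are assumed):

    Future<Void> f = adfs.setPermission(new Path("/tmp/d"),
        new FsPermission((short) 0755));
    f.get();  // wait for the asynchronous call to finish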
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f23d5dfc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

[29/50] [abbrv] hadoop git commit: YARN-4837. User facing aspects of 'AM blacklisting' feature need fixing. (vinodkv via wangda)

2016-06-09 Thread aengineer
YARN-4837. User facing aspects of 'AM blacklisting' feature need fixing. 
(vinodkv via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/620325e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/620325e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/620325e8

Branch: refs/heads/HDFS-7240
Commit: 620325e81696fca140195b74929ed9eda2d5eb16
Parents: be34e85
Author: Wangda Tan 
Authored: Tue Jun 7 15:06:42 2016 -0700
Committer: Wangda Tan 
Committed: Tue Jun 7 15:06:42 2016 -0700

--
 .../yarn/api/records/AMBlackListingRequest.java |  67 -
 .../records/ApplicationSubmissionContext.java   |  23 --
 .../hadoop/yarn/conf/YarnConfiguration.java |  25 +-
 .../src/main/proto/yarn_protos.proto|   5 -
 .../yarn/conf/TestYarnConfigurationFields.java  |   7 +
 .../impl/pb/AMBlackListingRequestPBImpl.java| 104 
 .../pb/ApplicationSubmissionContextPBImpl.java  |  40 ---
 .../src/main/resources/yarn-default.xml |  19 --
 .../hadoop/yarn/api/TestPBImplRecords.java  |  10 -
 .../blacklist/BlacklistManager.java |   9 +-
 .../blacklist/BlacklistUpdates.java |  47 
 .../blacklist/DisabledBlacklistManager.java |  12 +-
 .../blacklist/SimpleBlacklistManager.java   |  17 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  79 ++
 .../rmapp/attempt/RMAppAttempt.java |   2 +-
 .../rmapp/attempt/RMAppAttemptImpl.java |  85 +--
 .../scheduler/AbstractYarnScheduler.java|   2 +-
 .../scheduler/AppSchedulingInfo.java|  74 +++---
 .../scheduler/SchedulerAppUtils.java|  16 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  33 ++-
 .../scheduler/capacity/CapacityScheduler.java   |  11 +-
 .../allocator/RegularContainerAllocator.java|   2 +-
 .../scheduler/fair/FSLeafQueue.java |   2 +-
 .../scheduler/fair/FairScheduler.java   |   8 +-
 .../scheduler/fifo/FifoScheduler.java   |  12 +-
 .../webapp/RMAppAttemptBlock.java   |   9 +-
 .../resourcemanager/webapp/RMAppBlock.java  |  13 +-
 .../resourcemanager/webapp/RMWebServices.java   |  21 +-
 .../webapp/dao/AMBlackListingRequestInfo.java   |  61 -
 .../webapp/dao/AppAttemptInfo.java  |   8 +-
 .../dao/ApplicationSubmissionContextInfo.java   |  13 -
 .../TestNodeBlacklistingOnAMFailures.java   | 251 +++
 .../applicationsmanager/TestAMRestart.java  | 177 +
 .../blacklist/TestBlacklistManager.java |  29 +--
 .../rmapp/TestRMAppTransitions.java |  58 -
 .../scheduler/TestAppSchedulingInfo.java|  12 +-
 .../capacity/TestCapacityScheduler.java |   8 +-
 .../scheduler/fair/TestFSAppAttempt.java|  12 +-
 .../scheduler/fair/TestFairScheduler.java   |   9 +-
 .../TestRMWebServicesAppsModification.java  |  39 ++-
 40 files changed, 536 insertions(+), 895 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/620325e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
deleted file mode 100644
index 4aec2ba..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.api.records;
-
-import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import 

[44/50] [abbrv] hadoop git commit: YARN-5191. Renamed the newly added “download=true” option for getting logs via NMWebServices and AHSWebServices to be a better "format" option. (Xuan Gong via vinodk

2016-06-09 Thread aengineer
YARN-5191. Renamed the newly added “download=true” option for getting logs 
via NMWebServices and AHSWebServices to be a better "format" option. (Xuan Gong 
via vinodkv)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9378d942
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9378d942
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9378d942

Branch: refs/heads/HDFS-7240
Commit: 9378d9428f127eff7acd6c13544016cdbf2d65fb
Parents: 656c460
Author: Vinod Kumar Vavilapalli 
Authored: Thu Jun 9 12:30:58 2016 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Thu Jun 9 12:30:58 2016 -0700

--
 .../hadoop/yarn/webapp/util/WebAppUtils.java| 18 ++
 .../webapp/AHSWebServices.java  | 36 
 .../nodemanager/webapp/NMWebServices.java   | 33 +++---
 .../nodemanager/webapp/TestNMWebServices.java   | 15 +++-
 4 files changed, 74 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9378d942/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index faf4a77..3aa773a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -24,6 +24,7 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -400,4 +401,21 @@ public class WebAppUtils {
 }
 return aid;
   }
+
+  public static String getSupportedLogContentType(String format) {
+if (format.equalsIgnoreCase("text")) {
+  return "text/plain";
+} else if (format.equalsIgnoreCase("octet-stream")) {
+  return "application/octet-stream";
+}
+return null;
+  }
+
+  public static String getDefaultLogContentType() {
+return "text/plain";
+  }
+
+  public static List listSupportedLogContentType() {
+return Arrays.asList("text", "octet-stream");
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9378d942/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index 59dbd44..692b172 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -66,6 +66,7 @@ import 
org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import com.google.common.base.Joiner;
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
@@ -212,7 +213,7 @@ public class AHSWebServices extends WebServices {
   @Context HttpServletResponse res,
   @PathParam("containerid") String containerIdStr,
   @PathParam("filename") String filename,
-  @QueryParam("download") String download,
+  @QueryParam("format") String format,
   @QueryParam("size") String size) {
 init(res);
 ContainerId containerId;
@@ -223,9 +224,6 @@ public class AHSWebServices extends WebServices {
   "Invalid ContainerId: " + containerIdStr);
 }
 
-boolean downloadFile = parseBooleanParam(download);
-
-
 final long length = 

[48/50] [abbrv] hadoop git commit: HADOOP-12666. Support Microsoft Azure Data Lake - as a file system in Hadoop. Contributed by Vishwajeet Dusane.

2016-06-09 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9581fb71/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
new file mode 100644
index 000..7f7e749
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.net.HttpURLConnection;
+
+/**
+ * Extended Webhdfs PostOpParam to avoid redirect during append operation for
+ * azure data lake storage.
+ */
+
+public class ADLPostOpParam extends HttpOpParam<ADLPostOpParam.Op> {
+  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class);
+
+  /**
+   * Constructor.
+   *
+   * @param str a string representation of the parameter value.
+   */
+  public ADLPostOpParam(final String str) {
+super(DOMAIN, DOMAIN.parse(str));
+  }
+
+  @Override
+  public final String getName() {
+return NAME;
+  }
+
+  /**
+   * Post operations.
+   */
+  public static enum Op implements HttpOpParam.Op {
+APPEND(true, false, HttpURLConnection.HTTP_OK);
+
+private final boolean redirect;
+private final boolean doOutput;
+private final int expectedHttpResponseCode;
+
+Op(final boolean doOut, final boolean doRedirect,
+final int expectHttpResponseCode) {
+  this.doOutput = doOut;
+  this.redirect = doRedirect;
+  this.expectedHttpResponseCode = expectHttpResponseCode;
+}
+
+@Override
+public Type getType() {
+  return Type.POST;
+}
+
+@Override
+public boolean getRequireAuth() {
+  return false;
+}
+
+@Override
+public boolean getDoOutput() {
+  return doOutput;
+}
+
+@Override
+public boolean getRedirect() {
+  return redirect;
+}
+
+@Override
+public int getExpectedHttpResponseCode() {
+  return expectedHttpResponseCode;
+}
+
+/**
+ * @return a URI query string.
+ */
+@Override
+public String toQueryString() {
+  return NAME + "=" + this;
+}
+  }
+}

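Per toQueryString() above, the op renders straight into the query string
(assuming the inherited NAME constant is "op", as in WebHDFS):

    // "op=APPEND"; redirect=false is what lets ADL skip the WebHDFS
    // two-step redirect on append.
    String q = ADLPostOpParam.Op.APPEND.toQueryString();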
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9581fb71/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
new file mode 100644
index 000..d300a1c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.net.HttpURLConnection;
+
+/**
+ * Extended Webhdfs PutOpParam to avoid redirect during Create operation for
+ * azure data lake storage.
+ */
+public class ADLPutOpParam extends HttpOpParam<ADLPutOpParam.Op> {
+  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class);
+
+  

[27/50] [abbrv] hadoop git commit: HDFS-10468. HDFS read ends up ignoring an interrupt. Contributed by Jing Zhao

2016-06-09 Thread aengineer
HDFS-10468. HDFS read ends up ignoring an interrupt. Contributed by Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be34e85e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be34e85e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be34e85e

Branch: refs/heads/HDFS-7240
Commit: be34e85e682880f46eee0310bf00ecc7d39cd5bd
Parents: c14c1b2
Author: Jing Zhao 
Authored: Tue Jun 7 10:48:21 2016 -0700
Committer: Jing Zhao 
Committed: Tue Jun 7 10:48:21 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 36 ++--
 .../java/org/apache/hadoop/hdfs/TestRead.java   | 87 
 .../server/datanode/SimulatedFSDataset.java |  4 +-
 3 files changed, 119 insertions(+), 8 deletions(-)
--

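In effect, a reader thread interrupted mid-read now sees the interrupt
instead of the retry loop swallowing it. A hedged sketch of the behavior the
fix enables (filesystem handle and path assumed):

    Thread reader = new Thread(() -> {
      try (FSDataInputStream in = fs.open(new Path("/big/file"))) {
        in.read(new byte[4096]);
      } catch (InterruptedIOException | ClosedByInterruptException e) {
        // With this change the interrupt propagates here promptly.
      } catch (IOException e) {
        // other read failures
      }
    });
    reader.start();
    reader.interrupt();  // the in-flight read aborts instead of retrying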

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be34e85e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 2ed0abd..7f32a56 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -19,8 +19,10 @@ package org.apache.hadoop.hdfs;
 
 import java.io.EOFException;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
+import java.nio.channels.ClosedByInterruptException;
 import java.util.AbstractMap;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -304,7 +306,7 @@ public class DFSInputStream extends FSInputStream
 try {
   Thread.sleep(waitTime);
 } catch (InterruptedException e) {
-  throw new IOException(
+  throw new InterruptedIOException(
   "Interrupted while getting the last block length.");
 }
   }
@@ -379,6 +381,7 @@ public class DFSInputStream extends FSInputStream
   return n;
 }
   } catch (IOException ioe) {
+checkInterrupted(ioe);
 if (ioe instanceof RemoteException) {
   if (((RemoteException) ioe).unwrapRemoteException() instanceof
   ReplicaNotFoundException) {
@@ -414,7 +417,8 @@ public class DFSInputStream extends FSInputStream
 try {
   Thread.sleep(500); // delay between retries.
 } catch (InterruptedException e) {
-  throw new IOException("Interrupted while getting the length.");
+  throw new InterruptedIOException(
+  "Interrupted while getting the length.");
 }
   }
 
@@ -660,6 +664,7 @@ public class DFSInputStream extends FSInputStream
 }
 return chosenNode;
   } catch (IOException ex) {
+checkInterrupted(ex);
 if (ex instanceof InvalidEncryptionKeyException && 
refetchEncryptionKey > 0) {
   DFSClient.LOG.info("Will fetch a new encryption key and retry, "
   + "encryption key was invalid when connecting to " + targetAddr
@@ -681,6 +686,15 @@ public class DFSInputStream extends FSInputStream
 }
   }
 
+  private void checkInterrupted(IOException e) throws IOException {
+if (Thread.currentThread().isInterrupted() &&
+(e instanceof ClosedByInterruptException ||
+e instanceof InterruptedIOException)) {
+  DFSClient.LOG.debug("The reading thread has been interrupted.", e);
+  throw e;
+}
+  }
+
   protected BlockReader getBlockReader(LocatedBlock targetBlock,
   long offsetInBlock, long length, InetSocketAddress targetAddr,
   StorageType storageType, DatanodeInfo datanode) throws IOException {
@@ -948,6 +962,7 @@ public class DFSInputStream extends FSInputStream
 } catch (ChecksumException ce) {
   throw ce;
 } catch (IOException e) {
+  checkInterrupted(e);
   if (retries == 1) {
 DFSClient.LOG.warn("DFS Read", e);
   }
@@ -1044,9 +1059,12 @@ public class DFSInputStream extends FSInputStream
   // expanding time window for each failure
   timeWindow * (failures + 1) *
   ThreadLocalRandom.current().nextDouble();
-  DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) + " 
IOException, will wait for " + waitTime + " msec.");
+  DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) +
+  " IOException, will wait for " + waitTime + " msec.");
   Thread.sleep((long)waitTime);
-} catch (InterruptedException ignored) {
+} catch 

[22/50] [abbrv] hadoop git commit: YARN-5185. StageAllocaterGreedyRLE: Fix NPE in corner case. (Carlo Curino via asuresh)

2016-06-09 Thread aengineer
YARN-5185. StageAllocaterGreedyRLE: Fix NPE in corner case. (Carlo Curino via 
asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a9b7372
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a9b7372
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a9b7372

Branch: refs/heads/HDFS-7240
Commit: 7a9b7372a1a917c7b5e1beca7e13c0419e3dbfef
Parents: 6de9213
Author: Arun Suresh 
Authored: Mon Jun 6 21:06:52 2016 -0700
Committer: Arun Suresh 
Committed: Mon Jun 6 21:06:52 2016 -0700

--
 .../planning/StageAllocatorGreedyRLE.java   | 16 
 1 file changed, 12 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a9b7372/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
index c5a3192..5e748fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
@@ -168,12 +168,20 @@ public class StageAllocatorGreedyRLE implements 
StageAllocator {
   if (allocateLeft) {
 // set earliest start to the min of the constraining "range" or my the
 // end of this allocation
-stageEarliestStart =
-Math.min(partialMap.higherKey(minPoint), stageEarliestStart + dur);
+if(partialMap.higherKey(minPoint) == null){
+  stageEarliestStart = stageEarliestStart + dur;
+} else {
+  stageEarliestStart =
+ Math.min(partialMap.higherKey(minPoint), stageEarliestStart + 
dur);
+}
   } else {
 // same as above moving right-to-left
-stageDeadline =
-Math.max(partialMap.higherKey(minPoint), stageDeadline - dur);
+if(partialMap.higherKey(minPoint) == null){
+  stageDeadline = stageDeadline - dur;
+} else {
+  stageDeadline =
+  Math.max(partialMap.higherKey(minPoint), stageDeadline - dur);
+}
   }
 }
 
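The corner case is simply NavigableMap#higherKey returning null when no
strictly greater key exists, e.g.:

    TreeMap<Long, Integer> partialMap = new TreeMap<>();
    partialMap.put(5L, 1);
    Long k = partialMap.higherKey(5L);  // null: nothing strictly greater
                                        // than 5, hence the guards above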


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[32/50] [abbrv] hadoop git commit: YARN-5199. Close LogReader in AHSWebServices#getStreamingOutput and FileInputStream in NMWebServices#getLogs. Contributed by Xuan Gong

2016-06-09 Thread aengineer
YARN-5199. Close LogReader in AHSWebServices#getStreamingOutput and
FileInputStream in NMWebServices#getLogs. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58be55b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58be55b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58be55b6

Branch: refs/heads/HDFS-7240
Commit: 58be55b6e07b94aa55ed87c461f3e5c04cc61630
Parents: 8554aee1b
Author: Xuan 
Authored: Tue Jun 7 16:07:02 2016 -0700
Committer: Xuan 
Committed: Tue Jun 7 16:07:02 2016 -0700

--
 .../webapp/AHSWebServices.java  | 155 ++-
 .../nodemanager/webapp/NMWebServices.java   |  71 +
 2 files changed, 118 insertions(+), 108 deletions(-)
--

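The shape of the fix, reduced to its essence (a sketch; the reader
construction matches the diff below, and the finally block is the point):

    AggregatedLogFormat.LogReader reader = null;
    try {
      reader = new AggregatedLogFormat.LogReader(conf, thisNodeFile.getPath());
      // ... stream the requested container log to the caller ...
    } finally {
      if (reader != null) {
        reader.close();  // previously leaked on early return or exception
      }
    }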

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58be55b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index d91ae55..59dbd44 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -40,7 +40,6 @@ import javax.ws.rs.core.Response;
 import javax.ws.rs.core.StreamingOutput;
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.Response.Status;
-
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -363,86 +362,94 @@ public class AHSWebServices extends WebServices {
   if ((nodeId == null || nodeName.contains(LogAggregationUtils
   .getNodeString(nodeId))) && !nodeName.endsWith(
   LogAggregationUtils.TMP_FILE_SUFFIX)) {
-AggregatedLogFormat.LogReader reader =
-new AggregatedLogFormat.LogReader(conf,
-thisNodeFile.getPath());
-DataInputStream valueStream;
-LogKey key = new LogKey();
-valueStream = reader.next(key);
-while (valueStream != null && !key.toString()
-.equals(containerIdStr)) {
-  // Next container
-  key = new LogKey();
+AggregatedLogFormat.LogReader reader = null;
+try {
+  reader = new AggregatedLogFormat.LogReader(conf,
+  thisNodeFile.getPath());
+  DataInputStream valueStream;
+  LogKey key = new LogKey();
   valueStream = reader.next(key);
-}
-if (valueStream == null) {
-  continue;
-}
-while (true) {
-  try {
-String fileType = valueStream.readUTF();
-String fileLengthStr = valueStream.readUTF();
-long fileLength = Long.parseLong(fileLengthStr);
-if (fileType.equalsIgnoreCase(logFile)) {
-  StringBuilder sb = new StringBuilder();
-  sb.append("LogType:");
-  sb.append(fileType + "\n");
-  sb.append("Log Upload Time:");
-  sb.append(Times.format(System.currentTimeMillis()) + "\n");
-  sb.append("LogLength:");
-  sb.append(fileLengthStr + "\n");
-  sb.append("Log Contents:\n");
-  byte[] b = sb.toString().getBytes(Charset.forName("UTF-8"));
-  os.write(b, 0, b.length);
-
-  long toSkip = 0;
-  long totalBytesToRead = fileLength;
-  if (bytes < 0) {
-long absBytes = Math.abs(bytes);
-if (absBytes < fileLength) {
-  toSkip = fileLength - absBytes;
-  totalBytesToRead = absBytes;
+  while (valueStream != null && !key.toString()
+  .equals(containerIdStr)) {
+// Next container
+key = new 

[43/50] [abbrv] hadoop git commit: HADOOP-13237: s3a initialization against public bucket fails if caller lacks any credentials. Contributed by Chris Nauroth

2016-06-09 Thread aengineer
HADOOP-13237: s3a initialization against public bucket fails if caller lacks 
any credentials. Contributed by Chris Nauroth


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/656c460c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/656c460c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/656c460c

Branch: refs/heads/HDFS-7240
Commit: 656c460c0e79ee144d6ef48d85cec04a1af3b2cc
Parents: 8ea9bbc
Author: Steve Loughran 
Authored: Thu Jun 9 16:36:27 2016 +0100
Committer: Steve Loughran 
Committed: Thu Jun 9 17:28:49 2016 +0100

--
 .../src/main/resources/core-default.xml | 13 -
 .../fs/s3a/AnonymousAWSCredentialsProvider.java | 11 
 .../fs/s3a/BasicAWSCredentialsProvider.java |  8 +++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 22 +---
 .../src/site/markdown/tools/hadoop-aws/index.md | 14 -
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   | 55 
 6 files changed, 113 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/656c460c/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index a65246b..8bb27ea 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -791,7 +791,18 @@
 <property>
   <name>fs.s3a.aws.credentials.provider</name>
-  <description>Class name of a credentials provider that implements com.amazonaws.auth.AWSCredentialsProvider. Omit if using access/secret keys or another authentication mechanism.</description>
+  <description>
+    Class name of a credentials provider that implements
+    com.amazonaws.auth.AWSCredentialsProvider.  Omit if using access/secret keys
+    or another authentication mechanism.  The specified class must provide an
+    accessible constructor accepting java.net.URI and
+    org.apache.hadoop.conf.Configuration, or an accessible default constructor.
+    Specifying org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider allows
+    anonymous access to a publicly accessible S3 bucket without any credentials.
+    Please note that allowing anonymous access to an S3 bucket compromises
+    security and therefore is unsuitable for most use cases.  It can be useful
+    for accessing public data sets without requiring AWS credentials.
+  </description>
 </property>
 
 <property>

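Putting the new provider to work (a sketch; the bucket is a placeholder for
any publicly readable bucket):

    Configuration conf = new Configuration();
    conf.set("fs.s3a.aws.credentials.provider",
        "org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider");
    // No access/secret key required; requests go out unsigned.
    FileSystem fs = FileSystem.get(URI.create("s3a://some-public-bucket/"), conf);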
http://git-wip-us.apache.org/repos/asf/hadoop/blob/656c460c/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
index e62ec77..2c863fc 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
@@ -24,6 +24,17 @@ import com.amazonaws.auth.AWSCredentials;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+/**
+ * AnonymousAWSCredentialsProvider supports anonymous access to AWS services
+ * through the AWS SDK.  AWS requests will not be signed.  This is not suitable
+ * for most cases, because allowing anonymous access to an S3 bucket 
compromises
+ * security.  This can be useful for accessing public data sets without
+ * requiring AWS credentials.
+ *
+ * Please note that users may reference this class name from configuration
+ * property fs.s3a.aws.credentials.provider.  Therefore, changing the class 
name
+ * would be a backward-incompatible change.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public class AnonymousAWSCredentialsProvider implements AWSCredentialsProvider 
{

http://git-wip-us.apache.org/repos/asf/hadoop/blob/656c460c/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
index 2f721e4..3a5ee8c 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
+++ 

[33/50] [abbrv] hadoop git commit: YARN-5176. More test cases for queuing of containers at the NM. (Konstantinos Karanasos via asuresh)

2016-06-09 Thread aengineer
YARN-5176. More test cases for queuing of containers at the NM. (Konstantinos 
Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76f0800c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76f0800c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76f0800c

Branch: refs/heads/HDFS-7240
Commit: 76f0800c21f49fba01694cbdc870103053da802c
Parents: 58be55b
Author: Arun Suresh 
Authored: Tue Jun 7 17:16:18 2016 -0700
Committer: Arun Suresh 
Committed: Tue Jun 7 17:16:18 2016 -0700

--
 .../queuing/QueuingContainerManagerImpl.java|  11 +
 .../BaseContainerManagerTest.java   |  64 +++
 .../containermanager/TestContainerManager.java  |  76 +---
 .../queuing/TestQueuingContainerManager.java| 388 +++
 4 files changed, 391 insertions(+), 148 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76f0800c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
index 1ce3356..a1e3bdb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
@@ -160,6 +160,7 @@ public class QueuingContainerManagerImpl extends 
ContainerManagerImpl {
   containerTokenId.getExecutionType());
 
   if (foundInQueue) {
+LOG.info("Removing queued container with ID " + containerID);
 this.context.getQueuingContext().getKilledQueuedContainers().put(
 containerTokenId,
 "Queued container request removed by ApplicationMaster.");
@@ -502,6 +503,16 @@ public class QueuingContainerManagerImpl extends 
ContainerManagerImpl {
 return allocatedOpportunisticContainers.size();
   }
 
+  @VisibleForTesting
+  public int getNumQueuedGuaranteedContainers() {
+return queuedGuaranteedContainers.size();
+  }
+
+  @VisibleForTesting
+  public int getNumQueuedOpportunisticContainers() {
+return queuedOpportunisticContainers.size();
+  }
+
   class QueuingApplicationEventDispatcher implements
   EventHandler {
 private EventHandler applicationEventDispatcher;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76f0800c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index ab60288..4f0e5c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -40,10 +40,17 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import 

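The two accessors added above follow a common pattern: expose internal queue sizes for test assertions only, marked with Guava's @VisibleForTesting so reviewers know production code should not call them. A standalone sketch of the idiom (class and field names are illustrative, not from the patch):

    import com.google.common.annotations.VisibleForTesting;
    import java.util.ArrayDeque;
    import java.util.Queue;

    public class TaskQueue {
      private final Queue<Runnable> queued = new ArrayDeque<>();

      public synchronized void enqueue(Runnable task) {
        queued.add(task);
      }

      // Widened from private purely so tests can assert on queue depth;
      // the annotation documents that intent.
      @VisibleForTesting
      synchronized int getNumQueuedTasks() {
        return queued.size();
      }
    }
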
[38/50] [abbrv] hadoop git commit: MAPREDUCE-6240. Hadoop client displays confusing error message. (gera)

2016-06-09 Thread aengineer
MAPREDUCE-6240. Hadoop client displays confusing error message. (gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0af96a1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0af96a1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0af96a1c

Branch: refs/heads/HDFS-7240
Commit: 0af96a1c08594c809ecb254cee4f60dd22399772
Parents: 1ee9ea0
Author: Gera Shegalov 
Authored: Sat May 28 22:01:07 2016 -0700
Committer: Gera Shegalov 
Committed: Wed Jun 8 12:59:37 2016 -0700

--
 .../org/apache/hadoop/mapreduce/Cluster.java| 15 ++-
 .../TestClientProtocolProviderImpls.java| 26 +---
 2 files changed, 32 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0af96a1c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
index 9563c0b..6ca918d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
@@ -102,6 +102,10 @@ public class Cluster {
   throws IOException {
 
 initProviderList();
+final IOException initEx = new IOException(
+"Cannot initialize Cluster. Please check your configuration for "
++ MRConfig.FRAMEWORK_NAME
++ " and the correspond server addresses.");
 for (ClientProtocolProvider provider : providerList) {
   LOG.debug("Trying ClientProtocolProvider : "
   + provider.getClass().getName());
@@ -124,16 +128,15 @@ public class Cluster {
   + " as the ClientProtocolProvider - returned null protocol");
 }
   } catch (Exception e) {
-LOG.info("Failed to use " + provider.getClass().getName()
-+ " due to error: ", e);
+final String errMsg = "Failed to use " + provider.getClass().getName()
++ " due to error: ";
+initEx.addSuppressed(new IOException(errMsg, e));
+LOG.info(errMsg, e);
   }
 }
 
 if (null == clientProtocolProvider || null == client) {
-  throw new IOException(
-  "Cannot initialize Cluster. Please check your configuration for "
-  + MRConfig.FRAMEWORK_NAME
-  + " and the correspond server addresses.");
+  throw initEx;
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0af96a1c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
index 6ad76e9..500e133 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
@@ -18,17 +18,20 @@
 
 package org.apache.hadoop.mapreduce;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.mapred.LocalJobRunner;
 import org.apache.hadoop.mapred.YARNRunner;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 public class TestClientProtocolProviderImpls {
 
   @Test
@@ -76,4 +79,21 @@ public class TestClientProtocolProviderImpls {
   "Cannot initialize Cluster. Please check"));
 }
   }
+
+  @Test
+  public void testClusterExceptionRootCause() throws Exception {
+final Configuration 

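The fix above is a nice use of Java 7's Throwable.addSuppressed: instead of throwing a generic "cannot initialize" error, each provider's failure is attached to the one exception that finally escapes, so the stack trace shows every root cause. A self-contained sketch of the same pattern (the Provider interface is hypothetical):

    import java.io.IOException;
    import java.util.List;

    public class FirstWorkingProvider {
      interface Provider {
        Object create() throws Exception;
      }

      static Object pick(List<Provider> providers) throws IOException {
        final IOException initEx =
            new IOException("Cannot initialize: no provider succeeded");
        for (Provider p : providers) {
          try {
            Object result = p.create();
            if (result != null) {
              return result;
            }
          } catch (Exception e) {
            // Each failure shows up as a "Suppressed:" frame under initEx.
            initEx.addSuppressed(e);
          }
        }
        throw initEx;
      }
    }
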
[16/50] [abbrv] hadoop git commit: Revert "Revert "HDFS-10431 Refactor and speedup TestAsyncDFSRename. Contributed by Xiaobing Zhou""

2016-06-09 Thread aengineer
Revert "Revert "HDFS-10431 Refactor and speedup TestAsyncDFSRename.  
Contributed by Xiaobing Zhou""

This reverts commit 5ee5912ebd541d5b4c33ecd46dfdebe1e23b56c3.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db41e6d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db41e6d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db41e6d2

Branch: refs/heads/HDFS-7240
Commit: db41e6d285a3b425ffd7c11c7baa8253c7929439
Parents: b3d81f3
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:31:34 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:31:34 2016 +0800

--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 233 +++-
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 563 ---
 2 files changed, 313 insertions(+), 483 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db41e6d2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
index 67262dd..ddcf492 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
@@ -29,13 +29,16 @@ import static 
org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Random;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 
@@ -43,15 +46,21 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.TestDFSPermission.PermissionGenerator;
 import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
 import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
 import org.apache.hadoop.ipc.AsyncCallLimitExceededException;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -63,21 +72,28 @@ import com.google.common.collect.Lists;
  * */
 public class TestAsyncDFS {
   public static final Log LOG = LogFactory.getLog(TestAsyncDFS.class);
-  private static final int NUM_TESTS = 1000;
+  private final short replFactor = 1;
+  private final long blockSize = 512;
+  private long fileLen = blockSize * 3;
+  private final long seed = Time.now();
+  private final Random r = new Random(seed);
+  private final PermissionGenerator permGenerator = new PermissionGenerator(r);
+  private static final int NUM_TESTS = 50;
   private static final int NUM_NN_HANDLER = 10;
-  private static final int ASYNC_CALL_LIMIT = 100;
+  private static final int ASYNC_CALL_LIMIT = 1000;
 
   private Configuration conf;
   private MiniDFSCluster cluster;
   private FileSystem fs;
+  private AsyncDistributedFileSystem adfs;
 
   @Before
   public void setup() throws IOException {
 conf = new HdfsConfiguration();
 // explicitly turn on acl
 conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
-// explicitly turn on ACL
-conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+// explicitly turn on permission checking
+conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
 // set the limit of max async calls
 conf.setInt(CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_KEY,
 ASYNC_CALL_LIMIT);
@@ -86,6 +102,7 @@ public class TestAsyncDFS {
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
 cluster.waitActive();
 fs = FileSystem.get(conf);
+adfs = 

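For orientation, the adfs handle the test obtains above is the branch's asynchronous client: calls return immediately with a Future instead of blocking on the NameNode RPC. A hedged sketch of how a caller would use it; the rename signature is inferred from the test code in this thread, not from a released API (the feature lived on a development branch):

    import java.util.concurrent.Future;
    import org.apache.hadoop.fs.Options.Rename;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.AsyncDistributedFileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class AsyncRenameSketch {
      static void renameNonBlocking(DistributedFileSystem dfs) throws Exception {
        AsyncDistributedFileSystem adfs = dfs.getAsyncDistributedFileSystem();
        // Returns as soon as the RPC is issued; no waiting on the NameNode here.
        Future<Void> done =
            adfs.rename(new Path("/src"), new Path("/dst"), Rename.OVERWRITE);
        // Block only at the point the outcome is actually needed.
        done.get();
      }
    }
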
[41/50] [abbrv] hadoop git commit: HDFS-10508. DFSInputStream should set thread's interrupt status after catching InterruptException from sleep. Contributed by Jing Zhao.

2016-06-09 Thread aengineer
HDFS-10508. DFSInputStream should set thread's interrupt status after catching 
InterruptException from sleep. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ea9bbce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ea9bbce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ea9bbce

Branch: refs/heads/HDFS-7240
Commit: 8ea9bbce2614e8eb499af73589f021ed1789e78f
Parents: 1500a0a
Author: Masatake Iwasaki 
Authored: Thu Jun 9 14:52:29 2016 +0900
Committer: Masatake Iwasaki 
Committed: Thu Jun 9 14:52:29 2016 +0900

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ea9bbce/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 7f32a56..6132f83 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -306,6 +306,7 @@ public class DFSInputStream extends FSInputStream
 try {
   Thread.sleep(waitTime);
 } catch (InterruptedException e) {
+  Thread.currentThread().interrupt();
   throw new InterruptedIOException(
   "Interrupted while getting the last block length.");
 }
@@ -417,6 +418,7 @@ public class DFSInputStream extends FSInputStream
 try {
   Thread.sleep(500); // delay between retries.
 } catch (InterruptedException e) {
+  Thread.currentThread().interrupt();
   throw new InterruptedIOException(
   "Interrupted while getting the length.");
 }
@@ -1063,6 +1065,7 @@ public class DFSInputStream extends FSInputStream
   " IOException, will wait for " + waitTime + " msec.");
   Thread.sleep((long)waitTime);
 } catch (InterruptedException e) {
+  Thread.currentThread().interrupt();
   throw new InterruptedIOException(
   "Interrupted while choosing DataNode for read.");
 }


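The three one-line additions all apply the standard interrupt-handling idiom: Thread.sleep() clears the thread's interrupt flag when it throws, so code that translates InterruptedException into an IOException must re-set the flag first, or callers polling Thread.currentThread().isInterrupted() will never see the interruption. A minimal self-contained illustration:

    import java.io.IOException;
    import java.io.InterruptedIOException;

    public class InterruptSafeRetryWait {
      static void waitBeforeRetry(long millis) throws IOException {
        try {
          Thread.sleep(millis);
        } catch (InterruptedException e) {
          // Restore the flag sleep() just cleared, then translate.
          Thread.currentThread().interrupt();
          throw new InterruptedIOException("Interrupted while waiting to retry");
        }
      }
    }
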



[12/50] [abbrv] hadoop git commit: Revert "Revert "HADOOP-12957. Limit the number of outstanding async calls. Contributed by Xiaobing Zhou""

2016-06-09 Thread aengineer
Revert "Revert "HADOOP-12957. Limit the number of outstanding async calls.  
Contributed by Xiaobing Zhou""

This reverts commit 4d36b221a24e3b626bb91093b0bb0fd377061cae.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa20fa15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa20fa15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa20fa15

Branch: refs/heads/HDFS-7240
Commit: aa20fa150d522b9fe469dd99a8e24d7e27d888ea
Parents: eded3d1
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jun 6 16:28:47 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jun 6 16:28:47 2016 +0800

--
 .../hadoop/fs/CommonConfigurationKeys.java  |   3 +
 .../ipc/AsyncCallLimitExceededException.java|  36 +++
 .../main/java/org/apache/hadoop/ipc/Client.java |  66 -
 .../org/apache/hadoop/ipc/TestAsyncIPC.java | 199 ++--
 .../hadoop/hdfs/AsyncDistributedFileSystem.java |  12 +-
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 238 +--
 6 files changed, 445 insertions(+), 109 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa20fa15/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 86e1b43..06614db 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -324,6 +324,9 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   public static final long HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT =
 4*60*60; // 4 hours
   
+  public static final String  IPC_CLIENT_ASYNC_CALLS_MAX_KEY =
+  "ipc.client.async.calls.max";
+  public static final int IPC_CLIENT_ASYNC_CALLS_MAX_DEFAULT = 100;
   public static final String  IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY = 
"ipc.client.fallback-to-simple-auth-allowed";
   public static final boolean 
IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT = false;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa20fa15/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
new file mode 100644
index 000..db97b6c
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AsyncCallLimitExceededException.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import java.io.IOException;
+
+/**
+ * Signals that an AsyncCallLimitExceededException has occurred. This class is
+ * used to make application code using async RPC aware that limit of max async
+ * calls is reached, application code need to retrieve results from response of
+ * established async calls to avoid buffer overflow in order for follow-on 
async
+ * calls going correctly.
+ */
+public class AsyncCallLimitExceededException extends IOException {
+  private static final long serialVersionUID = 1L;
+
+  public AsyncCallLimitExceededException(String message) {
+super(message);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa20fa15/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 

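The javadoc above spells out the contract: once the per-client cap (ipc.client.async.calls.max, default 100) is hit, the caller must drain some outstanding Futures before issuing more calls. A hedged sketch of that retry loop; issueCall() is a hypothetical stand-in for any async RPC that can throw this exception:

    import java.util.ArrayDeque;
    import java.util.Queue;
    import java.util.concurrent.Future;
    import org.apache.hadoop.ipc.AsyncCallLimitExceededException;

    public class BoundedAsyncIssuer {
      interface AsyncOp {
        Future<Void> issueCall() throws Exception;
      }

      private final Queue<Future<Void>> outstanding = new ArrayDeque<>();

      void submit(AsyncOp op) throws Exception {
        while (true) {
          try {
            outstanding.add(op.issueCall());
            return;
          } catch (AsyncCallLimitExceededException e) {
            // At the cap: complete the oldest call to free a slot, then retry.
            Future<Void> oldest = outstanding.poll();
            if (oldest != null) {
              oldest.get();
            }
          }
        }
      }
    }
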
[25/50] [abbrv] hadoop git commit: HDFS-10485. Fix findbugs warning in FSEditLog.java. (aajisaka)

2016-06-09 Thread aengineer
HDFS-10485. Fix findbugs warning in FSEditLog.java. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6205303
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6205303
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6205303

Branch: refs/heads/HDFS-7240
Commit: e620530301fd3e62537d4b7bc3d8ed296bda1ffc
Parents: bddea5f
Author: Akira Ajisaka 
Authored: Tue Jun 7 17:52:03 2016 +0900
Committer: Akira Ajisaka 
Committed: Tue Jun 7 17:52:55 2016 +0900

--
 .../apache/hadoop/hdfs/server/namenode/FSEditLog.java| 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6205303/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 809d9e6..57229da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -27,6 +27,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -174,7 +175,7 @@ public class FSEditLog implements LogsPurgeable {
   
   // these are statistics counters.
   private long numTransactions;// number of transactions
-  private long numTransactionsBatchedInSync;
+  private final AtomicLong numTransactionsBatchedInSync = new AtomicLong();
   private long totalTimeTransactions;  // total time for all transactions
   private NameNodeMetrics metrics;
 
@@ -672,7 +673,7 @@ public class FSEditLog implements LogsPurgeable {
   if (metrics != null) { // Metrics non-null only when used inside name 
node
 metrics.addSync(elapsed);
 metrics.incrTransactionsBatchedInSync(editsBatchedInSync);
-numTransactionsBatchedInSync += editsBatchedInSync;
+numTransactionsBatchedInSync.addAndGet(editsBatchedInSync);
   }
   
 } finally {
@@ -712,7 +713,7 @@ public class FSEditLog implements LogsPurgeable {
 buf.append(" Total time for transactions(ms): ");
 buf.append(totalTimeTransactions);
 buf.append(" Number of transactions batched in Syncs: ");
-buf.append(numTransactionsBatchedInSync);
+buf.append(numTransactionsBatchedInSync.get());
 buf.append(" Number of syncs: ");
 buf.append(editLogStream.getNumSync());
 buf.append(" SyncTimes(ms): ");
@@ -1281,7 +1282,9 @@ public class FSEditLog implements LogsPurgeable {
 "Cannot start log segment at txid %s when next expected " +
 "txid is %s", segmentTxId, txid + 1);
 
-numTransactions = totalTimeTransactions = numTransactionsBatchedInSync = 0;
+numTransactions = 0;
+totalTimeTransactions = 0;
+numTransactionsBatchedInSync.set(0L);
 
 // TODO no need to link this back to storage anymore!
 // See HDFS-2174.


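The findbugs complaint being fixed is the classic one: a counter incremented on one code path and read for reporting on another, with no common lock, makes a plain long a data race. AtomicLong fixes both sides without widening any lock. Reduced to its essentials:

    import java.util.concurrent.atomic.AtomicLong;

    public class BatchedSyncStats {
      // A plain 'long' here is flagged by findbugs: updated on the sync
      // path, read on an unsynchronized reporting path.
      private final AtomicLong batchedInSync = new AtomicLong();

      void onSync(int editsBatched) {
        batchedInSync.addAndGet(editsBatched);
      }

      String report() {
        return "Number of transactions batched in Syncs: " + batchedInSync.get();
      }

      void resetForNewSegment() {
        batchedInSync.set(0L);
      }
    }
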



[47/50] [abbrv] hadoop git commit: HADOOP-12893. Verify LICENSE.txt and NOTICE.txt. Contributed by Xiao Chen, Akira Ajisaka, and Andrew Wang.

2016-06-09 Thread aengineer
HADOOP-12893. Verify LICENSE.txt and NOTICE.txt. Contributed by Xiao Chen, 
Akira Ajisaka, and Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e383b732
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e383b732
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e383b732

Branch: refs/heads/HDFS-7240
Commit: e383b732c54c542482b0b836e2d2c46eb49b4e2d
Parents: 58c3486
Author: Andrew Wang 
Authored: Thu Jun 9 13:54:14 2016 -0700
Committer: Andrew Wang 
Committed: Thu Jun 9 13:54:14 2016 -0700

--
 LICENSE.txt | 1017 +-
 NOTICE.txt  |  266 ++
 hadoop-build-tools/pom.xml  |   41 ++
 hadoop-project-dist/pom.xml |2 +
 hadoop-project/pom.xml  |   19 +-
 pom.xml |   26 +
 6 files changed, 1367 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e383b732/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 929e2a8..44880df 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -320,7 +320,9 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 
OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-For com.google.re2j.* classes:
+The binary distribution of this product bundles these dependencies under the
+following license:
+re2j 1.0
 -
 This is a work derived from Russ Cox's RE2 in Go, whose license
 http://golang.org/LICENSE is as follows:
@@ -548,12 +550,14 @@ 
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css
+And the binary distribution of this product bundles these dependencies under 
the
+following license:
+Mockito 1.8.5
+SLF4J 1.7.10
 

 
 The MIT License (MIT)
 
-Copyright (c) 2011-2016 Twitter, Inc.
-
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
@@ -648,3 +652,1010 @@ 
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/d3.v3.js
 
 D3 is available under a 3-clause BSD license. For details, see:
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/d3-LICENSE
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+HSQLDB Database 2.0.0
+
+"COPYRIGHTS AND LICENSES (based on BSD License)
+
+For work developed by the HSQL Development Group:
+
+Copyright (c) 2001-2016, The HSQL Development Group
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+Neither the name of the HSQL Development Group nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS""
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL HSQL DEVELOPMENT GROUP, HSQLDB.ORG,
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+For work originally developed by the Hypersonic SQL Group:
+
+Copyright (c) 1995-2000 by the Hypersonic SQL Group.
+All rights reserved.
+Redistribution and use in source and binary forms, with or without

[46/50] [abbrv] hadoop git commit: HADOOP-13175. Remove hadoop-ant from hadoop-tools. Contributed by Chris Douglas.

2016-06-09 Thread aengineer
HADOOP-13175. Remove hadoop-ant from hadoop-tools. Contributed by Chris Douglas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58c34868
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58c34868
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58c34868

Branch: refs/heads/HDFS-7240
Commit: 58c3486850c0503aecdeae8b67bb7e6bc42b4da8
Parents: 31ffaf7
Author: Andrew Wang 
Authored: Thu Jun 9 13:49:52 2016 -0700
Committer: Andrew Wang 
Committed: Thu Jun 9 13:49:52 2016 -0700

--
 hadoop-project/pom.xml  |   5 -
 hadoop-tools/hadoop-ant/pom.xml |  56 -
 .../java/org/apache/hadoop/ant/DfsTask.java | 220 ---
 .../ant/condition/DfsBaseConditional.java   |  68 --
 .../apache/hadoop/ant/condition/DfsExists.java  |  24 --
 .../apache/hadoop/ant/condition/DfsIsDir.java   |  24 --
 .../apache/hadoop/ant/condition/DfsZeroLen.java |  24 --
 .../resources/org/apache/hadoop/ant/antlib.xml  |  29 ---
 hadoop-tools/hadoop-tools-dist/pom.xml  |   6 -
 hadoop-tools/pom.xml|   1 -
 10 files changed, 457 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58c34868/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 4c618a1..35166b1 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -375,11 +375,6 @@
         <artifactId>hadoop-extras</artifactId>
         <version>${project.version}</version>
       </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ant</artifactId>
-        <version>${project.version}</version>
-      </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58c34868/hadoop-tools/hadoop-ant/pom.xml
--
diff --git a/hadoop-tools/hadoop-ant/pom.xml b/hadoop-tools/hadoop-ant/pom.xml
deleted file mode 100644
index e0b038e..000
--- a/hadoop-tools/hadoop-ant/pom.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0"?>
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-  http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha1-SNAPSHOT</version>
-    <relativePath>../../hadoop-project</relativePath>
-  </parent>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-ant</artifactId>
-  <version>3.0.0-alpha1-SNAPSHOT</version>
-  <name>Apache Hadoop Ant Tasks</name>
-  <description>Apache Hadoop Ant Tasks</description>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.ant</groupId>
-      <artifactId>ant</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-annotations</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs-client</artifactId>
-      <scope>provided</scope>
-    </dependency>
-  </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58c34868/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
--
diff --git 
a/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java 
b/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
deleted file mode 100644
index 36119f5..000
--- a/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ant;
-
-import java.io.ByteArrayOutputStream;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.io.UnsupportedEncodingException;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-import java.util.LinkedList;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.tools.ant.AntClassLoader;
-import org.apache.tools.ant.BuildException;
-import org.apache.tools.ant.Task;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * {@link 

[01/50] [abbrv] hadoop git commit: HDFS-7767. Use the noredirect flag in WebHDFS to allow web browsers to upload files via the NN UI (Ravi Prakash via aw)

2016-06-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 54f7975df -> 23923086d


HDFS-7767. Use the noredirect flag in WebHDFS to allow web browsers to upload 
files via the NN UI (Ravi Prakash via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99a771cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99a771cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99a771cd

Branch: refs/heads/HDFS-7240
Commit: 99a771cd7a3f792a76ac89c406b82a983c059d28
Parents: 15f0184
Author: Allen Wittenauer 
Authored: Fri Jun 3 17:07:39 2016 -0700
Committer: Allen Wittenauer 
Committed: Fri Jun 3 17:07:39 2016 -0700

--
 .../src/main/webapps/hdfs/explorer.html | 25 +-
 .../src/main/webapps/hdfs/explorer.js   | 51 
 2 files changed, 74 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99a771cd/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 5106006..51f72e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -119,6 +119,23 @@
   
 
 
+
+  
+ 
+   
+ Upload File
+   
+   
+ 
+   
+   
+ Close
+ Upload
+   
+ 
+  
+
   
 
   
@@ -142,7 +159,7 @@
   
 
   
-  
+  
 
   
 
@@ -152,12 +169,16 @@
   
 
   
-  
+  
 
 
 
+
+
+
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99a771cd/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index adb83a8..6fa5f19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -366,5 +366,56 @@
 });
   })
 
+  $('#modal-upload-file-button').click(function() {
+$(this).prop('disabled', true);
+$(this).button('complete');
+var files = []
+var numCompleted = 0
+
+for(var i = 0; i < $('#modal-upload-file-input').prop('files').length; 
i++) {
+  (function() {
+var file = $('#modal-upload-file-input').prop('files')[i];
+var url = '/webhdfs/v1' + current_directory;
+url = encode_path(append_path(url, file.name));
url += '?op=CREATE&noredirect=true';
+files.push( { file: file } )
+files[i].request = $.ajax({
+  type: 'PUT',
+  url: url,
+  processData: false,
+  crossDomain: true
+});
+  })()
+ }
+for(var f in files) {
+  (function() {
+var file = files[f];
+file.request.done(function(data) {
+  var url = data['Location'];
+  $.ajax({
+type: 'PUT',
+url: url,
+data: file.file,
+processData: false,
+crossDomain: true
+  }).complete(function(data) {
+numCompleted++;
+if(numCompleted == files.length) {
+  $('#modal-upload-file').modal('hide');
+  $('#modal-upload-file-button').button('reset');
+  browse_directory(current_directory);
+}
+  }).error(function(jqXHR, textStatus, errorThrown) {
+numCompleted++;
+show_err_msg("Couldn't upload the file " + file.file.name + ". "+ 
errorThrown);
+  });
+}).error(function(jqXHR, textStatus, errorThrown) {
+  numCompleted++;
+  show_err_msg("Couldn't find datanode to write file. " + errorThrown);
+});
+  })();
+}
+  });
+
   init();
 })();


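The JavaScript above is the browser side of WebHDFS's two-step create: with noredirect=true the NameNode answers with a JSON body carrying the DataNode's Location instead of a 307 redirect, and the client then PUTs the bytes to that URL. A hedged sketch of the same dance from plain Java; the host, port, and ad-hoc JSON handling are simplifying assumptions:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class TwoStepWebHdfsUpload {
      public static void main(String[] args) throws Exception {
        // Step 1: ask the NameNode where to write, without a redirect.
        URL create = new URL("http://namenode.example:9870/webhdfs/v1"
            + "/tmp/hello.txt?op=CREATE&noredirect=true");
        HttpURLConnection nn = (HttpURLConnection) create.openConnection();
        nn.setRequestMethod("PUT");
        String json = new String(nn.getInputStream().readAllBytes(),
            StandardCharsets.UTF_8);
        nn.disconnect();

        // Crude Location extraction; a real client would use a JSON parser.
        String key = "\"Location\":\"";
        int from = json.indexOf(key) + key.length();
        String location = json.substring(from, json.indexOf('"', from));

        // Step 2: PUT the file contents directly to the DataNode.
        HttpURLConnection dn =
            (HttpURLConnection) new URL(location).openConnection();
        dn.setRequestMethod("PUT");
        dn.setDoOutput(true);
        try (OutputStream out = dn.getOutputStream()) {
          out.write("hello, webhdfs".getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("DataNode status: " + dn.getResponseCode());
      }
    }
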



[05/50] [abbrv] hadoop git commit: Revert "HDFS-10431 Refactor and speedup TestAsyncDFSRename. Contributed by Xiaobing Zhou"

2016-06-09 Thread aengineer
Revert "HDFS-10431 Refactor and speedup TestAsyncDFSRename.  Contributed by 
Xiaobing Zhou"

This reverts commit f4b9bcd87c66a39f0c93983431630e9d1b6e36d3.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ee5912e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ee5912e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ee5912e

Branch: refs/heads/HDFS-7240
Commit: 5ee5912ebd541d5b4c33ecd46dfdebe1e23b56c3
Parents: 8cf47d8
Author: Andrew Wang 
Authored: Fri Jun 3 18:09:13 2016 -0700
Committer: Andrew Wang 
Committed: Fri Jun 3 18:09:13 2016 -0700

--
 .../org/apache/hadoop/hdfs/TestAsyncDFS.java| 233 +---
 .../apache/hadoop/hdfs/TestAsyncDFSRename.java  | 563 +++
 2 files changed, 483 insertions(+), 313 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ee5912e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
index ddcf492..67262dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAsyncDFS.java
@@ -29,16 +29,13 @@ import static 
org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 
@@ -46,21 +43,15 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.TestDFSPermission.PermissionGenerator;
 import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
 import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
 import org.apache.hadoop.ipc.AsyncCallLimitExceededException;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -72,28 +63,21 @@ import com.google.common.collect.Lists;
  * */
 public class TestAsyncDFS {
   public static final Log LOG = LogFactory.getLog(TestAsyncDFS.class);
-  private final short replFactor = 1;
-  private final long blockSize = 512;
-  private long fileLen = blockSize * 3;
-  private final long seed = Time.now();
-  private final Random r = new Random(seed);
-  private final PermissionGenerator permGenerator = new PermissionGenerator(r);
-  private static final int NUM_TESTS = 50;
+  private static final int NUM_TESTS = 1000;
   private static final int NUM_NN_HANDLER = 10;
-  private static final int ASYNC_CALL_LIMIT = 1000;
+  private static final int ASYNC_CALL_LIMIT = 100;
 
   private Configuration conf;
   private MiniDFSCluster cluster;
   private FileSystem fs;
-  private AsyncDistributedFileSystem adfs;
 
   @Before
   public void setup() throws IOException {
 conf = new HdfsConfiguration();
 // explicitly turn on acl
 conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
-// explicitly turn on permission checking
-conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
+// explicitly turn on ACL
+conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
 // set the limit of max async calls
 conf.setInt(CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_KEY,
 ASYNC_CALL_LIMIT);
@@ -102,7 +86,6 @@ public class TestAsyncDFS {
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
 cluster.waitActive();
 fs = FileSystem.get(conf);
-adfs = cluster.getFileSystem().getAsyncDistributedFileSystem();
   }
 
   

[07/24] hadoop git commit: YARN-5176. More test cases for queuing of containers at the NM. (Konstantinos Karanasos via asuresh)

2016-06-09 Thread aengineer
YARN-5176. More test cases for queuing of containers at the NM. (Konstantinos 
Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76f0800c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76f0800c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76f0800c

Branch: refs/heads/HDFS-1312
Commit: 76f0800c21f49fba01694cbdc870103053da802c
Parents: 58be55b
Author: Arun Suresh 
Authored: Tue Jun 7 17:16:18 2016 -0700
Committer: Arun Suresh 
Committed: Tue Jun 7 17:16:18 2016 -0700

--
 .../queuing/QueuingContainerManagerImpl.java|  11 +
 .../BaseContainerManagerTest.java   |  64 +++
 .../containermanager/TestContainerManager.java  |  76 +---
 .../queuing/TestQueuingContainerManager.java| 388 +++
 4 files changed, 391 insertions(+), 148 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76f0800c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
index 1ce3356..a1e3bdb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
@@ -160,6 +160,7 @@ public class QueuingContainerManagerImpl extends 
ContainerManagerImpl {
   containerTokenId.getExecutionType());
 
   if (foundInQueue) {
+LOG.info("Removing queued container with ID " + containerID);
 this.context.getQueuingContext().getKilledQueuedContainers().put(
 containerTokenId,
 "Queued container request removed by ApplicationMaster.");
@@ -502,6 +503,16 @@ public class QueuingContainerManagerImpl extends 
ContainerManagerImpl {
 return allocatedOpportunisticContainers.size();
   }
 
+  @VisibleForTesting
+  public int getNumQueuedGuaranteedContainers() {
+return queuedGuaranteedContainers.size();
+  }
+
+  @VisibleForTesting
+  public int getNumQueuedOpportunisticContainers() {
+return queuedOpportunisticContainers.size();
+  }
+
   class QueuingApplicationEventDispatcher implements
   EventHandler {
 private EventHandler applicationEventDispatcher;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76f0800c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index ab60288..4f0e5c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -40,10 +40,17 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import 

[20/24] hadoop git commit: HADOOP-13175. Remove hadoop-ant from hadoop-tools. Contributed by Chris Douglas.

2016-06-09 Thread aengineer
HADOOP-13175. Remove hadoop-ant from hadoop-tools. Contributed by Chris Douglas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58c34868
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58c34868
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58c34868

Branch: refs/heads/HDFS-1312
Commit: 58c3486850c0503aecdeae8b67bb7e6bc42b4da8
Parents: 31ffaf7
Author: Andrew Wang 
Authored: Thu Jun 9 13:49:52 2016 -0700
Committer: Andrew Wang 
Committed: Thu Jun 9 13:49:52 2016 -0700

--
 hadoop-project/pom.xml  |   5 -
 hadoop-tools/hadoop-ant/pom.xml |  56 -
 .../java/org/apache/hadoop/ant/DfsTask.java | 220 ---
 .../ant/condition/DfsBaseConditional.java   |  68 --
 .../apache/hadoop/ant/condition/DfsExists.java  |  24 --
 .../apache/hadoop/ant/condition/DfsIsDir.java   |  24 --
 .../apache/hadoop/ant/condition/DfsZeroLen.java |  24 --
 .../resources/org/apache/hadoop/ant/antlib.xml  |  29 ---
 hadoop-tools/hadoop-tools-dist/pom.xml  |   6 -
 hadoop-tools/pom.xml|   1 -
 10 files changed, 457 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58c34868/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 4c618a1..35166b1 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -375,11 +375,6 @@
         <artifactId>hadoop-extras</artifactId>
         <version>${project.version}</version>
       </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ant</artifactId>
-        <version>${project.version}</version>
-      </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58c34868/hadoop-tools/hadoop-ant/pom.xml
--
diff --git a/hadoop-tools/hadoop-ant/pom.xml b/hadoop-tools/hadoop-ant/pom.xml
deleted file mode 100644
index e0b038e..000
--- a/hadoop-tools/hadoop-ant/pom.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0"?>
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-  http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha1-SNAPSHOT</version>
-    <relativePath>../../hadoop-project</relativePath>
-  </parent>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-ant</artifactId>
-  <version>3.0.0-alpha1-SNAPSHOT</version>
-  <name>Apache Hadoop Ant Tasks</name>
-  <description>Apache Hadoop Ant Tasks</description>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.ant</groupId>
-      <artifactId>ant</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-annotations</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs-client</artifactId>
-      <scope>provided</scope>
-    </dependency>
-  </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58c34868/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
--
diff --git 
a/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java 
b/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
deleted file mode 100644
index 36119f5..000
--- a/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ant;
-
-import java.io.ByteArrayOutputStream;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.io.UnsupportedEncodingException;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-import java.util.LinkedList;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.tools.ant.AntClassLoader;
-import org.apache.tools.ant.BuildException;
-import org.apache.tools.ant.Task;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * {@link 

[01/24] hadoop git commit: HDFS-10468. HDFS read ends up ignoring an interrupt. Contributed by Jing Zhao

2016-06-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-1312 4f6fe511c -> f56ab2e77


HDFS-10468. HDFS read ends up ignoring an interrupt. Contributed by Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be34e85e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be34e85e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be34e85e

Branch: refs/heads/HDFS-1312
Commit: be34e85e682880f46eee0310bf00ecc7d39cd5bd
Parents: c14c1b2
Author: Jing Zhao 
Authored: Tue Jun 7 10:48:21 2016 -0700
Committer: Jing Zhao 
Committed: Tue Jun 7 10:48:21 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 36 ++--
 .../java/org/apache/hadoop/hdfs/TestRead.java   | 87 
 .../server/datanode/SimulatedFSDataset.java |  4 +-
 3 files changed, 119 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be34e85e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 2ed0abd..7f32a56 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -19,8 +19,10 @@ package org.apache.hadoop.hdfs;
 
 import java.io.EOFException;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
+import java.nio.channels.ClosedByInterruptException;
 import java.util.AbstractMap;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -304,7 +306,7 @@ public class DFSInputStream extends FSInputStream
 try {
   Thread.sleep(waitTime);
 } catch (InterruptedException e) {
-  throw new IOException(
+  throw new InterruptedIOException(
   "Interrupted while getting the last block length.");
 }
   }
@@ -379,6 +381,7 @@ public class DFSInputStream extends FSInputStream
   return n;
 }
   } catch (IOException ioe) {
+checkInterrupted(ioe);
 if (ioe instanceof RemoteException) {
   if (((RemoteException) ioe).unwrapRemoteException() instanceof
   ReplicaNotFoundException) {
@@ -414,7 +417,8 @@ public class DFSInputStream extends FSInputStream
 try {
   Thread.sleep(500); // delay between retries.
 } catch (InterruptedException e) {
-  throw new IOException("Interrupted while getting the length.");
+  throw new InterruptedIOException(
+  "Interrupted while getting the length.");
 }
   }
 
@@ -660,6 +664,7 @@ public class DFSInputStream extends FSInputStream
 }
 return chosenNode;
   } catch (IOException ex) {
+checkInterrupted(ex);
 if (ex instanceof InvalidEncryptionKeyException && 
refetchEncryptionKey > 0) {
   DFSClient.LOG.info("Will fetch a new encryption key and retry, "
   + "encryption key was invalid when connecting to " + targetAddr
@@ -681,6 +686,15 @@ public class DFSInputStream extends FSInputStream
 }
   }
 
+  private void checkInterrupted(IOException e) throws IOException {
+if (Thread.currentThread().isInterrupted() &&
+(e instanceof ClosedByInterruptException ||
+e instanceof InterruptedIOException)) {
+  DFSClient.LOG.debug("The reading thread has been interrupted.", e);
+  throw e;
+}
+  }
+
   protected BlockReader getBlockReader(LocatedBlock targetBlock,
   long offsetInBlock, long length, InetSocketAddress targetAddr,
   StorageType storageType, DatanodeInfo datanode) throws IOException {
@@ -948,6 +962,7 @@ public class DFSInputStream extends FSInputStream
 } catch (ChecksumException ce) {
   throw ce;
 } catch (IOException e) {
+  checkInterrupted(e);
   if (retries == 1) {
 DFSClient.LOG.warn("DFS Read", e);
   }
@@ -1044,9 +1059,12 @@ public class DFSInputStream extends FSInputStream
   // expanding time window for each failure
   timeWindow * (failures + 1) *
   ThreadLocalRandom.current().nextDouble();
-  DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) + " 
IOException, will wait for " + waitTime + " msec.");
+  DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) +
+  " IOException, will wait for " + waitTime + " msec.");
   

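The behavioral change is easiest to see from the caller's side: before this patch a thread blocked inside DFSInputStream's retry loops ignored Thread.interrupt(); afterwards the interrupt surfaces as an InterruptedIOException (or ClosedByInterruptException) and the read can be cancelled. A runnable JDK-only analogue, using PipedInputStream because its blocking read honors interrupts the same way:

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.io.PipedInputStream;
    import java.io.PipedOutputStream;

    public class CancellableRead {
      public static void main(String[] args) throws Exception {
        PipedInputStream in = new PipedInputStream(new PipedOutputStream());
        Thread reader = new Thread(() -> {
          try {
            in.read(); // blocks indefinitely: nothing is ever written
          } catch (InterruptedIOException e) {
            // What HDFS-10468 gives DFSInputStream: a blocked read that
            // reacts to interruption instead of silently retrying.
            System.out.println("read cancelled by interrupt");
          } catch (IOException e) {
            e.printStackTrace();
          }
        });
        reader.start();
        Thread.sleep(200);
        reader.interrupt(); // cancel the blocked read
        reader.join();
      }
    }
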
[08/24] hadoop git commit: YARN-5206. RegistrySecurity includes id:pass in exception text if considered invalid. Contributed by Steve Loughran

2016-06-09 Thread aengineer
YARN-5206. RegistrySecurity includes id:pass in exception text if considered 
invalid. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c8a377c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c8a377c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c8a377c

Branch: refs/heads/HDFS-1312
Commit: 8c8a377cac10b086a7ff37ee366b79e6b04d2738
Parents: 723432b
Author: Jason Lowe 
Authored: Wed Jun 8 14:11:25 2016 +
Committer: Jason Lowe 
Committed: Wed Jun 8 14:11:25 2016 +

--
 .../apache/hadoop/registry/client/impl/zk/RegistrySecurity.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c8a377c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
index fc61460..49673fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
@@ -443,7 +443,7 @@ public class RegistrySecurity extends AbstractService {
*/
   public String digest(String idPasswordPair) throws IOException {
 if (StringUtils.isEmpty(idPasswordPair) || !isValid(idPasswordPair)) {
-  throw new IOException("Invalid id:password: " + idPasswordPair);
+  throw new IOException("Invalid id:password");
 }
 try {
   return DigestAuthenticationProvider.generateDigest(idPasswordPair);


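The one-line fix is a reminder worth generalizing: validation failures for credential material should report where the bad value came from, never the value itself, since exception messages end up in logs and bug reports. A small sketch of the habit (the validation rule shown is illustrative, not the registry's actual check):

    import java.io.IOException;

    public class CredentialValidation {
      static void requireValidIdPassword(String idPasswordPair, String source)
          throws IOException {
        boolean valid = idPasswordPair != null
            && idPasswordPair.indexOf(':') > 0
            && !idPasswordPair.endsWith(":");
        if (!valid) {
          // Name the source of the bad pair, not its contents.
          throw new IOException("Invalid id:password supplied by " + source);
        }
      }
    }
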



[12/24] hadoop git commit: MAPREDUCE-6240. Hadoop client displays confusing error message. (gera)

2016-06-09 Thread aengineer
MAPREDUCE-6240. Hadoop client displays confusing error message. (gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0af96a1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0af96a1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0af96a1c

Branch: refs/heads/HDFS-1312
Commit: 0af96a1c08594c809ecb254cee4f60dd22399772
Parents: 1ee9ea0
Author: Gera Shegalov 
Authored: Sat May 28 22:01:07 2016 -0700
Committer: Gera Shegalov 
Committed: Wed Jun 8 12:59:37 2016 -0700

--
 .../org/apache/hadoop/mapreduce/Cluster.java| 15 ++-
 .../TestClientProtocolProviderImpls.java| 26 +---
 2 files changed, 32 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0af96a1c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
index 9563c0b..6ca918d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
@@ -102,6 +102,10 @@ public class Cluster {
   throws IOException {
 
 initProviderList();
+final IOException initEx = new IOException(
+"Cannot initialize Cluster. Please check your configuration for "
++ MRConfig.FRAMEWORK_NAME
++ " and the correspond server addresses.");
 for (ClientProtocolProvider provider : providerList) {
   LOG.debug("Trying ClientProtocolProvider : "
   + provider.getClass().getName());
@@ -124,16 +128,15 @@ public class Cluster {
   + " as the ClientProtocolProvider - returned null protocol");
 }
   } catch (Exception e) {
-LOG.info("Failed to use " + provider.getClass().getName()
-+ " due to error: ", e);
+final String errMsg = "Failed to use " + provider.getClass().getName()
++ " due to error: ";
+initEx.addSuppressed(new IOException(errMsg, e));
+LOG.info(errMsg, e);
   }
 }
 
 if (null == clientProtocolProvider || null == client) {
-  throw new IOException(
-  "Cannot initialize Cluster. Please check your configuration for "
-  + MRConfig.FRAMEWORK_NAME
-  + " and the correspond server addresses.");
+  throw initEx;
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0af96a1c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
index 6ad76e9..500e133 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
@@ -18,17 +18,20 @@
 
 package org.apache.hadoop.mapreduce;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.mapred.LocalJobRunner;
 import org.apache.hadoop.mapred.YARNRunner;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 public class TestClientProtocolProviderImpls {
 
   @Test
@@ -76,4 +79,21 @@ public class TestClientProtocolProviderImpls {
   "Cannot initialize Cluster. Please check"));
 }
   }
+
+  @Test
+  public void testClusterExceptionRootCause() throws Exception {
+final Configuration 
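
(The test diff is truncated in the archive.) The core of the fix is Throwable.addSuppressed: build the summary IOException up front, attach every per-provider failure to it, and throw it only if no provider works, so the final stack trace carries each root cause. A condensed, self-contained sketch of that pattern (the generic provider loop and names are illustrative):

    import java.io.IOException;
    import java.util.List;
    import java.util.concurrent.Callable;

    public class FirstWorkingProvider {
      static <T> T tryProviders(List<Callable<T>> providers) throws IOException {
        final IOException initEx =
            new IOException("Cannot initialize: no provider succeeded");
        for (Callable<T> provider : providers) {
          try {
            T result = provider.call();
            if (result != null) {
              return result;
            }
          } catch (Exception e) {
            // Each failure surfaces later as a "Suppressed:" frame.
            initEx.addSuppressed(
                new IOException("Failed to use " + provider.getClass(), e));
          }
        }
        throw initEx; // carries every per-provider cause
      }
    }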

[19/24] hadoop git commit: HADOOP-12537 S3A to support Amazon STS temporary credentials. Contributed by Sean Mackrory.

2016-06-09 Thread aengineer
HADOOP-12537 S3A to support Amazon STS temporary credentials. Contributed by 
Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31ffaf76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31ffaf76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31ffaf76

Branch: refs/heads/HDFS-1312
Commit: 31ffaf76f2b6e1fd2a141daa4daaebdfecefe727
Parents: 9378d94
Author: Steve Loughran 
Authored: Thu Jun 9 20:58:30 2016 +0100
Committer: Steve Loughran 
Committed: Thu Jun 9 21:00:47 2016 +0100

--
 .../src/main/resources/core-default.xml |   5 +
 hadoop-project/pom.xml  |   8 +-
 hadoop-tools/hadoop-aws/pom.xml |   5 +
 .../fs/s3a/BasicAWSCredentialsProvider.java |   3 +-
 .../org/apache/hadoop/fs/s3a/Constants.java |   3 +
 .../s3a/CredentialInitializationException.java  |  46 ++
 .../fs/s3a/TemporaryAWSCredentialsProvider.java |  70 +
 .../src/site/markdown/tools/hadoop-aws/index.md |  71 -
 .../fs/s3a/TestS3ATemporaryCredentials.java | 150 +++
 9 files changed, 357 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 8bb27ea..39b7132 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -806,6 +806,11 @@
 
 
 
+  fs.s3a.session.token
+  The session token used with temporary credentials. Used only 
with provider 
org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider.
+
+
+
   fs.s3a.connection.maximum
   15
   Controls the maximum number of simultaneous connections to 
S3.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 2b6b162..4c618a1 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -116,6 +116,7 @@
 1.0-beta-1
 1.0-alpha-8
 900
+1.10.6
   
 
   
@@ -690,7 +691,12 @@
   
 com.amazonaws
 aws-java-sdk-s3
-1.10.6
+${aws-java-sdk.version}
+  
+  
+com.amazonaws
+aws-java-sdk-sts
+${aws-java-sdk.version}
   
   
 org.apache.mina

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index c95f1e6..7c25e60 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -231,6 +231,11 @@
   compile
 
 
+  com.amazonaws
+  aws-java-sdk-sts
+  test
+
+
   junit
   junit
   test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
index 3a5ee8c..61be43f 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.fs.s3a;
 
-import com.amazonaws.AmazonClientException;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.BasicAWSCredentials;
 import com.amazonaws.auth.AWSCredentials;
@@ -49,7 +48,7 @@ public class BasicAWSCredentialsProvider implements 
AWSCredentialsProvider {
 if (!StringUtils.isEmpty(accessKey) && !StringUtils.isEmpty(secretKey)) {
   return new BasicAWSCredentials(accessKey, secretKey);
 }
-throw new AmazonClientException(
+throw new CredentialInitializationException(
 "Access key or secret key is null");
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 
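
(The remainder of this message is truncated in the archive.) Using the new provider is purely a configuration matter: supply the three credential values and select TemporaryAWSCredentialsProvider. A sketch of the programmatic form; fs.s3a.access.key and fs.s3a.secret.key are the existing S3A keys, fs.s3a.session.token is added by this patch, and the provider-selection key name below is an assumption, since the Constants.java hunk is cut off above:

    import org.apache.hadoop.conf.Configuration;

    public class S3aSessionSetup {
      static Configuration withSessionCredentials(
          String accessKey, String secretKey, String sessionToken) {
        Configuration conf = new Configuration();
        conf.set("fs.s3a.aws.credentials.provider",     // assumed key name
            "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider");
        conf.set("fs.s3a.access.key", accessKey);
        conf.set("fs.s3a.secret.key", secretKey);
        conf.set("fs.s3a.session.token", sessionToken); // new in this patch
        return conf;
      }
    }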

[23/24] hadoop git commit: HADOOP-12666. Support Microsoft Azure Data Lake - as a file system in Hadoop. Contributed by Vishwajeet Dusane.

2016-06-09 Thread aengineer
HADOOP-12666. Support Microsoft Azure Data Lake - as a file system in Hadoop. 
Contributed by Vishwajeet Dusane.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9581fb71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9581fb71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9581fb71

Branch: refs/heads/HDFS-1312
Commit: 9581fb715cbc8a6ad28566e83c6d0242a7306688
Parents: e383b73
Author: Chris Nauroth 
Authored: Thu Jun 9 14:33:31 2016 -0700
Committer: Chris Nauroth 
Committed: Thu Jun 9 14:33:31 2016 -0700

--
 .../src/main/resources/core-default.xml |   60 +
 .../conf/TestCommonConfigurationFields.java |6 +
 hadoop-project/src/site/site.xml|2 +
 .../dev-support/findbugs-exclude.xml|   24 +
 hadoop-tools/hadoop-azure-datalake/pom.xml  |  180 +++
 .../main/java/org/apache/hadoop/fs/adl/Adl.java |   52 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |   41 +
 ...hedRefreshTokenBasedAccessTokenProvider.java |  135 +++
 .../hadoop/fs/adl/oauth2/package-info.java  |   23 +
 .../org/apache/hadoop/fs/adl/package-info.java  |   23 +
 .../org/apache/hadoop/hdfs/web/ADLConfKeys.java |   61 +
 .../apache/hadoop/hdfs/web/BufferManager.java   |  180 +++
 .../web/PrivateAzureDataLakeFileSystem.java | 1108 ++
 ...hedRefreshTokenBasedAccessTokenProvider.java |   37 +
 .../hadoop/hdfs/web/oauth2/package-info.java|   24 +
 .../apache/hadoop/hdfs/web/package-info.java|   25 +
 .../hadoop/hdfs/web/resources/ADLFlush.java |   49 +
 .../hdfs/web/resources/ADLGetOpParam.java   |   96 ++
 .../hdfs/web/resources/ADLPostOpParam.java  |   97 ++
 .../hdfs/web/resources/ADLPutOpParam.java   |   94 ++
 .../hdfs/web/resources/ADLVersionInfo.java  |   51 +
 .../web/resources/AppendADLNoRedirectParam.java |   45 +
 .../web/resources/CreateADLNoRedirectParam.java |   44 +
 .../hadoop/hdfs/web/resources/LeaseParam.java   |   53 +
 .../web/resources/ReadADLNoRedirectParam.java   |   44 +
 .../hadoop/hdfs/web/resources/package-info.java |   27 +
 .../src/site/markdown/index.md  |  219 
 ...hedRefreshTokenBasedAccessTokenProvider.java |  147 +++
 hadoop-tools/hadoop-tools-dist/pom.xml  |6 +
 hadoop-tools/pom.xml|1 +
 30 files changed, 2954 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9581fb71/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 39b7132..f1d77dd 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2213,4 +2213,64 @@
   needs to be specified in net.topology.script.file.name.
 
   
+
+
+  
+
+  
+adl.feature.override.readahead
+true
+
+  Enables read aheads in the ADL client, the feature is used to
+  improve read throughput.
+  This works in conjunction with the value set in
+  adl.feature.override.readahead.max.buffersize.
+  When set to false the read ahead feature is turned off.
+  Default : True if not configured.
+
+  
+
+  
+adl.feature.override.readahead.max.buffersize
+8388608
+
+  Define maximum buffer size to cache read ahead data, this is
+  allocated per process to
+  cache read ahead data. Applicable only when
+  adl.feature.override.readahead is set to true.
+  Default : 8388608 Byte i.e. 8MB if not configured.
+
+  
+
+  
+adl.feature.override.readahead.max.concurrent.connection
+2
+
+  Define maximum concurrent connection can be established to
+  read ahead. If the data size is less than 4MB then only 1 read n/w
+  connection
+  is set. If the data size is less than 4MB but less than 8MB then 2 read
+  n/w connection
+  is set. Data greater than 8MB then value set under the property would
+  take
+  effect. Applicable only when adl.feature.override.readahead is set
+  to true and buffer size is greater than 8MB.
+  It is recommended to reset this property if the
+  adl.feature.override.readahead.max.buffersize
+  is less than 8MB to gain performance. Application has to consider
+  throttling limit for the account as well before configuring large
+  buffer size.
+
+  
+
+  
+fs.adl.impl
+org.apache.hadoop.fs.adl.AdlFileSystem
+  
+
+  
+fs.AbstractFileSystem.adl.impl
+org.apache.hadoop.fs.adl.Adl
+  
+
 

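With fs.adl.impl and fs.AbstractFileSystem.adl.impl registered as above, an adl:// URI resolves through the standard FileSystem API. A minimal sketch; the account host is a placeholder, and a real cluster must also carry the OAuth2 credential settings this module documents:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AdlListing {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // loads core-default.xml
        // Placeholder account host below; not a real endpoint.
        FileSystem fs = FileSystem.get(
            URI.create("adl://example.azuredatalakestore.net/"), conf);
        for (FileStatus status : fs.listStatus(new Path("/"))) {
          System.out.println(status.getPath());
        }
      }
    }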

[04/24] hadoop git commit: MAPREDUCE-6702. TestMiniMRChildTask.testTaskEnv and TestMiniMRChildTask.testTaskOldEnv are failing (ajisakaa via rkanter)

2016-06-09 Thread aengineer
MAPREDUCE-6702. TestMiniMRChildTask.testTaskEnv and 
TestMiniMRChildTask.testTaskOldEnv are failing (ajisakaa via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/733f3f18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/733f3f18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/733f3f18

Branch: refs/heads/HDFS-1312
Commit: 733f3f18d5cf38cbae35146fbef8e16e35fdf5e1
Parents: 620325e
Author: Robert Kanter 
Authored: Tue Jun 7 15:46:06 2016 -0700
Committer: Robert Kanter 
Committed: Tue Jun 7 15:46:06 2016 -0700

--
 .../src/site/markdown/SingleCluster.md.vm   |  12 +-
 .../java/org/apache/hadoop/mapred/JobConf.java  |   6 -
 .../hadoop/mapred/TestMiniMRChildTask.java  | 233 +++
 3 files changed, 41 insertions(+), 210 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/733f3f18/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm 
b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
index 573ca32..4825e00 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
@@ -181,13 +181,23 @@ You can run a MapReduce job on YARN in a 
pseudo-distributed mode by setting a fe
 
 The following instructions assume that 1. ~ 4. steps of [the above 
instructions](#Execution) are already executed.
 
-1.  Configure parameters as follows:`etc/hadoop/mapred-site.xml`:
+1.  Configure parameters as follows:
+
+`etc/hadoop/mapred-site.xml`:
 
 
 
 mapreduce.framework.name
 yarn
 
+
+mapreduce.admin.user.env
+HADOOP_MAPRED_HOME=$HADOOP_COMMON_HOME
+
+
+yarn.app.mapreduce.am.env
+HADOOP_MAPRED_HOME=$HADOOP_COMMON_HOME
+
 
 
 `etc/hadoop/yarn-site.xml`:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/733f3f18/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
index 2cfce1f..f2b0aae 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
@@ -294,8 +294,6 @@ public class JobConf extends Configuration {
* Example:
* 
*A=foo - This will set the env variable A to foo. 
-   *B=$X:c This is inherit tasktracker's X env variable on Linux. 
-   *B=%X%;c This is inherit tasktracker's X env variable on Windows. 

* 
* 
* @deprecated Use {@link #MAPRED_MAP_TASK_ENV} or 
@@ -314,8 +312,6 @@ public class JobConf extends Configuration {
* Example:
* 
*A=foo - This will set the env variable A to foo. 
-   *B=$X:c This is inherit tasktracker's X env variable on Linux. 
-   *B=%X%;c This is inherit tasktracker's X env variable on Windows. 

* 
*/
   public static final String MAPRED_MAP_TASK_ENV = JobContext.MAP_ENV;
@@ -330,8 +326,6 @@ public class JobConf extends Configuration {
* Example:
* 
*A=foo - This will set the env variable A to foo. 
-   *B=$X:c This is inherit tasktracker's X env variable on Linux. 
-   *B=%X%;c This is inherit tasktracker's X env variable on Windows. 

* 
*/
   public static final String MAPRED_REDUCE_TASK_ENV = JobContext.REDUCE_ENV;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/733f3f18/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
index cbeeccf..f690118 100644
--- 
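
(The TestMiniMRChildTask diff is truncated in the archive.) The two properties added to the docs above can equally be set programmatically when building a job configuration; a sketch, with only the property names and values taken from the diff:

    import org.apache.hadoop.conf.Configuration;

    public class PseudoDistributedMrEnv {
      static Configuration mrOnYarnConf() {
        Configuration conf = new Configuration();
        conf.set("mapreduce.framework.name", "yarn");
        // Let MR tasks and the MR AM locate the MapReduce jars:
        conf.set("mapreduce.admin.user.env",
            "HADOOP_MAPRED_HOME=$HADOOP_COMMON_HOME");
        conf.set("yarn.app.mapreduce.am.env",
            "HADOOP_MAPRED_HOME=$HADOOP_COMMON_HOME");
        return conf;
      }
    }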

[06/24] hadoop git commit: YARN-5199. Close LogReader in AHSWebServices#getStreamingOutput and FileInputStream in NMWebServices#getLogs. Contributed by Xuan Gong

2016-06-09 Thread aengineer
YARN-5199. Close LogReader in AHSWebServices#getStreamingOutput and
FileInputStream in NMWebServices#getLogs. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58be55b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58be55b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58be55b6

Branch: refs/heads/HDFS-1312
Commit: 58be55b6e07b94aa55ed87c461f3e5c04cc61630
Parents: 8554aee1b
Author: Xuan 
Authored: Tue Jun 7 16:07:02 2016 -0700
Committer: Xuan 
Committed: Tue Jun 7 16:07:02 2016 -0700

--
 .../webapp/AHSWebServices.java  | 155 ++-
 .../nodemanager/webapp/NMWebServices.java   |  71 +
 2 files changed, 118 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58be55b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index d91ae55..59dbd44 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -40,7 +40,6 @@ import javax.ws.rs.core.Response;
 import javax.ws.rs.core.StreamingOutput;
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.Response.Status;
-
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -363,86 +362,94 @@ public class AHSWebServices extends WebServices {
   if ((nodeId == null || nodeName.contains(LogAggregationUtils
   .getNodeString(nodeId))) && !nodeName.endsWith(
   LogAggregationUtils.TMP_FILE_SUFFIX)) {
-AggregatedLogFormat.LogReader reader =
-new AggregatedLogFormat.LogReader(conf,
-thisNodeFile.getPath());
-DataInputStream valueStream;
-LogKey key = new LogKey();
-valueStream = reader.next(key);
-while (valueStream != null && !key.toString()
-.equals(containerIdStr)) {
-  // Next container
-  key = new LogKey();
+AggregatedLogFormat.LogReader reader = null;
+try {
+  reader = new AggregatedLogFormat.LogReader(conf,
+  thisNodeFile.getPath());
+  DataInputStream valueStream;
+  LogKey key = new LogKey();
   valueStream = reader.next(key);
-}
-if (valueStream == null) {
-  continue;
-}
-while (true) {
-  try {
-String fileType = valueStream.readUTF();
-String fileLengthStr = valueStream.readUTF();
-long fileLength = Long.parseLong(fileLengthStr);
-if (fileType.equalsIgnoreCase(logFile)) {
-  StringBuilder sb = new StringBuilder();
-  sb.append("LogType:");
-  sb.append(fileType + "\n");
-  sb.append("Log Upload Time:");
-  sb.append(Times.format(System.currentTimeMillis()) + "\n");
-  sb.append("LogLength:");
-  sb.append(fileLengthStr + "\n");
-  sb.append("Log Contents:\n");
-  byte[] b = sb.toString().getBytes(Charset.forName("UTF-8"));
-  os.write(b, 0, b.length);
-
-  long toSkip = 0;
-  long totalBytesToRead = fileLength;
-  if (bytes < 0) {
-long absBytes = Math.abs(bytes);
-if (absBytes < fileLength) {
-  toSkip = fileLength - absBytes;
-  totalBytesToRead = absBytes;
+  while (valueStream != null && !key.toString()
+  .equals(containerIdStr)) {
+// Next container
+key = new 
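
(Truncated in the archive.) The shape of the fix is the classic acquire-in-try, release-in-finally idiom, so the reader is closed on every exit path, including the early continue branches above. Reduced to a self-contained sketch over a plain HDFS stream (IOUtils.closeStream is Hadoop's null-safe close; the method itself is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;

    public class SafeRead {
      static int firstByte(FileSystem fs, Path path) throws IOException {
        FSDataInputStream in = null;          // declared outside the try
        try {
          in = fs.open(path);
          return in.read();                   // may throw; finally still runs
        } finally {
          IOUtils.closeStream(in);            // null-safe close on every path
        }
      }
    }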

[05/24] hadoop git commit: Addendum patch for YARN-5180 updating findbugs-exclude.xml

2016-06-09 Thread aengineer
Addendum patch for YARN-5180 updating findbugs-exclude.xml


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8554aee1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8554aee1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8554aee1

Branch: refs/heads/HDFS-1312
Commit: 8554aee1bef5aff9e49e5e9119d6a7a4abf1c432
Parents: 733f3f1
Author: Arun Suresh 
Authored: Tue Jun 7 15:59:13 2016 -0700
Committer: Arun Suresh 
Committed: Tue Jun 7 15:59:43 2016 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml| 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8554aee1/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 81c7e6a..6998d75 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -517,6 +517,11 @@
   
 
   
+
+
+
+  
+  
 
 
   





[15/24] hadoop git commit: HDFS-10508. DFSInputStream should set thread's interrupt status after catching InterruptException from sleep. Contributed by Jing Zhao.

2016-06-09 Thread aengineer
HDFS-10508. DFSInputStream should set thread's interrupt status after catching 
InterruptException from sleep. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ea9bbce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ea9bbce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ea9bbce

Branch: refs/heads/HDFS-1312
Commit: 8ea9bbce2614e8eb499af73589f021ed1789e78f
Parents: 1500a0a
Author: Masatake Iwasaki 
Authored: Thu Jun 9 14:52:29 2016 +0900
Committer: Masatake Iwasaki 
Committed: Thu Jun 9 14:52:29 2016 +0900

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ea9bbce/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 7f32a56..6132f83 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -306,6 +306,7 @@ public class DFSInputStream extends FSInputStream
 try {
   Thread.sleep(waitTime);
 } catch (InterruptedException e) {
+  Thread.currentThread().interrupt();
   throw new InterruptedIOException(
   "Interrupted while getting the last block length.");
 }
@@ -417,6 +418,7 @@ public class DFSInputStream extends FSInputStream
 try {
   Thread.sleep(500); // delay between retries.
 } catch (InterruptedException e) {
+  Thread.currentThread().interrupt();
   throw new InterruptedIOException(
   "Interrupted while getting the length.");
 }
@@ -1063,6 +1065,7 @@ public class DFSInputStream extends FSInputStream
   " IOException, will wait for " + waitTime + " msec.");
   Thread.sleep((long)waitTime);
 } catch (InterruptedException e) {
+  Thread.currentThread().interrupt();
   throw new InterruptedIOException(
   "Interrupted while choosing DataNode for read.");
 }
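
The idiom, isolated: after catching InterruptedException, either rethrow it or re-set the thread's interrupt flag before translating it into another exception type; otherwise upstream code that checks Thread.interrupted() never observes the interrupt. A standalone sketch:

    import java.io.InterruptedIOException;

    public class RetrySleep {
      static void sleepBeforeRetry(long millis) throws InterruptedIOException {
        try {
          Thread.sleep(millis);
        } catch (InterruptedException e) {
          // Restore the flag so callers can still observe the interrupt.
          Thread.currentThread().interrupt();
          throw new InterruptedIOException("Interrupted while waiting to retry");
        }
      }
    }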





[22/24] hadoop git commit: HADOOP-12666. Support Microsoft Azure Data Lake - as a file system in Hadoop. Contributed by Vishwajeet Dusane.

2016-06-09 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9581fb71/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
new file mode 100644
index 000..7f7e749
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.net.HttpURLConnection;
+
+/**
+ * Extended Webhdfs PostOpParam to avoid redirect during append operation for
+ * azure data lake storage.
+ */
+
+public class ADLPostOpParam extends HttpOpParam {
+  private static final Domain DOMAIN = new Domain(NAME,
+  Op.class);
+
+  /**
+   * Constructor.
+   *
+   * @param str a string representation of the parameter value.
+   */
+  public ADLPostOpParam(final String str) {
+super(DOMAIN, DOMAIN.parse(str));
+  }
+
+  @Override
+  public final String getName() {
+return NAME;
+  }
+
+  /**
+   * Post operations.
+   */
+  public static enum Op implements HttpOpParam.Op {
+APPEND(true, false, HttpURLConnection.HTTP_OK);
+
+private final boolean redirect;
+private final boolean doOutput;
+private final int expectedHttpResponseCode;
+
+Op(final boolean doOut, final boolean doRedirect,
+final int expectHttpResponseCode) {
+  this.doOutput = doOut;
+  this.redirect = doRedirect;
+  this.expectedHttpResponseCode = expectHttpResponseCode;
+}
+
+@Override
+public Type getType() {
+  return Type.POST;
+}
+
+@Override
+public boolean getRequireAuth() {
+  return false;
+}
+
+@Override
+public boolean getDoOutput() {
+  return doOutput;
+}
+
+@Override
+public boolean getRedirect() {
+  return redirect;
+}
+
+@Override
+public int getExpectedHttpResponseCode() {
+  return expectedHttpResponseCode;
+}
+
+/**
+ * @return a URI query string.
+ */
+@Override
+public String toQueryString() {
+  return NAME + "=" + this;
+}
+  }
+}
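
A quick check of what the enum yields at a call site; this assumes the demo sits in the same package as the class above, and that HttpOpParam's inherited NAME constant is the WebHDFS query key "op", which is not shown in this message:

    package org.apache.hadoop.hdfs.web.resources;

    public class AdlOpDemo {
      public static void main(String[] args) {
        ADLPostOpParam.Op op = ADLPostOpParam.Op.APPEND;
        System.out.println(op.toQueryString()); // expected: op=APPEND
        System.out.println(op.getRedirect());   // false: no 307 redirect hop
      }
    }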

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9581fb71/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
new file mode 100644
index 000..d300a1c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.net.HttpURLConnection;
+
+/**
+ * Extended Webhdfs PutOpParam to avoid redirect during Create operation for
+ * azure data lake storage.
+ */
+public class ADLPutOpParam extends HttpOpParam {
+  private static final Domain DOMAIN = new Domain(NAME, Op.class);
+
+  

[02/24] hadoop git commit: YARN-4837. User facing aspects of 'AM blacklisting' feature need fixing. (vinodkv via wangda)

2016-06-09 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/620325e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
index 60b728e..e8c8bca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
@@ -42,7 +42,7 @@ public class AppAttemptInfo {
   protected String nodeId;
   protected String logsLink;
   protected String blacklistedNodes;
-  protected String rmBlacklistedNodesForAMLaunches;
+  private String nodesBlacklistedBySystem;
   protected String appAttemptId;
 
   public AppAttemptInfo() {
@@ -69,9 +69,9 @@ public class AppAttemptInfo {
 + masterContainer.getNodeHttpAddress(),
 ConverterUtils.toString(masterContainer.getId()), user);
 
-rmBlacklistedNodesForAMLaunches = StringUtils.join(
-attempt.getAMBlacklist().getBlacklistUpdates().getAdditions(),
-", ");
+nodesBlacklistedBySystem =
+StringUtils.join(attempt.getAMBlacklistManager()
+  .getBlacklistUpdates().getBlacklistAdditions(), ", ");
 if (rm.getResourceScheduler() instanceof AbstractYarnScheduler) {
   AbstractYarnScheduler ayScheduler =
   (AbstractYarnScheduler) rm.getResourceScheduler();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/620325e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
index 4cbe7a8..3d95ca1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
@@ -87,9 +87,6 @@ public class ApplicationSubmissionContextInfo {
   @XmlElement(name = "reservation-id")
   String reservationId;
 
-  @XmlElement(name = "am-black-listing-requests")
-  AMBlackListingRequestInfo amBlackListingRequestInfo;
-
   public ApplicationSubmissionContextInfo() {
 applicationId = "";
 applicationName = "";
@@ -106,7 +103,6 @@ public class ApplicationSubmissionContextInfo {
 logAggregationContextInfo = null;
 attemptFailuresValidityInterval = -1;
 reservationId = "";
-amBlackListingRequestInfo = null;
   }
 
   public String getApplicationId() {
@@ -173,10 +169,6 @@ public class ApplicationSubmissionContextInfo {
 return attemptFailuresValidityInterval;
   }
 
-  public AMBlackListingRequestInfo getAMBlackListingRequestInfo() {
-return amBlackListingRequestInfo;
-  }
-
   public String getReservationId() {
 return reservationId;
   }
@@ -252,9 +244,4 @@ public class ApplicationSubmissionContextInfo {
   public void setReservationId(String reservationId) {
 this.reservationId = reservationId;
   }
-
-  public void setAMBlackListingRequestInfo(
-  AMBlackListingRequestInfo amBlackListingRequestInfo) {
-this.amBlackListingRequestInfo = amBlackListingRequestInfo;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/620325e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
--
diff --git 

[10/24] hadoop git commit: YARN-5080. Addendum fix to the original patch to fix YARN logs CLI. Contributed by Xuan Gong

2016-06-09 Thread aengineer
YARN-5080. Addendum fix to the original patch to fix YARN logs CLI. Contributed 
by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a43583c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a43583c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a43583c

Branch: refs/heads/HDFS-1312
Commit: 5a43583c0bbb9650ea6a9f48d9544ec3ec24b580
Parents: 3344ba7
Author: Vinod Kumar Vavilapalli 
Authored: Wed Jun 8 09:49:55 2016 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Wed Jun 8 09:49:55 2016 -0700

--
 .../src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a43583c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index bbe636f..d62ee5e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -278,7 +278,9 @@ public class LogsCLI extends Configured implements Tool {
   Configuration conf, String appId) throws ClientHandlerException,
   UniformInterfaceException, JSONException {
 Client webServiceClient = Client.create();
-String webAppAddress = WebAppUtils.getRMWebAppURLWithScheme(conf);
+String webAppAddress = WebAppUtils.getHttpSchemePrefix(conf) +
+WebAppUtils.getWebAppBindURL(conf, YarnConfiguration.RM_BIND_HOST,
+WebAppUtils.getRMWebAppURLWithoutScheme(conf));
 WebResource webResource = webServiceClient.resource(webAppAddress);
 
 ClientResponse response =
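
(Truncated.) The corrected address construction, isolated into a helper; WebAppUtils and YarnConfiguration.RM_BIND_HOST are the classes and constant used in the diff above, while the wrapper method is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

    public class RmWebAddress {
      static String rmWebAppAddress(Configuration conf) {
        // Honors yarn.resourcemanager.bind-host rather than only the
        // configured web-app URL, which is the point of this addendum.
        return WebAppUtils.getHttpSchemePrefix(conf)
            + WebAppUtils.getWebAppBindURL(conf, YarnConfiguration.RM_BIND_HOST,
                WebAppUtils.getRMWebAppURLWithoutScheme(conf));
      }
    }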





[24/24] hadoop git commit: Merge branch 'trunk' into HDFS-1312

2016-06-09 Thread aengineer
Merge branch 'trunk' into HDFS-1312


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f56ab2e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f56ab2e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f56ab2e7

Branch: refs/heads/HDFS-1312
Commit: f56ab2e77902e32986d225f9dd06352f56e617a5
Parents: 4f6fe51 9581fb7
Author: Anu Engineer 
Authored: Thu Jun 9 20:02:22 2016 -0700
Committer: Anu Engineer 
Committed: Thu Jun 9 20:02:22 2016 -0700

--
 LICENSE.txt | 1017 +++-
 NOTICE.txt  |  266 +
 hadoop-build-tools/pom.xml  |   41 +
 hadoop-common-project/hadoop-auth/pom.xml   |1 -
 hadoop-common-project/hadoop-common/pom.xml |1 -
 .../src/main/resources/core-default.xml |   78 +-
 .../src/site/markdown/SingleCluster.md.vm   |   12 +-
 .../conf/TestCommonConfigurationFields.java |6 +
 .../dev-support/findbugsExcludeFile.xml |   28 +
 hadoop-common-project/hadoop-minikdc/pom.xml|   14 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |   39 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   10 +
 .../hdfs/server/common/HdfsServerConstants.java |1 -
 .../hdfs/server/namenode/FSNamesystem.java  |   42 +-
 .../hdfs/server/namenode/LeaseManager.java  |   21 +-
 .../src/main/resources/hdfs-default.xml |   18 +
 .../apache/hadoop/hdfs/TestMiniDFSCluster.java  |   31 -
 .../java/org/apache/hadoop/hdfs/TestRead.java   |   87 ++
 .../server/datanode/SimulatedFSDataset.java |4 +-
 .../hdfs/server/namenode/TestLeaseManager.java  |   24 +-
 .../java/org/apache/hadoop/mapred/JobConf.java  |6 -
 .../org/apache/hadoop/mapreduce/Cluster.java|   15 +-
 .../hadoop/mapred/TestMiniMRChildTask.java  |  233 +---
 .../TestClientProtocolProviderImpls.java|   26 +-
 hadoop-project-dist/pom.xml |2 +
 hadoop-project/pom.xml  |   38 +-
 hadoop-project/src/site/site.xml|2 +
 hadoop-tools/hadoop-ant/pom.xml |   56 -
 .../java/org/apache/hadoop/ant/DfsTask.java |  220 
 .../ant/condition/DfsBaseConditional.java   |   68 --
 .../apache/hadoop/ant/condition/DfsExists.java  |   24 -
 .../apache/hadoop/ant/condition/DfsIsDir.java   |   24 -
 .../apache/hadoop/ant/condition/DfsZeroLen.java |   24 -
 .../resources/org/apache/hadoop/ant/antlib.xml  |   29 -
 hadoop-tools/hadoop-aws/pom.xml |5 +
 .../fs/s3a/AnonymousAWSCredentialsProvider.java |   11 +
 .../fs/s3a/BasicAWSCredentialsProvider.java |   11 +-
 .../org/apache/hadoop/fs/s3a/Constants.java |3 +
 .../s3a/CredentialInitializationException.java  |   46 +
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |   22 +-
 .../fs/s3a/TemporaryAWSCredentialsProvider.java |   70 ++
 .../src/site/markdown/tools/hadoop-aws/index.md |   85 +-
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   |   55 +
 .../fs/s3a/TestS3ATemporaryCredentials.java |  150 +++
 .../dev-support/findbugs-exclude.xml|   24 +
 hadoop-tools/hadoop-azure-datalake/pom.xml  |  180 +++
 .../main/java/org/apache/hadoop/fs/adl/Adl.java |   52 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |   41 +
 ...hedRefreshTokenBasedAccessTokenProvider.java |  135 +++
 .../hadoop/fs/adl/oauth2/package-info.java  |   23 +
 .../org/apache/hadoop/fs/adl/package-info.java  |   23 +
 .../org/apache/hadoop/hdfs/web/ADLConfKeys.java |   61 +
 .../apache/hadoop/hdfs/web/BufferManager.java   |  180 +++
 .../web/PrivateAzureDataLakeFileSystem.java | 1108 ++
 ...hedRefreshTokenBasedAccessTokenProvider.java |   37 +
 .../hadoop/hdfs/web/oauth2/package-info.java|   24 +
 .../apache/hadoop/hdfs/web/package-info.java|   25 +
 .../hadoop/hdfs/web/resources/ADLFlush.java |   49 +
 .../hdfs/web/resources/ADLGetOpParam.java   |   96 ++
 .../hdfs/web/resources/ADLPostOpParam.java  |   97 ++
 .../hdfs/web/resources/ADLPutOpParam.java   |   94 ++
 .../hdfs/web/resources/ADLVersionInfo.java  |   51 +
 .../web/resources/AppendADLNoRedirectParam.java |   45 +
 .../web/resources/CreateADLNoRedirectParam.java |   44 +
 .../hadoop/hdfs/web/resources/LeaseParam.java   |   53 +
 .../web/resources/ReadADLNoRedirectParam.java   |   44 +
 .../hadoop/hdfs/web/resources/package-info.java |   27 +
 .../src/site/markdown/index.md  |  219 
 ...hedRefreshTokenBasedAccessTokenProvider.java |  147 +++
 hadoop-tools/hadoop-tools-dist/pom.xml  |4 +-
 hadoop-tools/pom.xml|2 +-
 .../dev-support/findbugs-exclude.xml|5 +
 .../yarn/api/records/AMBlackListingRequest.java |   67 --
 

[13/24] hadoop git commit: HDFS-10220. A large number of expired leases can make namenode unresponsive and cause failover (Nicolas Fraison via raviprak)

2016-06-09 Thread aengineer
HDFS-10220. A large number of expired leases can make namenode unresponsive and 
cause failover (Nicolas Fraison via raviprak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae047655
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae047655
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae047655

Branch: refs/heads/HDFS-1312
Commit: ae047655f4355288406cd5396fb4e3ea7c307b14
Parents: 0af96a1
Author: Ravi Prakash 
Authored: Wed Jun 8 13:44:22 2016 -0700
Committer: Ravi Prakash 
Committed: Wed Jun 8 13:44:22 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 10 +
 .../hdfs/server/common/HdfsServerConstants.java |  1 -
 .../hdfs/server/namenode/FSNamesystem.java  | 42 
 .../hdfs/server/namenode/LeaseManager.java  | 21 --
 .../src/main/resources/hdfs-default.xml | 18 +
 .../hdfs/server/namenode/TestLeaseManager.java  | 24 ++-
 6 files changed, 94 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae047655/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 19e1791..f18a6c6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -397,6 +397,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;
   public static final int DFS_NAMENODE_MAX_XATTR_SIZE_HARD_LIMIT = 32768;
 
+  public static final String  DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY =
+  "dfs.namenode.lease-recheck-interval-ms";
+  public static final longDFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_DEFAULT =
+  2000;
+  public static final String
+  DFS_NAMENODE_MAX_LOCK_HOLD_TO_RELEASE_LEASE_MS_KEY =
+  "dfs.namenode.max-lock-hold-to-release-lease-ms";
+  public static final long
+  DFS_NAMENODE_MAX_LOCK_HOLD_TO_RELEASE_LEASE_MS_DEFAULT = 25;
+
   public static final String  DFS_UPGRADE_DOMAIN_FACTOR = 
"dfs.namenode.upgrade.domain.factor";
   public static final int DFS_UPGRADE_DOMAIN_FACTOR_DEFAULT = 
DFS_REPLICATION_DEFAULT;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae047655/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index b2dda3c..3798394 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -361,7 +361,6 @@ public interface HdfsServerConstants {
   }
   
   String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";
-  long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;
 
   String CRYPTO_XATTR_ENCRYPTION_ZONE =
   "raw.hdfs.crypto.encryption.zone";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae047655/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index c9f2487..915ae97 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -76,6 +76,10 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPI
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY;
+import static 
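
(Truncated.) Both new knobs are plain long-valued properties, so operators can tune them in hdfs-site.xml or in code. A sketch of the programmatic form, using the defaults the diff introduces (2000 ms recheck interval, 25 ms maximum lock hold):

    import org.apache.hadoop.conf.Configuration;

    public class LeaseMonitorTuning {
      static Configuration tuned() {
        Configuration conf = new Configuration();
        // How often the lease monitor wakes to look for expired leases.
        conf.setLong("dfs.namenode.lease-recheck-interval-ms", 2000L);
        // Cap on how long one release pass may hold the namesystem write
        // lock, which is what keeps the namenode responsive in this fix.
        conf.setLong("dfs.namenode.max-lock-hold-to-release-lease-ms", 25L);
        return conf;
      }
    }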

[11/24] hadoop git commit: Remove redundant TestMiniDFSCluster.testDualClusters. Contributed by Jiayi Zhou.

2016-06-09 Thread aengineer
Remove redundant TestMiniDFSCluster.testDualClusters. Contributed by Jiayi Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ee9ea00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ee9ea00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ee9ea00

Branch: refs/heads/HDFS-1312
Commit: 1ee9ea002609971ad58082bf525d57fca8a37035
Parents: 5a43583
Author: Andrew Wang 
Authored: Wed Jun 8 12:58:56 2016 -0700
Committer: Andrew Wang 
Committed: Wed Jun 8 12:58:56 2016 -0700

--
 .../apache/hadoop/hdfs/TestMiniDFSCluster.java  | 31 
 1 file changed, 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ee9ea00/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index 78ae8b1..ec72d87 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assume.assumeTrue;
 
 import java.io.File;
@@ -84,36 +83,6 @@ public class TestMiniDFSCluster {
 }
   }
 
-  /**
-   * Bring up two clusters and assert that they are in different directories.
-   * @throws Throwable on a failure
-   */
-  @Test(timeout=10)
-  public void testDualClusters() throws Throwable {
-File testDataCluster2 = new File(testDataPath, CLUSTER_2);
-File testDataCluster3 = new File(testDataPath, CLUSTER_3);
-Configuration conf = new HdfsConfiguration();
-String c2Path = testDataCluster2.getAbsolutePath();
-conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c2Path);
-MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).build();
-MiniDFSCluster cluster3 = null;
-try {
-  String dataDir2 = cluster2.getDataDirectory();
-  assertEquals(new File(c2Path + "/data"), new File(dataDir2));
-  //change the data dir
-  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
-   testDataCluster3.getAbsolutePath());
-  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
-  cluster3 = builder.build();
-  String dataDir3 = cluster3.getDataDirectory();
-  assertTrue("Clusters are bound to the same directory: " + dataDir2,
-!dataDir2.equals(dataDir3));
-} finally {
-  MiniDFSCluster.shutdownCluster(cluster3);
-  MiniDFSCluster.shutdownCluster(cluster2);
-}
-  }
-
   @Test(timeout=10)
   public void testIsClusterUpAfterShutdown() throws Throwable {
 Configuration conf = new HdfsConfiguration();
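
The removed test still documents the supported way to isolate a MiniDFSCluster in its own directory; kept here as a compact sketch derived from the deleted code (the helper wrapper is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class IsolatedMiniCluster {
      static MiniDFSCluster start(String baseDir) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // A distinct base dir per cluster keeps two clusters from colliding.
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir);
        return new MiniDFSCluster.Builder(conf).build();
      }
    }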





[16/24] hadoop git commit: HADOOP-13220. Follow-on fixups after upgrading mini-kdc to use Kerby. Contributed by Jiajia Li

2016-06-09 Thread aengineer
HADOOP-13220. Follow-on fixups after upgrading mini-kdc to use Kerby. Contributed
by Jiajia Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/723432b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/723432b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/723432b3

Branch: refs/heads/HDFS-1312
Commit: 723432b3387fe69e6bf2b56d2ea1a7b1bda16b97
Parents: 76f0800
Author: Kai Zheng 
Authored: Thu Jun 9 15:56:12 2016 +0800
Committer: Kai Zheng 
Committed: Thu Jun 9 15:56:12 2016 +0800

--
 hadoop-common-project/hadoop-auth/pom.xml   |  1 -
 hadoop-common-project/hadoop-common/pom.xml |  1 -
 .../dev-support/findbugsExcludeFile.xml | 28 
 hadoop-common-project/hadoop-minikdc/pom.xml| 14 +-
 hadoop-project/pom.xml  |  6 +
 5 files changed, 47 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/723432b3/hadoop-common-project/hadoop-auth/pom.xml
--
diff --git a/hadoop-common-project/hadoop-auth/pom.xml 
b/hadoop-common-project/hadoop-auth/pom.xml
index 27e4547..93dceb3 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -134,7 +134,6 @@
 
   org.apache.kerby
   kerb-simplekdc
-  1.0.0-RC2
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/723432b3/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 8bf052c..059986f 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -298,7 +298,6 @@
 
   org.apache.kerby
   kerb-simplekdc
-  1.0.0-RC2
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/723432b3/hadoop-common-project/hadoop-minikdc/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-common-project/hadoop-minikdc/dev-support/findbugsExcludeFile.xml 
b/hadoop-common-project/hadoop-minikdc/dev-support/findbugsExcludeFile.xml
new file mode 100644
index 000..9a1c4a7
--- /dev/null
+++ b/hadoop-common-project/hadoop-minikdc/dev-support/findbugsExcludeFile.xml
@@ -0,0 +1,28 @@
+
+
+  
+  
+
+
+
+  
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/723432b3/hadoop-common-project/hadoop-minikdc/pom.xml
--
diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml 
b/hadoop-common-project/hadoop-minikdc/pom.xml
index 2e22ad0..3075cad 100644
--- a/hadoop-common-project/hadoop-minikdc/pom.xml
+++ b/hadoop-common-project/hadoop-minikdc/pom.xml
@@ -38,7 +38,6 @@
 
   org.apache.kerby
   kerb-simplekdc
-  1.0.0-RC2
 
 
   org.slf4j
@@ -51,4 +50,17 @@
   compile
 
   
+
+  
+
+  
+org.codehaus.mojo
+findbugs-maven-plugin
+
+  ${basedir}/dev-support/findbugsExcludeFile.xml
+  
+
+  
+
+  
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/723432b3/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index aa47f6c..2b6b162 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1011,6 +1011,12 @@
 1.3.0
 
 
+
+  org.apache.kerby
+  kerb-simplekdc
+  1.0.0-RC2
+
+
 
   
 





[21/24] hadoop git commit: HADOOP-12893. Verify LICENSE.txt and NOTICE.txt. Contributed by Xiao Chen, Akira Ajisaka, and Andrew Wang.

2016-06-09 Thread aengineer
HADOOP-12893. Verify LICENSE.txt and NOTICE.txt. Contributed by Xiao Chen, 
Akira Ajisaka, and Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e383b732
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e383b732
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e383b732

Branch: refs/heads/HDFS-1312
Commit: e383b732c54c542482b0b836e2d2c46eb49b4e2d
Parents: 58c3486
Author: Andrew Wang 
Authored: Thu Jun 9 13:54:14 2016 -0700
Committer: Andrew Wang 
Committed: Thu Jun 9 13:54:14 2016 -0700

--
 LICENSE.txt | 1017 +-
 NOTICE.txt  |  266 ++
 hadoop-build-tools/pom.xml  |   41 ++
 hadoop-project-dist/pom.xml |2 +
 hadoop-project/pom.xml  |   19 +-
 pom.xml |   26 +
 6 files changed, 1367 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e383b732/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 929e2a8..44880df 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -320,7 +320,9 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 
OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-For com.google.re2j.* classes:
+The binary distribution of this product bundles these dependencies under the
+following license:
+re2j 1.0
 -
 This is a work derived from Russ Cox's RE2 in Go, whose license
 http://golang.org/LICENSE is as follows:
@@ -548,12 +550,14 @@ 
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css
+And the binary distribution of this product bundles these dependencies under 
the
+following license:
+Mockito 1.8.5
+SLF4J 1.7.10
 

 
 The MIT License (MIT)
 
-Copyright (c) 2011-2016 Twitter, Inc.
-
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
@@ -648,3 +652,1010 @@ 
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/d3.v3.js
 
 D3 is available under a 3-clause BSD license. For details, see:
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/d3-LICENSE
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+HSQLDB Database 2.0.0
+
+"COPYRIGHTS AND LICENSES (based on BSD License)
+
+For work developed by the HSQL Development Group:
+
+Copyright (c) 2001-2016, The HSQL Development Group
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+Neither the name of the HSQL Development Group nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS""
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL HSQL DEVELOPMENT GROUP, HSQLDB.ORG,
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+For work originally developed by the Hypersonic SQL Group:
+
+Copyright (c) 1995-2000 by the Hypersonic SQL Group.
+All rights reserved.
+Redistribution and use in source and binary forms, with or without

[09/24] hadoop git commit: YARN-5204. Properly report status of killed/stopped queued containers. (Konstantinos Karanasos via asuresh)

2016-06-09 Thread aengineer
YARN-5204. Properly report status of killed/stopped queued containers. 
(Konstantinos Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3344ba70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3344ba70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3344ba70

Branch: refs/heads/HDFS-1312
Commit: 3344ba70e027c929e07bad5e6877c796d41181e9
Parents: 8c8a377
Author: Arun Suresh 
Authored: Wed Jun 8 08:31:32 2016 -0700
Committer: Arun Suresh 
Committed: Wed Jun 8 08:31:32 2016 -0700

--
 .../queuing/QueuingContainerManagerImpl.java|  15 ++-
 .../queuing/TestQueuingContainerManager.java| 129 +++
 2 files changed, 115 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3344ba70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
index a1e3bdb..38b1b07 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
@@ -175,8 +175,9 @@ public class QueuingContainerManagerImpl extends 
ContainerManagerImpl {
   }
 
   nodeStatusUpdater.sendOutofBandHeartBeat();
+} else {
+  super.stopContainerInternal(containerID);
 }
-super.stopContainerInternal(containerID);
   }
 
   /**
@@ -456,6 +457,18 @@ public class QueuingContainerManagerImpl extends 
ContainerManagerImpl {
 ContainerExitStatus.INVALID, this.context.getQueuingContext()
 .getQueuedContainers().get(containerID).getResource(),
 executionType);
+  } else {
+// Check if part of the stopped/killed queued containers.
+for (ContainerTokenIdentifier cTokenId : this.context
+.getQueuingContext().getKilledQueuedContainers().keySet()) {
+  if (cTokenId.getContainerID().equals(containerID)) {
+return BuilderUtils.newContainerStatus(containerID,
+org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE,
+this.context.getQueuingContext().getKilledQueuedContainers()
+.get(cTokenId), ContainerExitStatus.ABORTED, cTokenId
+.getResource(), cTokenId.getExecutionType());
+  }
+}
   }
 }
 return super.getContainerStatusInternal(containerID, nmTokenIdentifier);
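
The rule these hunks encode, restated as a minimal standalone model: a container killed while still queued must be reported as COMPLETE with an ABORTED exit status instead of silently disappearing. The types below are hypothetical simplifications of the YARN records, not the real API:

import java.util.HashMap;
import java.util.Map;

public class KilledQueuedContainerModel {
  // Mirrors ContainerExitStatus.ABORTED.
  static final int ABORTED = -100;

  // containerId -> diagnostics recorded when the queued container was killed,
  // standing in for queuingContext.getKilledQueuedContainers().
  private final Map<String, String> killedQueuedContainers = new HashMap<>();

  void recordKill(String containerId, String diagnostics) {
    killedQueuedContainers.put(containerId, diagnostics);
  }

  // Returns a synthetic COMPLETE/ABORTED status for a killed queued container,
  // or null so the caller can fall back to the regular status lookup.
  String statusOf(String containerId) {
    String diagnostics = killedQueuedContainers.get(containerId);
    if (diagnostics == null) {
      return null; // equivalent of super.getContainerStatusInternal(...)
    }
    return "state=COMPLETE exitStatus=" + ABORTED + " diagnostics=" + diagnostics;
  }

  public static void main(String[] args) {
    KilledQueuedContainerModel model = new KilledQueuedContainerModel();
    model.recordKill("container_1", "Queued container request removed.");
    System.out.println(model.statusOf("container_1"));
  }
}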

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3344ba70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/TestQueuingContainerManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/TestQueuingContainerManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/TestQueuingContainerManager.java
index 4d44d8d..caebef7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/TestQueuingContainerManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/TestQueuingContainerManager.java
@@ -24,13 +24,13 @@ import java.util.Arrays;
 import java.util.List;
 
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.UserGroupInformation;
 import 

[03/24] hadoop git commit: YARN-4837. User facing aspects of 'AM blacklisting' feature need fixing. (vinodkv via wangda)

2016-06-09 Thread aengineer
YARN-4837. User facing aspects of 'AM blacklisting' feature need fixing. 
(vinodkv via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/620325e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/620325e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/620325e8

Branch: refs/heads/HDFS-1312
Commit: 620325e81696fca140195b74929ed9eda2d5eb16
Parents: be34e85
Author: Wangda Tan 
Authored: Tue Jun 7 15:06:42 2016 -0700
Committer: Wangda Tan 
Committed: Tue Jun 7 15:06:42 2016 -0700

--
 .../yarn/api/records/AMBlackListingRequest.java |  67 -
 .../records/ApplicationSubmissionContext.java   |  23 --
 .../hadoop/yarn/conf/YarnConfiguration.java |  25 +-
 .../src/main/proto/yarn_protos.proto|   5 -
 .../yarn/conf/TestYarnConfigurationFields.java  |   7 +
 .../impl/pb/AMBlackListingRequestPBImpl.java| 104 
 .../pb/ApplicationSubmissionContextPBImpl.java  |  40 ---
 .../src/main/resources/yarn-default.xml |  19 --
 .../hadoop/yarn/api/TestPBImplRecords.java  |  10 -
 .../blacklist/BlacklistManager.java |   9 +-
 .../blacklist/BlacklistUpdates.java |  47 
 .../blacklist/DisabledBlacklistManager.java |  12 +-
 .../blacklist/SimpleBlacklistManager.java   |  17 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  79 ++
 .../rmapp/attempt/RMAppAttempt.java |   2 +-
 .../rmapp/attempt/RMAppAttemptImpl.java |  85 +--
 .../scheduler/AbstractYarnScheduler.java|   2 +-
 .../scheduler/AppSchedulingInfo.java|  74 +++---
 .../scheduler/SchedulerAppUtils.java|  16 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  33 ++-
 .../scheduler/capacity/CapacityScheduler.java   |  11 +-
 .../allocator/RegularContainerAllocator.java|   2 +-
 .../scheduler/fair/FSLeafQueue.java |   2 +-
 .../scheduler/fair/FairScheduler.java   |   8 +-
 .../scheduler/fifo/FifoScheduler.java   |  12 +-
 .../webapp/RMAppAttemptBlock.java   |   9 +-
 .../resourcemanager/webapp/RMAppBlock.java  |  13 +-
 .../resourcemanager/webapp/RMWebServices.java   |  21 +-
 .../webapp/dao/AMBlackListingRequestInfo.java   |  61 -
 .../webapp/dao/AppAttemptInfo.java  |   8 +-
 .../dao/ApplicationSubmissionContextInfo.java   |  13 -
 .../TestNodeBlacklistingOnAMFailures.java   | 251 +++
 .../applicationsmanager/TestAMRestart.java  | 177 +
 .../blacklist/TestBlacklistManager.java |  29 +--
 .../rmapp/TestRMAppTransitions.java |  58 -
 .../scheduler/TestAppSchedulingInfo.java|  12 +-
 .../capacity/TestCapacityScheduler.java |   8 +-
 .../scheduler/fair/TestFSAppAttempt.java|  12 +-
 .../scheduler/fair/TestFairScheduler.java   |   9 +-
 .../TestRMWebServicesAppsModification.java  |  39 ++-
 40 files changed, 536 insertions(+), 895 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/620325e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
deleted file mode 100644
index 4aec2ba..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.api.records;
-
-import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import 

[18/24] hadoop git commit: YARN-5191. Renamed the newly added “download=true” option for getting logs via NMWebServices and AHSWebServices to be a better "format" option. (Xuan Gong via vinodkv)

2016-06-09 Thread aengineer
YARN-5191. Renamed the newly added “download=true” option for getting logs 
via NMWebServices and AHSWebServices to be a better "format" option. (Xuan Gong 
via vinodkv)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9378d942
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9378d942
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9378d942

Branch: refs/heads/HDFS-1312
Commit: 9378d9428f127eff7acd6c13544016cdbf2d65fb
Parents: 656c460
Author: Vinod Kumar Vavilapalli 
Authored: Thu Jun 9 12:30:58 2016 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Thu Jun 9 12:30:58 2016 -0700

--
 .../hadoop/yarn/webapp/util/WebAppUtils.java| 18 ++
 .../webapp/AHSWebServices.java  | 36 
 .../nodemanager/webapp/NMWebServices.java   | 33 +++---
 .../nodemanager/webapp/TestNMWebServices.java   | 15 +++-
 4 files changed, 74 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9378d942/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index faf4a77..3aa773a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -24,6 +24,7 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -400,4 +401,21 @@ public class WebAppUtils {
 }
 return aid;
   }
+
+  public static String getSupportedLogContentType(String format) {
+    if (format.equalsIgnoreCase("text")) {
+      return "text/plain";
+    } else if (format.equalsIgnoreCase("octet-stream")) {
+      return "application/octet-stream";
+    }
+    return null;
+  }
+
+  public static String getDefaultLogContentType() {
+    return "text/plain";
+  }
+
+  public static List<String> listSupportedLogContentType() {
+    return Arrays.asList("text", "octet-stream");
+  }
 }
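
The three helpers above form a self-contained mapping from the new "format" query value to a response content type. A short caller sketch; only the WebAppUtils methods are from this patch, the surrounding class is illustrative:

import java.util.List;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

public class LogFormatDemo {
  public static void main(String[] args) {
    String requested = "octet-stream";
    // "text" -> text/plain, "octet-stream" -> application/octet-stream,
    // anything else -> null.
    String contentType = WebAppUtils.getSupportedLogContentType(requested);
    if (contentType == null) {
      // A web service would typically answer 400 Bad Request and list
      // the supported values rather than guessing.
      List<String> supported = WebAppUtils.listSupportedLogContentType();
      throw new IllegalArgumentException(
          "The valid values for the format are " + supported);
    }
    System.out.println(contentType); // application/octet-stream
  }
}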

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9378d942/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index 59dbd44..692b172 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -66,6 +66,7 @@ import 
org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import com.google.common.base.Joiner;
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
@@ -212,7 +213,7 @@ public class AHSWebServices extends WebServices {
   @Context HttpServletResponse res,
   @PathParam("containerid") String containerIdStr,
   @PathParam("filename") String filename,
-  @QueryParam("download") String download,
+  @QueryParam("format") String format,
   @QueryParam("size") String size) {
 init(res);
 ContainerId containerId;
@@ -223,9 +224,6 @@ public class AHSWebServices extends WebServices {
   "Invalid ContainerId: " + containerIdStr);
 }
 
-boolean downloadFile = parseBooleanParam(download);
-
-
 final long length = 

[17/24] hadoop git commit: HADOOP-13237: s3a initialization against public bucket fails if caller lacks any credentials. Contributed by Chris Nauroth

2016-06-09 Thread aengineer
HADOOP-13237: s3a initialization against public bucket fails if caller lacks 
any credentials. Contributed by Chris Nauroth


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/656c460c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/656c460c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/656c460c

Branch: refs/heads/HDFS-1312
Commit: 656c460c0e79ee144d6ef48d85cec04a1af3b2cc
Parents: 8ea9bbc
Author: Steve Loughran 
Authored: Thu Jun 9 16:36:27 2016 +0100
Committer: Steve Loughran 
Committed: Thu Jun 9 17:28:49 2016 +0100

--
 .../src/main/resources/core-default.xml | 13 -
 .../fs/s3a/AnonymousAWSCredentialsProvider.java | 11 
 .../fs/s3a/BasicAWSCredentialsProvider.java |  8 +++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 22 +---
 .../src/site/markdown/tools/hadoop-aws/index.md | 14 -
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   | 55 
 6 files changed, 113 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/656c460c/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index a65246b..8bb27ea 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -791,7 +791,18 @@
 
 <property>
   <name>fs.s3a.aws.credentials.provider</name>
-  <description>Class name of a credentials provider that implements
com.amazonaws.auth.AWSCredentialsProvider. Omit if using access/secret keys or
another authentication mechanism.</description>
+  <description>
+    Class name of a credentials provider that implements
+    com.amazonaws.auth.AWSCredentialsProvider.  Omit if using access/secret keys
+    or another authentication mechanism.  The specified class must provide an
+    accessible constructor accepting java.net.URI and
+    org.apache.hadoop.conf.Configuration, or an accessible default constructor.
+    Specifying org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider allows
+    anonymous access to a publicly accessible S3 bucket without any credentials.
+    Please note that allowing anonymous access to an S3 bucket compromises
+    security and therefore is unsuitable for most use cases.  It can be useful
+    for accessing public data sets without requiring AWS credentials.
+  </description>
 </property>
 
 <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/656c460c/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
index e62ec77..2c863fc 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
@@ -24,6 +24,17 @@ import com.amazonaws.auth.AWSCredentials;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+/**
+ * AnonymousAWSCredentialsProvider supports anonymous access to AWS services
+ * through the AWS SDK.  AWS requests will not be signed.  This is not suitable
+ * for most cases, because allowing anonymous access to an S3 bucket 
compromises
+ * security.  This can be useful for accessing public data sets without
+ * requiring AWS credentials.
+ *
+ * Please note that users may reference this class name from configuration
+ * property fs.s3a.aws.credentials.provider.  Therefore, changing the class 
name
+ * would be a backward-incompatible change.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public class AnonymousAWSCredentialsProvider implements AWSCredentialsProvider 
{
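
With the provider above wired in through fs.s3a.aws.credentials.provider, a client can read a publicly accessible bucket without credentials. A minimal read-only sketch; the bucket name is a placeholder, and hadoop-aws plus the AWS SDK must be on the classpath:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AnonymousS3ARead {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Send unsigned requests; this only works for publicly readable buckets.
    conf.set("fs.s3a.aws.credentials.provider",
        "org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider");
    try (FileSystem fs =
        FileSystem.get(URI.create("s3a://some-public-bucket/"), conf)) {
      for (FileStatus status : fs.listStatus(new Path("/"))) {
        System.out.println(status.getPath());
      }
    }
  }
}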

http://git-wip-us.apache.org/repos/asf/hadoop/blob/656c460c/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
index 2f721e4..3a5ee8c 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
+++ 

[14/24] hadoop git commit: YARN-4308. ContainersAggregated CPU resource utilization reports negative usage in first few heartbeats. Contributed by Sunil G

2016-06-09 Thread aengineer
YARN-4308. ContainersAggregated CPU resource utilization reports negative usage 
in first few heartbeats. Contributed by Sunil G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1500a0a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1500a0a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1500a0a3

Branch: refs/heads/HDFS-1312
Commit: 1500a0a3009e453c9f05a93df7a78b4e185eef30
Parents: ae04765
Author: Naganarasimha 
Authored: Thu Jun 9 05:41:09 2016 +0530
Committer: Naganarasimha 
Committed: Thu Jun 9 05:41:09 2016 +0530

--
 .../yarn/util/ProcfsBasedProcessTree.java   |  8 +++
 .../util/ResourceCalculatorProcessTree.java |  4 +-
 .../yarn/util/WindowsBasedProcessTree.java  |  8 +++
 .../monitor/ContainersMonitorImpl.java  |  9 +++
 .../MockCPUResourceCalculatorProcessTree.java   | 70 
 .../MockResourceCalculatorProcessTree.java  |  5 ++
 .../TestContainersMonitorResourceChange.java| 62 -
 7 files changed, 163 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1500a0a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index bb9c183..80d49c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -467,6 +467,14 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
 return totalStime.add(BigInteger.valueOf(totalUtime));
   }
 
+  /**
+   * Get the CPU usage by all the processes in the process-tree in Unix.
+   * Note: UNAVAILABLE will be returned in case when CPU usage is not
+   * available. It is NOT advised to return any other error code.
+   *
+   * @return percentage CPU usage since the process-tree was created,
+   * {@link #UNAVAILABLE} if CPU usage cannot be calculated or not available.
+   */
   @Override
   public float getCpuUsagePercent() {
 BigInteger processTotalJiffies = getTotalProcessJiffies();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1500a0a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
index 7214c75..771ec86 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
@@ -187,9 +187,11 @@ public abstract class ResourceCalculatorProcessTree 
extends Configured {
* Get the CPU usage by all the processes in the process-tree based on
* average between samples as a ratio of overall CPU cycles similar to top.
* Thus, if 2 out of 4 cores are used this should return 200.0.
+   * Note: UNAVAILABLE will be returned in case when CPU usage is not
+   * available. It is NOT advised to return any other error code.
*
* @return percentage CPU usage since the process-tree was created,
-   * {@link #UNAVAILABLE} if it cannot be calculated.
+   * {@link #UNAVAILABLE} if CPU usage cannot be calculated or not available.
*/
   public float getCpuUsagePercent() {
 return UNAVAILABLE;
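
The contract spelled out in these javadocs is that UNAVAILABLE means "no sample yet", which is exactly what the first few heartbeats produce. A caller-side sketch of the guard; the constant and both methods are the real API, the aggregation itself is illustrative:

import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;

public class CpuSampleGuard {
  // Adds only valid samples, so early UNAVAILABLE readings cannot
  // drag an aggregate negative.
  static float aggregate(ResourceCalculatorProcessTree pTree,
      float runningTotal) {
    pTree.updateProcessTree();
    float cpu = pTree.getCpuUsagePercent();
    if (cpu == ResourceCalculatorProcessTree.UNAVAILABLE) {
      return runningTotal; // skip the sample instead of adding -1
    }
    return runningTotal + cpu;
  }
}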

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1500a0a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
index 7858292..1c7eaf7 100644
--- 

[1/2] hadoop git commit: HDFS-10500. Diskbalancer: Print out information when a plan is not generated. Contributed by Anu Engineer.

2016-06-09 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-1312 32058f9b6 -> 4f6fe511c


HDFS-10500. Diskbalancer: Print out information when a plan is not generated. 
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82de3d3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82de3d3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82de3d3e

Branch: refs/heads/HDFS-1312
Commit: 82de3d3e0247d724f247f460ead253c20f65e545
Parents: 32058f9
Author: Anu Engineer 
Authored: Thu Jun 9 13:43:19 2016 -0700
Committer: Anu Engineer 
Committed: Thu Jun 9 13:43:19 2016 -0700

--
 .../diskbalancer/command/CancelCommand.java |  4 +-
 .../server/diskbalancer/command/Command.java| 19 +-
 .../diskbalancer/command/ExecuteCommand.java|  2 +-
 .../diskbalancer/command/PlanCommand.java   | 61 ++--
 4 files changed, 37 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82de3d3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
index f395802..3834d9b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
@@ -70,7 +70,7 @@ public class CancelCommand extends Command {
   // points us to the plan file, we can compute the hash as well as read
   // the address of the datanode from the plan file.
   String planFile = cmd.getOptionValue(DiskBalancer.CANCEL);
-  Preconditions.checkArgument(planFile == null || planFile.isEmpty(),
+  Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
   "Invalid plan file specified.");
   String planData = null;
   try (FSDataInputStream plan = open(planFile)) {
@@ -88,7 +88,7 @@ public class CancelCommand extends Command {
*/
   private void cancelPlan(String planData) throws IOException {
 Preconditions.checkNotNull(planData);
-NodePlan plan = readPlan(planData);
+NodePlan plan = NodePlan.parseJson(planData);
 String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
 Preconditions.checkNotNull(dataNodeAddress);
 ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
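
The one-line fix above is easy to misread: Guava's checkArgument throws IllegalArgumentException when its condition is false, so the old condition "planFile == null || planFile.isEmpty()" rejected every usable plan file and accepted the broken ones. A tiny demonstration of the corrected polarity:

import com.google.common.base.Preconditions;

public class CheckArgumentPolarity {
  public static void main(String[] args) {
    String planFile = "/tmp/plan.json";
    // Passes for a non-empty value, throws IllegalArgumentException otherwise.
    Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
        "Invalid plan file specified.");
    System.out.println("accepted " + planFile);
  }
}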

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82de3d3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index fb975a8..94a21d1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -31,16 +31,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
-import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
-
+import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.htrace.fasterxml.jackson.databind.ObjectMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -59,10 +56,10 @@ import java.util.Date;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.List;
 
 /**
  * Common interface for command handling.
@@ -394,16 +391,4 @@ public abstract class Command extends 

[2/2] hadoop git commit: HDFS-10501. DiskBalancer: Use the default datanode port if port is not provided. Contributed by Anu Engineer.

2016-06-09 Thread aengineer
HDFS-10501. DiskBalancer: Use the default datanode port if port is not 
provided. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f6fe511
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f6fe511
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f6fe511

Branch: refs/heads/HDFS-1312
Commit: 4f6fe511cf6cc85a4916a6add87cd9f0bc295cdc
Parents: 82de3d3
Author: Anu Engineer 
Authored: Thu Jun 9 19:47:01 2016 -0700
Committer: Anu Engineer 
Committed: Thu Jun 9 19:47:01 2016 -0700

--
 .../server/diskbalancer/command/QueryCommand.java | 18 --
 1 file changed, 16 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f6fe511/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
index 36448b8..ea7dbcc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
@@ -22,10 +22,12 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command;
 import com.google.common.base.Preconditions;
 import org.apache.commons.cli.CommandLine;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.tools.DiskBalancer;
+import org.apache.hadoop.net.NetUtils;
 
 /**
  * Gets the current status of disk balancer command.
@@ -55,10 +57,22 @@ public class QueryCommand extends Command {
 verifyCommandOptions(DiskBalancer.QUERY, cmd);
 String nodeName = cmd.getOptionValue(DiskBalancer.QUERY);
 Preconditions.checkNotNull(nodeName);
-ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeName);
+nodeName = nodeName.trim();
+String nodeAddress = nodeName;
+
+// if the string is not name:port format use the default port.
+if(!nodeName.matches("^.*:\\d$")) {
+  int defaultIPC = NetUtils.createSocketAddr(
+  getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
+  DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
+  nodeAddress = nodeName + ":" + defaultIPC;
+  LOG.debug("Using default data node port :  {}", nodeAddress);
+}
+
+ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeAddress);
 try {
   DiskBalancerWorkStatus workStatus = dataNode.queryDiskBalancerPlan();
-  System.out.printf("Plan ID: %s Result: %s%n", workStatus.getPlanID(),
+  System.out.printf("Plan ID: %s %nResult: %s%n", workStatus.getPlanID(),
   workStatus.getResult().toString());
 
   if(cmd.hasOption(DiskBalancer.VERBOSE)) {
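
A standalone sketch of the host:port defaulting added above. Note the committed pattern "^.*:\\d$" matches only a single trailing digit; the sketch uses \\d+ so multi-digit ports also match, and the port value here is a placeholder for whatever dfs.datanode.ipc.address resolves to:

public class DefaultPortDemo {
  // Placeholder for the port taken from dfs.datanode.ipc.address.
  static final int DEFAULT_IPC_PORT = 50020;

  static String withDefaultPort(String node) {
    String trimmed = node.trim();
    // Append the default IPC port unless the string already ends in :port.
    return trimmed.matches("^.*:\\d+$")
        ? trimmed : trimmed + ":" + DEFAULT_IPC_PORT;
  }

  public static void main(String[] args) {
    System.out.println(withDefaultPort("datanode1"));       // datanode1:50020
    System.out.println(withDefaultPort("datanode1:50020")); // unchanged
  }
}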


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: Revert "HADOOP-12666. Support Microsoft Azure Data Lake - as a file system in Hadoop. Contributed by Vishwajeet Dusane."

2016-06-09 Thread cnauroth
Revert "HADOOP-12666. Support Microsoft Azure Data Lake - as a file system in 
Hadoop. Contributed by Vishwajeet Dusane."

This reverts commit a8f03ef7ea8163c00ce5d72a4e1c77284befe5aa.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19259422
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19259422
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19259422

Branch: refs/heads/branch-2
Commit: 19259422945b94dfc7a4cc752bf171debcfbad5f
Parents: a8f03ef
Author: Chris Nauroth 
Authored: Thu Jun 9 15:17:18 2016 -0700
Committer: Chris Nauroth 
Committed: Thu Jun 9 15:17:18 2016 -0700

--
 .../src/main/resources/core-default.xml |   60 -
 .../conf/TestCommonConfigurationFields.java |6 -
 hadoop-project/src/site/site.xml|2 -
 .../dev-support/findbugs-exclude.xml|   24 -
 hadoop-tools/hadoop-azure-datalake/pom.xml  |  180 ---
 .../main/java/org/apache/hadoop/fs/adl/Adl.java |   52 -
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |   41 -
 ...hedRefreshTokenBasedAccessTokenProvider.java |  135 ---
 .../hadoop/fs/adl/oauth2/package-info.java  |   23 -
 .../org/apache/hadoop/fs/adl/package-info.java  |   23 -
 .../org/apache/hadoop/hdfs/web/ADLConfKeys.java |   61 -
 .../apache/hadoop/hdfs/web/BufferManager.java   |  180 ---
 .../web/PrivateAzureDataLakeFileSystem.java | 1108 --
 ...hedRefreshTokenBasedAccessTokenProvider.java |   37 -
 .../hadoop/hdfs/web/oauth2/package-info.java|   24 -
 .../apache/hadoop/hdfs/web/package-info.java|   25 -
 .../hadoop/hdfs/web/resources/ADLFlush.java |   49 -
 .../hdfs/web/resources/ADLGetOpParam.java   |   96 --
 .../hdfs/web/resources/ADLPostOpParam.java  |   97 --
 .../hdfs/web/resources/ADLPutOpParam.java   |   94 --
 .../hdfs/web/resources/ADLVersionInfo.java  |   51 -
 .../web/resources/AppendADLNoRedirectParam.java |   45 -
 .../web/resources/CreateADLNoRedirectParam.java |   44 -
 .../hadoop/hdfs/web/resources/LeaseParam.java   |   53 -
 .../web/resources/ReadADLNoRedirectParam.java   |   44 -
 .../hadoop/hdfs/web/resources/package-info.java |   27 -
 .../src/site/markdown/index.md  |  219 
 ...hedRefreshTokenBasedAccessTokenProvider.java |  147 ---
 hadoop-tools/hadoop-tools-dist/pom.xml  |6 -
 hadoop-tools/pom.xml|1 -
 30 files changed, 2954 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19259422/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 41bf6d8..490f1de 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2213,64 +2213,4 @@
     needs to be specified in net.topology.script.file.name.
   </description>
 </property>
-
-
-<!-- Azure Data Lake File System Configurations -->
-
-<property>
-  <name>adl.feature.override.readahead</name>
-  <value>true</value>
-  <description>
-    Enables read aheads in the ADL client, the feature is used to
-    improve read throughput.
-    This works in conjunction with the value set in
-    adl.feature.override.readahead.max.buffersize.
-    When set to false the read ahead feature is turned off.
-    Default : True if not configured.
-  </description>
-</property>
-
-<property>
-  <name>adl.feature.override.readahead.max.buffersize</name>
-  <value>8388608</value>
-  <description>
-    Define maximum buffer size to cache read ahead data, this is
-    allocated per process to
-    cache read ahead data. Applicable only when
-    adl.feature.override.readahead is set to true.
-    Default : 8388608 Byte i.e. 8MB if not configured.
-  </description>
-</property>
-
-<property>
-  <name>adl.feature.override.readahead.max.concurrent.connection</name>
-  <value>2</value>
-  <description>
-    Define maximum concurrent connection can be established to
-    read ahead. If the data size is less than 4MB then only 1 read n/w
-    connection
-    is set. If the data size is less than 4MB but less than 8MB then 2 read
-    n/w connection
-    is set. Data greater than 8MB then value set under the property would
-    take
-    effect. Applicable only when adl.feature.override.readahead is set
-    to true and buffer size is greater than 8MB.
-    It is recommended to reset this property if the
-    adl.feature.override.readahead.max.buffersize
-    is less than 8MB to gain performance. Application has to consider
-    throttling limit for the account as well before configuring large
-    buffer size.
-  </description>
-</property>
-
-<property>
-  <name>fs.adl.impl</name>
-  <value>org.apache.hadoop.fs.adl.AdlFileSystem</value>
-</property>
-
-<property>
-

[1/2] hadoop git commit: Revert "HADOOP-12666. Support Microsoft Azure Data Lake - as a file system in Hadoop. Contributed by Vishwajeet Dusane."

2016-06-09 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a8f03ef7e -> 192594229


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19259422/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
deleted file mode 100644
index 7f7e749..000
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdfs.web.resources;
-
-import java.net.HttpURLConnection;
-
-/**
- * Extended Webhdfs PostOpParam to avoid redirect during append operation for
- * azure data lake storage.
- */
-
-public class ADLPostOpParam extends HttpOpParam<ADLPostOpParam.Op> {
-  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME,
-      Op.class);
-
-  /**
-   * Constructor.
-   *
-   * @param str a string representation of the parameter value.
-   */
-  public ADLPostOpParam(final String str) {
-super(DOMAIN, DOMAIN.parse(str));
-  }
-
-  @Override
-  public final String getName() {
-return NAME;
-  }
-
-  /**
-   * Post operations.
-   */
-  public static enum Op implements HttpOpParam.Op {
-APPEND(true, false, HttpURLConnection.HTTP_OK);
-
-private final boolean redirect;
-private final boolean doOutput;
-private final int expectedHttpResponseCode;
-
-Op(final boolean doOut, final boolean doRedirect,
-final int expectHttpResponseCode) {
-  this.doOutput = doOut;
-  this.redirect = doRedirect;
-  this.expectedHttpResponseCode = expectHttpResponseCode;
-}
-
-@Override
-public Type getType() {
-  return Type.POST;
-}
-
-@Override
-public boolean getRequireAuth() {
-  return false;
-}
-
-@Override
-public boolean getDoOutput() {
-  return doOutput;
-}
-
-@Override
-public boolean getRedirect() {
-  return redirect;
-}
-
-@Override
-public int getExpectedHttpResponseCode() {
-  return expectedHttpResponseCode;
-}
-
-/**
- * @return a URI query string.
- */
-@Override
-public String toQueryString() {
-  return NAME + "=" + this;
-}
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19259422/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
deleted file mode 100644
index d300a1c..000
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.hdfs.web.resources;
-
-import java.net.HttpURLConnection;
-
-/**
- * Extended Webhdfs PutOpParam to avoid redirect during Create operation for
- * azure data lake storage.
- */
-public class ADLPutOpParam 

[2/4] hadoop git commit: HADOOP-12666. Support Microsoft Azure Data Lake - as a file system in Hadoop. Contributed by Vishwajeet Dusane.

2016-06-09 Thread cnauroth
HADOOP-12666. Support Microsoft Azure Data Lake - as a file system in Hadoop. 
Contributed by Vishwajeet Dusane.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9581fb71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9581fb71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9581fb71

Branch: refs/heads/trunk
Commit: 9581fb715cbc8a6ad28566e83c6d0242a7306688
Parents: e383b73
Author: Chris Nauroth 
Authored: Thu Jun 9 14:33:31 2016 -0700
Committer: Chris Nauroth 
Committed: Thu Jun 9 14:33:31 2016 -0700

--
 .../src/main/resources/core-default.xml |   60 +
 .../conf/TestCommonConfigurationFields.java |6 +
 hadoop-project/src/site/site.xml|2 +
 .../dev-support/findbugs-exclude.xml|   24 +
 hadoop-tools/hadoop-azure-datalake/pom.xml  |  180 +++
 .../main/java/org/apache/hadoop/fs/adl/Adl.java |   52 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |   41 +
 ...hedRefreshTokenBasedAccessTokenProvider.java |  135 +++
 .../hadoop/fs/adl/oauth2/package-info.java  |   23 +
 .../org/apache/hadoop/fs/adl/package-info.java  |   23 +
 .../org/apache/hadoop/hdfs/web/ADLConfKeys.java |   61 +
 .../apache/hadoop/hdfs/web/BufferManager.java   |  180 +++
 .../web/PrivateAzureDataLakeFileSystem.java | 1108 ++
 ...hedRefreshTokenBasedAccessTokenProvider.java |   37 +
 .../hadoop/hdfs/web/oauth2/package-info.java|   24 +
 .../apache/hadoop/hdfs/web/package-info.java|   25 +
 .../hadoop/hdfs/web/resources/ADLFlush.java |   49 +
 .../hdfs/web/resources/ADLGetOpParam.java   |   96 ++
 .../hdfs/web/resources/ADLPostOpParam.java  |   97 ++
 .../hdfs/web/resources/ADLPutOpParam.java   |   94 ++
 .../hdfs/web/resources/ADLVersionInfo.java  |   51 +
 .../web/resources/AppendADLNoRedirectParam.java |   45 +
 .../web/resources/CreateADLNoRedirectParam.java |   44 +
 .../hadoop/hdfs/web/resources/LeaseParam.java   |   53 +
 .../web/resources/ReadADLNoRedirectParam.java   |   44 +
 .../hadoop/hdfs/web/resources/package-info.java |   27 +
 .../src/site/markdown/index.md  |  219 
 ...hedRefreshTokenBasedAccessTokenProvider.java |  147 +++
 hadoop-tools/hadoop-tools-dist/pom.xml  |6 +
 hadoop-tools/pom.xml|1 +
 30 files changed, 2954 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9581fb71/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 39b7132..f1d77dd 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2213,4 +2213,64 @@
     needs to be specified in net.topology.script.file.name.
   </description>
 </property>
+
+
+<!-- Azure Data Lake File System Configurations -->
+
+<property>
+  <name>adl.feature.override.readahead</name>
+  <value>true</value>
+  <description>
+    Enables read aheads in the ADL client, the feature is used to
+    improve read throughput.
+    This works in conjunction with the value set in
+    adl.feature.override.readahead.max.buffersize.
+    When set to false the read ahead feature is turned off.
+    Default : True if not configured.
+  </description>
+</property>
+
+<property>
+  <name>adl.feature.override.readahead.max.buffersize</name>
+  <value>8388608</value>
+  <description>
+    Define maximum buffer size to cache read ahead data, this is
+    allocated per process to
+    cache read ahead data. Applicable only when
+    adl.feature.override.readahead is set to true.
+    Default : 8388608 Byte i.e. 8MB if not configured.
+  </description>
+</property>
+
+<property>
+  <name>adl.feature.override.readahead.max.concurrent.connection</name>
+  <value>2</value>
+  <description>
+    Define maximum concurrent connection can be established to
+    read ahead. If the data size is less than 4MB then only 1 read n/w
+    connection
+    is set. If the data size is less than 4MB but less than 8MB then 2 read
+    n/w connection
+    is set. Data greater than 8MB then value set under the property would
+    take
+    effect. Applicable only when adl.feature.override.readahead is set
+    to true and buffer size is greater than 8MB.
+    It is recommended to reset this property if the
+    adl.feature.override.readahead.max.buffersize
+    is less than 8MB to gain performance. Application has to consider
+    throttling limit for the account as well before configuring large
+    buffer size.
+  </description>
+</property>
+
+<property>
+  <name>fs.adl.impl</name>
+  <value>org.apache.hadoop.fs.adl.AdlFileSystem</value>
+</property>
+
+<property>
+  <name>fs.AbstractFileSystem.adl.impl</name>
+  <value>org.apache.hadoop.fs.adl.Adl</value>
+</property>
+
 </configuration>
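
A short sketch of how client code consults these keys, using the same defaults the descriptions document. Configuration is the real Hadoop API; the variable names are illustrative:

import org.apache.hadoop.conf.Configuration;

public class AdlReadaheadSettings {
  public static void main(String[] args) {
    // Picks up core-default.xml and any core-site.xml overrides.
    Configuration conf = new Configuration();
    boolean readahead =
        conf.getBoolean("adl.feature.override.readahead", true);
    int maxBufferSize = conf.getInt(
        "adl.feature.override.readahead.max.buffersize", 8 * 1024 * 1024);
    int maxConnections = conf.getInt(
        "adl.feature.override.readahead.max.concurrent.connection", 2);
    System.out.println(readahead + " " + maxBufferSize + " " + maxConnections);
  }
}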


[3/4] hadoop git commit: HADOOP-12666. Support Microsoft Azure Data Lake - as a file system in Hadoop. Contributed by Vishwajeet Dusane.

2016-06-09 Thread cnauroth
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8f03ef7/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
new file mode 100644
index 000..7f7e749
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.net.HttpURLConnection;
+
+/**
+ * Extended Webhdfs PostOpParam to avoid redirect during append operation for
+ * azure data lake storage.
+ */
+
+public class ADLPostOpParam extends HttpOpParam<ADLPostOpParam.Op> {
+  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME,
+      Op.class);
+
+  /**
+   * Constructor.
+   *
+   * @param str a string representation of the parameter value.
+   */
+  public ADLPostOpParam(final String str) {
+super(DOMAIN, DOMAIN.parse(str));
+  }
+
+  @Override
+  public final String getName() {
+return NAME;
+  }
+
+  /**
+   * Post operations.
+   */
+  public static enum Op implements HttpOpParam.Op {
+APPEND(true, false, HttpURLConnection.HTTP_OK);
+
+private final boolean redirect;
+private final boolean doOutput;
+private final int expectedHttpResponseCode;
+
+Op(final boolean doOut, final boolean doRedirect,
+final int expectHttpResponseCode) {
+  this.doOutput = doOut;
+  this.redirect = doRedirect;
+  this.expectedHttpResponseCode = expectHttpResponseCode;
+}
+
+@Override
+public Type getType() {
+  return Type.POST;
+}
+
+@Override
+public boolean getRequireAuth() {
+  return false;
+}
+
+@Override
+public boolean getDoOutput() {
+  return doOutput;
+}
+
+@Override
+public boolean getRedirect() {
+  return redirect;
+}
+
+@Override
+public int getExpectedHttpResponseCode() {
+  return expectedHttpResponseCode;
+}
+
+/**
+ * @return a URI query string.
+ */
+@Override
+public String toQueryString() {
+  return NAME + "=" + this;
+}
+  }
+}
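
The class above is one instance of the WebHDFS op-param pattern: an enum carries per-operation flags (output, redirect, expected HTTP code) and renders itself as a query string. A stripped-down standalone model of the pattern; the names are illustrative, not the Hadoop types:

import java.net.HttpURLConnection;

public class OpParamModel {
  static final String NAME = "op"; // same role as HttpOpParam.NAME

  enum Op {
    // redirect=false is the whole point of the ADL variants above.
    APPEND(true, false, HttpURLConnection.HTTP_OK);

    final boolean doOutput;
    final boolean redirect;
    final int expectedHttpResponseCode;

    Op(boolean doOutput, boolean redirect, int expectedHttpResponseCode) {
      this.doOutput = doOutput;
      this.redirect = redirect;
      this.expectedHttpResponseCode = expectedHttpResponseCode;
    }

    String toQueryString() {
      return NAME + "=" + this; // e.g. "op=APPEND"
    }
  }

  public static void main(String[] args) {
    Op op = Op.valueOf("APPEND"); // what Domain.parse(str) does for real params
    System.out.println(op.toQueryString() + " redirect=" + op.redirect);
  }
}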

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8f03ef7/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
new file mode 100644
index 000..d300a1c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.net.HttpURLConnection;
+
+/**
+ * Extended Webhdfs PutOpParam to avoid redirect during Create operation for
+ * azure data lake storage.
+ */
+public class ADLPutOpParam extends HttpOpParam<ADLPutOpParam.Op> {
+  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class);
+
+  

[1/4] hadoop git commit: HADOOP-12666. Support Microsoft Azure Data Lake - as a file system in Hadoop. Contributed by Vishwajeet Dusane.

2016-06-09 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6350e4b1f -> a8f03ef7e
  refs/heads/trunk e383b732c -> 9581fb715


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9581fb71/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
new file mode 100644
index 000..7f7e749
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPostOpParam.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.net.HttpURLConnection;
+
+/**
+ * Extended Webhdfs PostOpParam to avoid redirect during append operation for
+ * azure data lake storage.
+ */
+
+public class ADLPostOpParam extends HttpOpParam<ADLPostOpParam.Op> {
+  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME,
+      Op.class);
+
+  /**
+   * Constructor.
+   *
+   * @param str a string representation of the parameter value.
+   */
+  public ADLPostOpParam(final String str) {
+super(DOMAIN, DOMAIN.parse(str));
+  }
+
+  @Override
+  public final String getName() {
+return NAME;
+  }
+
+  /**
+   * Post operations.
+   */
+  public static enum Op implements HttpOpParam.Op {
+APPEND(true, false, HttpURLConnection.HTTP_OK);
+
+private final boolean redirect;
+private final boolean doOutput;
+private final int expectedHttpResponseCode;
+
+Op(final boolean doOut, final boolean doRedirect,
+final int expectHttpResponseCode) {
+  this.doOutput = doOut;
+  this.redirect = doRedirect;
+  this.expectedHttpResponseCode = expectHttpResponseCode;
+}
+
+@Override
+public Type getType() {
+  return Type.POST;
+}
+
+@Override
+public boolean getRequireAuth() {
+  return false;
+}
+
+@Override
+public boolean getDoOutput() {
+  return doOutput;
+}
+
+@Override
+public boolean getRedirect() {
+  return redirect;
+}
+
+@Override
+public int getExpectedHttpResponseCode() {
+  return expectedHttpResponseCode;
+}
+
+/**
+ * @return a URI query string.
+ */
+@Override
+public String toQueryString() {
+  return NAME + "=" + this;
+}
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9581fb71/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
new file mode 100644
index 000..d300a1c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/resources/ADLPutOpParam.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.net.HttpURLConnection;
+
+/**
+ * Extended Webhdfs PutOpParam to avoid redirect during Create operation for
+ * azure data lake storage.
+ 

[4/4] hadoop git commit: HADOOP-12666. Support Microsoft Azure Data Lake - as a file system in Hadoop. Contributed by Vishwajeet Dusane.

2016-06-09 Thread cnauroth
HADOOP-12666. Support Microsoft Azure Data Lake - as a file system in Hadoop. 
Contributed by Vishwajeet Dusane.

(cherry picked from commit 9581fb715cbc8a6ad28566e83c6d0242a7306688)

Conflicts:
hadoop-tools/hadoop-tools-dist/pom.xml
hadoop-tools/pom.xml


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8f03ef7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8f03ef7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8f03ef7

Branch: refs/heads/branch-2
Commit: a8f03ef7ea8163c00ce5d72a4e1c77284befe5aa
Parents: 6350e4b
Author: Chris Nauroth 
Authored: Thu Jun 9 14:49:05 2016 -0700
Committer: Chris Nauroth 
Committed: Thu Jun 9 14:49:05 2016 -0700

--
 .../src/main/resources/core-default.xml |   60 +
 .../conf/TestCommonConfigurationFields.java |6 +
 hadoop-project/src/site/site.xml|2 +
 .../dev-support/findbugs-exclude.xml|   24 +
 hadoop-tools/hadoop-azure-datalake/pom.xml  |  180 +++
 .../main/java/org/apache/hadoop/fs/adl/Adl.java |   52 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |   41 +
 ...hedRefreshTokenBasedAccessTokenProvider.java |  135 +++
 .../hadoop/fs/adl/oauth2/package-info.java  |   23 +
 .../org/apache/hadoop/fs/adl/package-info.java  |   23 +
 .../org/apache/hadoop/hdfs/web/ADLConfKeys.java |   61 +
 .../apache/hadoop/hdfs/web/BufferManager.java   |  180 +++
 .../web/PrivateAzureDataLakeFileSystem.java | 1108 ++
 ...hedRefreshTokenBasedAccessTokenProvider.java |   37 +
 .../hadoop/hdfs/web/oauth2/package-info.java|   24 +
 .../apache/hadoop/hdfs/web/package-info.java|   25 +
 .../hadoop/hdfs/web/resources/ADLFlush.java |   49 +
 .../hdfs/web/resources/ADLGetOpParam.java   |   96 ++
 .../hdfs/web/resources/ADLPostOpParam.java  |   97 ++
 .../hdfs/web/resources/ADLPutOpParam.java   |   94 ++
 .../hdfs/web/resources/ADLVersionInfo.java  |   51 +
 .../web/resources/AppendADLNoRedirectParam.java |   45 +
 .../web/resources/CreateADLNoRedirectParam.java |   44 +
 .../hadoop/hdfs/web/resources/LeaseParam.java   |   53 +
 .../web/resources/ReadADLNoRedirectParam.java   |   44 +
 .../hadoop/hdfs/web/resources/package-info.java |   27 +
 .../src/site/markdown/index.md  |  219 
 ...hedRefreshTokenBasedAccessTokenProvider.java |  147 +++
 hadoop-tools/hadoop-tools-dist/pom.xml  |6 +
 hadoop-tools/pom.xml|1 +
 30 files changed, 2954 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8f03ef7/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 490f1de..41bf6d8 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2213,4 +2213,64 @@
   needs to be specified in net.topology.script.file.name.
 
   
+
+<property>
+  <name>adl.feature.override.readahead</name>
+  <value>true</value>
+  <description>
+    Enables read ahead in the ADL client; the feature is used to
+    improve read throughput.
+    This works in conjunction with the value set in
+    adl.feature.override.readahead.max.buffersize.
+    When set to false the read ahead feature is turned off.
+    Default: true if not configured.
+  </description>
+</property>
+
+<property>
+  <name>adl.feature.override.readahead.max.buffersize</name>
+  <value>8388608</value>
+  <description>
+    Defines the maximum buffer size, allocated per process, used to
+    cache read ahead data. Applicable only when
+    adl.feature.override.readahead is set to true.
+    Default: 8388608 bytes (8 MB) if not configured.
+  </description>
+</property>
+
+<property>
+  <name>adl.feature.override.readahead.max.concurrent.connection</name>
+  <value>2</value>
+  <description>
+    Defines the maximum number of concurrent connections that can be
+    established for read ahead. If the data size is less than 4 MB, one
+    read connection is used; if the data size is greater than 4 MB but
+    less than 8 MB, two read connections are used. For data larger than
+    8 MB, the value set under this property takes effect. Applicable only
+    when adl.feature.override.readahead is set to true and the buffer
+    size is greater than 8 MB.
+    It is recommended to reset this property if
+    adl.feature.override.readahead.max.buffersize is less than 8 MB, to
+    gain performance. The application also has to consider the account's
+    throttling limit before configuring a large buffer size.
+  </description>
+</property>
+
+<property>
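For illustration, these keys are consumed through the standard Hadoop Configuration API. A minimal sketch, not part of the patch; the property names and defaults mirror the descriptions above:

    import org.apache.hadoop.conf.Configuration;

    public class AdlReadAheadSettings {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Defaults below mirror the documented values.
        boolean readAhead =
            conf.getBoolean("adl.feature.override.readahead", true);
        int bufferSize = conf.getInt(
            "adl.feature.override.readahead.max.buffersize", 8 * 1024 * 1024);
        int maxConnections = conf.getInt(
            "adl.feature.override.readahead.max.concurrent.connection", 2);
        System.out.println("readAhead=" + readAhead + " bufferSize="
            + bufferSize + " maxConnections=" + maxConnections);
      }
    }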

hadoop git commit: HDFS-9905. WebHdfsFileSystem#runWithRetry should display original stack trace on error. (Wei-Chiu Chuang via iwasakims)

2016-06-09 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 577466c1f -> 5a3fed060


HDFS-9905. WebHdfsFileSystem#runWithRetry should display original stack trace 
on error. (Wei-Chiu Chuang via iwasakims)

cherry-picked from 6fcde2e38da04cae3aad6b13cf442af211f71506


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a3fed06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a3fed06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a3fed06

Branch: refs/heads/branch-2.7
Commit: 5a3fed060264596ecdce294b33770faa4d4b51b0
Parents: 577466c
Author: Masatake Iwasaki 
Authored: Sat Apr 23 23:37:56 2016 +0900
Committer: Eric Payne 
Committed: Thu Jun 9 20:48:58 2016 +

--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  6 +++--
 .../hadoop/hdfs/web/TestWebHdfsTimeouts.java| 25 +---
 2 files changed, 20 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a3fed06/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index a72fa27..ccfed0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -641,8 +641,10 @@ public class WebHdfsFileSystem extends FileSystem
 node = url.getAuthority();
   }
   try {
-  ioe = ioe.getClass().getConstructor(String.class)
-.newInstance(node + ": " + ioe.getMessage());
+IOException newIoe = ioe.getClass().getConstructor(String.class)
+.newInstance(node + ": " + ioe.getMessage());
+newIoe.setStackTrace(ioe.getStackTrace());
+ioe = newIoe;
   } catch (NoSuchMethodException | SecurityException 
| InstantiationException | IllegalAccessException
| IllegalArgumentException | InvocationTargetException e) {

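Distilled from the hunk above: the retry path now rebuilds the IOException with the node name prefixed to its message and then copies the original stack trace onto the new instance, so the real failure site is not lost. A sketch of the pattern, with the individual reflective exceptions collapsed into ReflectiveOperationException for brevity:

    // Works only for IOException subclasses exposing a (String) constructor.
    private static IOException withNodePrefix(IOException ioe, String node) {
      try {
        IOException newIoe = ioe.getClass().getConstructor(String.class)
            .newInstance(node + ": " + ioe.getMessage());
        newIoe.setStackTrace(ioe.getStackTrace());  // keep the original trace
        return newIoe;
      } catch (ReflectiveOperationException | SecurityException
          | IllegalArgumentException e) {
        return ioe;  // fall back to the unmodified exception
      }
    }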
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a3fed06/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index 5419093..ee97d73 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdfs.web;
 
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
@@ -43,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -115,7 +115,8 @@ public class TestWebHdfsTimeouts {
   fs.listFiles(new Path("/"), false);
   fail("expected timeout");
 } catch (SocketTimeoutException e) {
-  assertEquals(fs.getUri().getAuthority() + ": connect timed out", 
e.getMessage());
+  GenericTestUtils.assertExceptionContains(fs.getUri().getAuthority()
+  + ": connect timed out",e);
 }
   }
 
@@ -128,7 +129,8 @@ public class TestWebHdfsTimeouts {
   fs.listFiles(new Path("/"), false);
   fail("expected timeout");
 } catch (SocketTimeoutException e) {
-  assertEquals(fs.getUri().getAuthority() + ": Read timed out", 
e.getMessage());
+  GenericTestUtils.assertExceptionContains(fs.getUri().getAuthority() +
+  ": Read timed out", e);
 }
   }
 
@@ -143,7 +145,8 @@ public class TestWebHdfsTimeouts {
   fs.getDelegationToken("renewer");
   fail("expected timeout");
 } catch (SocketTimeoutException e) {
-  assertEquals(fs.getUri().getAuthority() + ": connect timed out", 
e.getMessage());
+  GenericTestUtils.assertExceptionContains(fs.getUri().getAuthority() +
+  ": connect timed out", e);
 }
   }
 
@@ -157,7 +160,8 @@ public class TestWebHdfsTimeouts {
   fs.getDelegationToken("renewer");
   

hadoop git commit: HADOOP-12893. Verify LICENSE.txt and NOTICE.txt. Contributed by Xiao Chen, Akira Ajisaka, and Andrew Wang.

2016-06-09 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 58c348685 -> e383b732c


HADOOP-12893. Verify LICENSE.txt and NOTICE.txt. Contributed by Xiao Chen, 
Akira Ajisaka, and Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e383b732
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e383b732
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e383b732

Branch: refs/heads/trunk
Commit: e383b732c54c542482b0b836e2d2c46eb49b4e2d
Parents: 58c3486
Author: Andrew Wang 
Authored: Thu Jun 9 13:54:14 2016 -0700
Committer: Andrew Wang 
Committed: Thu Jun 9 13:54:14 2016 -0700

--
 LICENSE.txt | 1017 +-
 NOTICE.txt  |  266 ++
 hadoop-build-tools/pom.xml  |   41 ++
 hadoop-project-dist/pom.xml |2 +
 hadoop-project/pom.xml  |   19 +-
 pom.xml |   26 +
 6 files changed, 1367 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e383b732/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 929e2a8..44880df 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -320,7 +320,9 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 
OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-For com.google.re2j.* classes:
+The binary distribution of this product bundles these dependencies under the
+following license:
+re2j 1.0
 -
 This is a work derived from Russ Cox's RE2 in Go, whose license
 http://golang.org/LICENSE is as follows:
@@ -548,12 +550,14 @@ 
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css
+And the binary distribution of this product bundles these dependencies under 
the
+following license:
+Mockito 1.8.5
+SLF4J 1.7.10
 

 
 The MIT License (MIT)
 
-Copyright (c) 2011-2016 Twitter, Inc.
-
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
@@ -648,3 +652,1010 @@ 
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/d3.v3.js
 
 D3 is available under a 3-clause BSD license. For details, see:
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/d3-LICENSE
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+HSQLDB Database 2.0.0
+
+"COPYRIGHTS AND LICENSES (based on BSD License)
+
+For work developed by the HSQL Development Group:
+
+Copyright (c) 2001-2016, The HSQL Development Group
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+Neither the name of the HSQL Development Group nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS""
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL HSQL DEVELOPMENT GROUP, HSQLDB.ORG,
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+For work originally developed by the Hypersonic SQL Group:
+
+Copyright (c) 1995-2000 by the Hypersonic SQL Group.
+All rights 

hadoop git commit: HADOOP-13175. Remove hadoop-ant from hadoop-tools. Contributed by Chris Douglas.

2016-06-09 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 31ffaf76f -> 58c348685


HADOOP-13175. Remove hadoop-ant from hadoop-tools. Contributed by Chris Douglas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58c34868
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58c34868
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58c34868

Branch: refs/heads/trunk
Commit: 58c3486850c0503aecdeae8b67bb7e6bc42b4da8
Parents: 31ffaf7
Author: Andrew Wang 
Authored: Thu Jun 9 13:49:52 2016 -0700
Committer: Andrew Wang 
Committed: Thu Jun 9 13:49:52 2016 -0700

--
 hadoop-project/pom.xml  |   5 -
 hadoop-tools/hadoop-ant/pom.xml |  56 -
 .../java/org/apache/hadoop/ant/DfsTask.java | 220 ---
 .../ant/condition/DfsBaseConditional.java   |  68 --
 .../apache/hadoop/ant/condition/DfsExists.java  |  24 --
 .../apache/hadoop/ant/condition/DfsIsDir.java   |  24 --
 .../apache/hadoop/ant/condition/DfsZeroLen.java |  24 --
 .../resources/org/apache/hadoop/ant/antlib.xml  |  29 ---
 hadoop-tools/hadoop-tools-dist/pom.xml  |   6 -
 hadoop-tools/pom.xml|   1 -
 10 files changed, 457 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58c34868/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 4c618a1..35166b1 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -375,11 +375,6 @@
 hadoop-extras
 ${project.version}
   
-  
-org.apache.hadoop
-hadoop-ant
-${project.version}
-  
 
   
 org.apache.hadoop

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58c34868/hadoop-tools/hadoop-ant/pom.xml
--
diff --git a/hadoop-tools/hadoop-ant/pom.xml b/hadoop-tools/hadoop-ant/pom.xml
deleted file mode 100644
index e0b038e..000
--- a/hadoop-tools/hadoop-ant/pom.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-  http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha1-SNAPSHOT</version>
-    <relativePath>../../hadoop-project</relativePath>
-  </parent>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-ant</artifactId>
-  <version>3.0.0-alpha1-SNAPSHOT</version>
-  <description>Apache Hadoop Ant Tasks</description>
-  <name>Apache Hadoop Ant Tasks</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.ant</groupId>
-      <artifactId>ant</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-annotations</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs-client</artifactId>
-      <scope>provided</scope>
-    </dependency>
-  </dependencies>
-
-</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58c34868/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
--
diff --git 
a/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java 
b/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
deleted file mode 100644
index 36119f5..000
--- a/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ant;
-
-import java.io.ByteArrayOutputStream;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.io.UnsupportedEncodingException;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-import java.util.LinkedList;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.tools.ant.AntClassLoader;
-import org.apache.tools.ant.BuildException;
-import org.apache.tools.ant.Task;
-import org.apache.hadoop.util.ToolRunner;
-import 

[1/3] hadoop git commit: HADOOP-12537 S3A to support Amazon STS temporary credentials. Contributed by Sean Mackrory.

2016-06-09 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f6cd1bcf8 -> 6350e4b1f
  refs/heads/branch-2.8 2be48e7d1 -> faf9a9f4e
  refs/heads/trunk 9378d9428 -> 31ffaf76f


HADOOP-12537 S3A to support Amazon STS temporary credentials. Contributed by 
Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/faf9a9f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/faf9a9f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/faf9a9f4

Branch: refs/heads/branch-2.8
Commit: faf9a9f4e0f256e5d00df841d6c1fa0c973a1b2e
Parents: 2be48e7
Author: Steve Loughran 
Authored: Thu Jun 9 20:58:30 2016 +0100
Committer: Steve Loughran 
Committed: Thu Jun 9 20:58:30 2016 +0100

--
 .../src/main/resources/core-default.xml |   5 +
 hadoop-project/pom.xml  |   8 +-
 hadoop-tools/hadoop-aws/pom.xml |   5 +
 .../fs/s3a/BasicAWSCredentialsProvider.java |   3 +-
 .../org/apache/hadoop/fs/s3a/Constants.java |   3 +
 .../s3a/CredentialInitializationException.java  |  46 ++
 .../fs/s3a/TemporaryAWSCredentialsProvider.java |  70 +
 .../src/site/markdown/tools/hadoop-aws/index.md |  71 -
 .../fs/s3a/TestS3ATemporaryCredentials.java | 150 +++
 9 files changed, 357 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/faf9a9f4/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 7963f33..608e68e 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -761,6 +761,11 @@
 
 
 
+<property>
+  <name>fs.s3a.session.token</name>
+  <description>The session token used with temporary credentials. Used only
+    with provider org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider.
+  </description>
+</property>
+
 <property>
   <name>fs.s3a.connection.maximum</name>
   <value>15</value>
   <description>Controls the maximum number of simultaneous connections
     to S3.</description>

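As a usage sketch (not from the patch): a client selects the new provider and supplies the three temporary values issued by STS. The access and secret key property names are the standard s3a ones; the bucket name and placeholder values are hypothetical.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class S3ASessionCredentialsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.s3a.aws.credentials.provider",
            "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider");
        conf.set("fs.s3a.access.key", "<temporary-access-key>");
        conf.set("fs.s3a.secret.key", "<temporary-secret-key>");
        conf.set("fs.s3a.session.token", "<sts-session-token>");
        // All s3a access through fs now signs with the session credentials.
        FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
        System.out.println(fs.getUri());
      }
    }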
http://git-wip-us.apache.org/repos/asf/hadoop/blob/faf9a9f4/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 388a375..492afed 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -112,6 +112,7 @@
 1.0-beta-1
 1.0-alpha-8
 900
+    <aws-java-sdk.version>1.10.6</aws-java-sdk.version>
   
 
   
@@ -685,7 +686,12 @@
   
     <groupId>com.amazonaws</groupId>
     <artifactId>aws-java-sdk-s3</artifactId>
-    <version>1.10.6</version>
+    <version>${aws-java-sdk.version}</version>
+  </dependency>
+  <dependency>
+    <groupId>com.amazonaws</groupId>
+    <artifactId>aws-java-sdk-sts</artifactId>
+    <version>${aws-java-sdk.version}</version>
   </dependency>
   <dependency>
     <groupId>org.apache.mina</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/faf9a9f4/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index a084318..fc0ef0f 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -214,6 +214,11 @@
   <scope>compile</scope>
 </dependency>
 <dependency>
+  <groupId>com.amazonaws</groupId>
+  <artifactId>aws-java-sdk-sts</artifactId>
+  <scope>test</scope>
+</dependency>
+<dependency>
   <groupId>junit</groupId>
   <artifactId>junit</artifactId>
   <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/faf9a9f4/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
index 3a5ee8c..61be43f 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.fs.s3a;
 
-import com.amazonaws.AmazonClientException;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.BasicAWSCredentials;
 import com.amazonaws.auth.AWSCredentials;
@@ -49,7 +48,7 @@ public class BasicAWSCredentialsProvider implements 
AWSCredentialsProvider {
 if (!StringUtils.isEmpty(accessKey) && !StringUtils.isEmpty(secretKey)) {
   return new BasicAWSCredentials(accessKey, secretKey);
 }
-throw new AmazonClientException(
+throw new CredentialInitializationException(
 "Access key or secret key is null");
   }
 
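The replacement exception comes from the new CredentialInitializationException.java listed in the change summary; its body is not shown in this message. A plausible minimal shape, assuming it only specializes AmazonClientException so callers can tell credential setup failures apart from other client errors:

    import com.amazonaws.AmazonClientException;

    // Assumed sketch, not the committed class body.
    public class CredentialInitializationException extends AmazonClientException {
      public CredentialInitializationException(String message) {
        super(message);
      }
    }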

http://git-wip-us.apache.org/repos/asf/hadoop/blob/faf9a9f4/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java

[3/3] hadoop git commit: HADOOP-12537 S3A to support Amazon STS temporary credentials. Contributed by Sean Mackrory.

2016-06-09 Thread stevel
HADOOP-12537 S3A to support Amazon STS temporary credentials. Contributed by 
Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31ffaf76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31ffaf76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31ffaf76

Branch: refs/heads/trunk
Commit: 31ffaf76f2b6e1fd2a141daa4daaebdfecefe727
Parents: 9378d94
Author: Steve Loughran 
Authored: Thu Jun 9 20:58:30 2016 +0100
Committer: Steve Loughran 
Committed: Thu Jun 9 21:00:47 2016 +0100

--
 .../src/main/resources/core-default.xml |   5 +
 hadoop-project/pom.xml  |   8 +-
 hadoop-tools/hadoop-aws/pom.xml |   5 +
 .../fs/s3a/BasicAWSCredentialsProvider.java |   3 +-
 .../org/apache/hadoop/fs/s3a/Constants.java |   3 +
 .../s3a/CredentialInitializationException.java  |  46 ++
 .../fs/s3a/TemporaryAWSCredentialsProvider.java |  70 +
 .../src/site/markdown/tools/hadoop-aws/index.md |  71 -
 .../fs/s3a/TestS3ATemporaryCredentials.java | 150 +++
 9 files changed, 357 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 8bb27ea..39b7132 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -806,6 +806,11 @@
 
 
 
+<property>
+  <name>fs.s3a.session.token</name>
+  <description>The session token used with temporary credentials. Used only
+    with provider org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider.
+  </description>
+</property>
+
 <property>
   <name>fs.s3a.connection.maximum</name>
   <value>15</value>
   <description>Controls the maximum number of simultaneous connections
     to S3.</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 2b6b162..4c618a1 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -116,6 +116,7 @@
 1.0-beta-1
 1.0-alpha-8
 900
+    <aws-java-sdk.version>1.10.6</aws-java-sdk.version>
   
 
   
@@ -690,7 +691,12 @@
   
     <groupId>com.amazonaws</groupId>
     <artifactId>aws-java-sdk-s3</artifactId>
-    <version>1.10.6</version>
+    <version>${aws-java-sdk.version}</version>
+  </dependency>
+  <dependency>
+    <groupId>com.amazonaws</groupId>
+    <artifactId>aws-java-sdk-sts</artifactId>
+    <version>${aws-java-sdk.version}</version>
   </dependency>
   <dependency>
     <groupId>org.apache.mina</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index c95f1e6..7c25e60 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -231,6 +231,11 @@
   <scope>compile</scope>
 </dependency>
 <dependency>
+  <groupId>com.amazonaws</groupId>
+  <artifactId>aws-java-sdk-sts</artifactId>
+  <scope>test</scope>
+</dependency>
+<dependency>
   <groupId>junit</groupId>
   <artifactId>junit</artifactId>
   <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
index 3a5ee8c..61be43f 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.fs.s3a;
 
-import com.amazonaws.AmazonClientException;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.BasicAWSCredentials;
 import com.amazonaws.auth.AWSCredentials;
@@ -49,7 +48,7 @@ public class BasicAWSCredentialsProvider implements 
AWSCredentialsProvider {
 if (!StringUtils.isEmpty(accessKey) && !StringUtils.isEmpty(secretKey)) {
   return new BasicAWSCredentials(accessKey, secretKey);
 }
-throw new AmazonClientException(
+throw new CredentialInitializationException(
 "Access key or secret key is null");
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ffaf76/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 

[2/3] hadoop git commit: HADOOP-12537 S3A to support Amazon STS temporary credentials. Contributed by Sean Mackrory.

2016-06-09 Thread stevel
HADOOP-12537 S3A to support Amazon STS temporary credentials. Contributed by 
Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6350e4b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6350e4b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6350e4b1

Branch: refs/heads/branch-2
Commit: 6350e4b1fb7a552b55b5eaedd8f8ea6bb55ed555
Parents: f6cd1bc
Author: Steve Loughran 
Authored: Thu Jun 9 20:58:30 2016 +0100
Committer: Steve Loughran 
Committed: Thu Jun 9 20:59:54 2016 +0100

--
 .../src/main/resources/core-default.xml |   5 +
 hadoop-project/pom.xml  |   8 +-
 hadoop-tools/hadoop-aws/pom.xml |   5 +
 .../fs/s3a/BasicAWSCredentialsProvider.java |   3 +-
 .../org/apache/hadoop/fs/s3a/Constants.java |   3 +
 .../s3a/CredentialInitializationException.java  |  46 ++
 .../fs/s3a/TemporaryAWSCredentialsProvider.java |  70 +
 .../src/site/markdown/tools/hadoop-aws/index.md |  71 -
 .../fs/s3a/TestS3ATemporaryCredentials.java | 150 +++
 9 files changed, 357 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6350e4b1/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 95582a8..490f1de 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -799,6 +799,11 @@
 
 
 
+<property>
+  <name>fs.s3a.session.token</name>
+  <description>The session token used with temporary credentials. Used only
+    with provider org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider.
+  </description>
+</property>
+
 <property>
   <name>fs.s3a.connection.maximum</name>
   <value>15</value>
   <description>Controls the maximum number of simultaneous connections
     to S3.</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6350e4b1/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 1eac7f1..6fa660a 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -112,6 +112,7 @@
 1.0-beta-1
 1.0-alpha-8
 900
+    <aws-java-sdk.version>1.10.6</aws-java-sdk.version>
   
 
   
@@ -685,7 +686,12 @@
   
     <groupId>com.amazonaws</groupId>
     <artifactId>aws-java-sdk-s3</artifactId>
-    <version>1.10.6</version>
+    <version>${aws-java-sdk.version}</version>
+  </dependency>
+  <dependency>
+    <groupId>com.amazonaws</groupId>
+    <artifactId>aws-java-sdk-sts</artifactId>
+    <version>${aws-java-sdk.version}</version>
   </dependency>
   <dependency>
     <groupId>org.apache.mina</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6350e4b1/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index e11d043..aada612 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -214,6 +214,11 @@
   <scope>compile</scope>
 </dependency>
 <dependency>
+  <groupId>com.amazonaws</groupId>
+  <artifactId>aws-java-sdk-sts</artifactId>
+  <scope>test</scope>
+</dependency>
+<dependency>
   <groupId>junit</groupId>
   <artifactId>junit</artifactId>
   <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6350e4b1/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
index 3a5ee8c..61be43f 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.fs.s3a;
 
-import com.amazonaws.AmazonClientException;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.BasicAWSCredentials;
 import com.amazonaws.auth.AWSCredentials;
@@ -49,7 +48,7 @@ public class BasicAWSCredentialsProvider implements 
AWSCredentialsProvider {
 if (!StringUtils.isEmpty(accessKey) && !StringUtils.isEmpty(secretKey)) {
   return new BasicAWSCredentials(accessKey, secretKey);
 }
-throw new AmazonClientException(
+throw new CredentialInitializationException(
 "Access key or secret key is null");
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6350e4b1/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 

hadoop git commit: YARN-5191. Renamed the newly added “download=true” option for getting logs via NMWebServices and AHSWebServices to be a better "format" option. (Xuan Gong via vinodkv)

2016-06-09 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 aef6e455b -> f6cd1bcf8


YARN-5191. Renamed the newly added “download=true” option for getting logs 
via NMWebServices and AHSWebServices to be a better "format" option. (Xuan Gong 
via vinodkv)

(cherry picked from commit 9378d9428f127eff7acd6c13544016cdbf2d65fb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6cd1bcf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6cd1bcf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6cd1bcf

Branch: refs/heads/branch-2
Commit: f6cd1bcf896ac91157dd02b0ae988c06eb70706b
Parents: aef6e45
Author: Vinod Kumar Vavilapalli 
Authored: Thu Jun 9 12:30:58 2016 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Thu Jun 9 12:32:53 2016 -0700

--
 .../hadoop/yarn/webapp/util/WebAppUtils.java| 18 ++
 .../webapp/AHSWebServices.java  | 36 
 .../nodemanager/webapp/NMWebServices.java   | 33 +++---
 .../nodemanager/webapp/TestNMWebServices.java   | 15 +++-
 4 files changed, 74 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6cd1bcf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index faf4a77..3aa773a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -24,6 +24,7 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -400,4 +401,21 @@ public class WebAppUtils {
 }
 return aid;
   }
+
+  public static String getSupportedLogContentType(String format) {
+if (format.equalsIgnoreCase("text")) {
+  return "text/plain";
+} else if (format.equalsIgnoreCase("octet-stream")) {
+  return "application/octet-stream";
+}
+return null;
+  }
+
+  public static String getDefaultLogContentType() {
+return "text/plain";
+  }
+
+  public static List<String> listSupportedLogContentType() {
+return Arrays.asList("text", "octet-stream");
+  }
 }

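A short usage sketch of the helpers added above (not from the patch): resolve the incoming "format" query parameter to a response content type, with text/plain as the fallback.

    // format would typically arrive via @QueryParam("format").
    String format = "octet-stream";
    String contentType = WebAppUtils.getSupportedLogContentType(format);
    if (contentType == null) {
      // Unsupported value; a service can reject it, listing
      // WebAppUtils.listSupportedLogContentType(), or fall back:
      contentType = WebAppUtils.getDefaultLogContentType();  // "text/plain"
    }
    // contentType is now "application/octet-stream".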
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6cd1bcf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index 59dbd44..692b172 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -66,6 +66,7 @@ import 
org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import com.google.common.base.Joiner;
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
@@ -212,7 +213,7 @@ public class AHSWebServices extends WebServices {
   @Context HttpServletResponse res,
   @PathParam("containerid") String containerIdStr,
   @PathParam("filename") String filename,
-  @QueryParam("download") String download,
+  @QueryParam("format") String format,
   @QueryParam("size") String size) {
 init(res);
 ContainerId containerId;
@@ -223,9 +224,6 @@ public class AHSWebServices extends WebServices {
   

hadoop git commit: YARN-5191. Renamed the newly added “download=true” option for getting logs via NMWebServices and AHSWebServices to be a better "format" option. (Xuan Gong via vinodkv)

2016-06-09 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 656c460c0 -> 9378d9428


YARN-5191. Renamed the newly added “download=true” option for getting logs 
via NMWebServices and AHSWebServices to be a better "format" option. (Xuan Gong 
via vinodkv)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9378d942
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9378d942
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9378d942

Branch: refs/heads/trunk
Commit: 9378d9428f127eff7acd6c13544016cdbf2d65fb
Parents: 656c460
Author: Vinod Kumar Vavilapalli 
Authored: Thu Jun 9 12:30:58 2016 -0700
Committer: Vinod Kumar Vavilapalli 
Committed: Thu Jun 9 12:30:58 2016 -0700

--
 .../hadoop/yarn/webapp/util/WebAppUtils.java| 18 ++
 .../webapp/AHSWebServices.java  | 36 
 .../nodemanager/webapp/NMWebServices.java   | 33 +++---
 .../nodemanager/webapp/TestNMWebServices.java   | 15 +++-
 4 files changed, 74 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9378d942/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index faf4a77..3aa773a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -24,6 +24,7 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -400,4 +401,21 @@ public class WebAppUtils {
 }
 return aid;
   }
+
+  public static String getSupportedLogContentType(String format) {
+if (format.equalsIgnoreCase("text")) {
+  return "text/plain";
+} else if (format.equalsIgnoreCase("octet-stream")) {
+  return "application/octet-stream";
+}
+return null;
+  }
+
+  public static String getDefaultLogContentType() {
+return "text/plain";
+  }
+
+  public static List<String> listSupportedLogContentType() {
+return Arrays.asList("text", "octet-stream");
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9378d942/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index 59dbd44..692b172 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -66,6 +66,7 @@ import 
org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import com.google.common.base.Joiner;
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
@@ -212,7 +213,7 @@ public class AHSWebServices extends WebServices {
   @Context HttpServletResponse res,
   @PathParam("containerid") String containerIdStr,
   @PathParam("filename") String filename,
-  @QueryParam("download") String download,
+  @QueryParam("format") String format,
   @QueryParam("size") String size) {
 init(res);
 ContainerId containerId;
@@ -223,9 +224,6 @@ public class AHSWebServices extends WebServices {
   "Invalid ContainerId: " + containerIdStr);
 }
 
-boolean downloadFile = 

hadoop git commit: YARN-5199. Close LogReader in NMWebServices#getLogs. Contributed by Xuan Gong.

2016-06-09 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7e09601a9 -> 2be48e7d1


YARN-5199. Close LogReader in NMWebServices#getLogs. Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2be48e7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2be48e7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2be48e7d

Branch: refs/heads/branch-2.8
Commit: 2be48e7d1534452bde9c86028b8059fac66d0829
Parents: 7e09601
Author: Junping Du 
Authored: Thu Jun 9 12:29:25 2016 -0700
Committer: Junping Du 
Committed: Thu Jun 9 12:29:25 2016 -0700

--
 .../server/nodemanager/webapp/NMWebServices.java  | 18 +++---
 1 file changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2be48e7d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
index fddeb04..06e9abc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
@@ -36,7 +36,7 @@ import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.Status;
 import javax.ws.rs.core.StreamingOutput;
 import javax.ws.rs.core.UriInfo;
-
+import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -241,13 +241,17 @@ public class NMWebServices {
 @Override
 public void write(OutputStream os) throws IOException,
 WebApplicationException {
-  int bufferSize = 65536;
-  byte[] buf = new byte[bufferSize];
-  int len;
-  while ((len = fis.read(buf, 0, bufferSize)) > 0) {
-os.write(buf, 0, len);
+  try {
+int bufferSize = 65536;
+byte[] buf = new byte[bufferSize];
+int len;
+while ((len = fis.read(buf, 0, bufferSize)) > 0) {
+  os.write(buf, 0, len);
+}
+os.flush();
+  } finally {
+IOUtils.closeQuietly(fis);
   }
-  os.flush();
 }
   };
   
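The same guarantee can be expressed with try-with-resources instead of IOUtils.closeQuietly; an equivalent sketch for illustration, not the committed code, where fis is the log FileInputStream opened earlier in getLogs:

    StreamingOutput stream = new StreamingOutput() {
      @Override
      public void write(OutputStream os) throws IOException,
          WebApplicationException {
        try (InputStream in = fis) {  // closes fis on success and on failure
          byte[] buf = new byte[65536];
          int len;
          while ((len = in.read(buf, 0, buf.length)) > 0) {
            os.write(buf, 0, len);
          }
          os.flush();
        }
      }
    };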





hadoop git commit: YARN-5210 NPE in Distributed Shell while publishing DS_CONTAINER_START event (Varun Saxena via Vrushali C)

2016-06-09 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 434e898a4 -> c39b9c4c9


YARN-5210 NPE in Distributed Shell while publishing DS_CONTAINER_START event 
(Varun Saxena via Vrushali C)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c39b9c4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c39b9c4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c39b9c4c

Branch: refs/heads/YARN-2928
Commit: c39b9c4c95c4d6677a121c4a1fdebdc7d2edcbf4
Parents: 434e898
Author: Vrushali Channapattan 
Authored: Thu Jun 9 11:36:52 2016 -0700
Committer: Vrushali Channapattan 
Committed: Thu Jun 9 11:36:52 2016 -0700

--
 .../distributedshell/ApplicationMaster.java | 23 ++--
 1 file changed, 12 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39b9c4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index dbe9c64..851bafe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -978,16 +978,13 @@ public class ApplicationMaster {
   }
   Container container = containers.get(containerId);
   if (container != null) {
-applicationMaster.nmClientAsync.getContainerStatusAsync(containerId, 
container.getNodeId());
+applicationMaster.nmClientAsync.getContainerStatusAsync(
+containerId, container.getNodeId());
   }
   if(applicationMaster.timelineClient != null) {
-applicationMaster.publishContainerStartEvent(
-applicationMaster.timelineClient, container,
-applicationMaster.domainId, applicationMaster.appSubmitterUgi);
-
 if (applicationMaster.timelineServiceV2) {
-applicationMaster.publishContainerStartEventOnTimelineServiceV2(
-container);
+  applicationMaster.publishContainerStartEventOnTimelineServiceV2(
+  container);
 } else {
   applicationMaster.publishContainerStartEvent(
 applicationMaster.timelineClient, container,
@@ -1355,12 +1352,13 @@ public class ApplicationMaster {
 new 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity();
 entity.setId(container.getId().toString());
 entity.setType(DSEntity.DS_CONTAINER.toString());
-//entity.setDomainId(domainId);
+long ts = System.currentTimeMillis();
+entity.setCreatedTime(ts);
 entity.addInfo("user", appSubmitterUgi.getShortUserName());
 
 org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent event =
 new org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent();
-event.setTimestamp(System.currentTimeMillis());
+event.setTimestamp(ts);
 event.setId(DSEvent.DS_CONTAINER_START.toString());
 event.addInfo("Node", container.getNodeId().toString());
 event.addInfo("Resources", container.getResource().toString());
@@ -1418,12 +1416,15 @@ public class ApplicationMaster {
 new 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity();
 entity.setId(appAttemptID.toString());
 entity.setType(DSEntity.DS_APP_ATTEMPT.toString());
-//entity.setDomainId(domainId);
+long ts = System.currentTimeMillis();
+if (appEvent == DSEvent.DS_APP_ATTEMPT_START) {
+  entity.setCreatedTime(ts);
+}
 entity.addInfo("user", appSubmitterUgi.getShortUserName());
 org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent event =
 new org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent();
 event.setId(appEvent.toString());
-event.setTimestamp(System.currentTimeMillis());
+event.setTimestamp(ts);
 entity.addEvent(event);
 
 try {

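Distilled from the hunks above: the fix takes one timestamp, sets it as the entity's createdTime (previously never set, which led to the NPE downstream), and reuses it for the event. A sketch of the pattern, with the DSEntity/DSEvent enum values written as string literals for brevity; container is assumed to be in scope:

    long ts = System.currentTimeMillis();

    TimelineEntity entity = new TimelineEntity();  // timelineservice (v2) record
    entity.setId(container.getId().toString());
    entity.setType("DS_CONTAINER");
    entity.setCreatedTime(ts);                     // the previously missing field

    TimelineEvent event = new TimelineEvent();
    event.setId("DS_CONTAINER_START");
    event.setTimestamp(ts);                        // same ts as createdTime
    entity.addEvent(event);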


[2/2] hadoop git commit: YARN-4837. User facing aspects of 'AM blacklisting' feature need fixing. (vinodkv via wangda)

2016-06-09 Thread wangda
YARN-4837. User facing aspects of 'AM blacklisting' feature need fixing. 
(vinodkv via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aef6e455
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aef6e455
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aef6e455

Branch: refs/heads/branch-2
Commit: aef6e455b12f818ccd0de0468c1ec6fbe213c5d8
Parents: df29f77
Author: Wangda Tan 
Authored: Thu Jun 9 10:08:31 2016 -0700
Committer: Wangda Tan 
Committed: Thu Jun 9 10:08:31 2016 -0700

--
 .../yarn/api/records/AMBlackListingRequest.java |  67 -
 .../records/ApplicationSubmissionContext.java   |  23 --
 .../hadoop/yarn/conf/YarnConfiguration.java |  25 +-
 .../src/main/proto/yarn_protos.proto|   1 -
 .../yarn/conf/TestYarnConfigurationFields.java  |   7 +
 .../impl/pb/AMBlackListingRequestPBImpl.java| 104 
 .../pb/ApplicationSubmissionContextPBImpl.java  |  40 ---
 .../src/main/resources/yarn-default.xml |  19 --
 .../hadoop/yarn/api/TestPBImplRecords.java  |  10 -
 .../blacklist/BlacklistManager.java |   9 +-
 .../blacklist/BlacklistUpdates.java |  47 
 .../blacklist/DisabledBlacklistManager.java |  12 +-
 .../blacklist/SimpleBlacklistManager.java   |  17 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  79 ++
 .../rmapp/attempt/RMAppAttempt.java |   2 +-
 .../rmapp/attempt/RMAppAttemptImpl.java |  85 +--
 .../scheduler/AbstractYarnScheduler.java|   2 +-
 .../scheduler/AppSchedulingInfo.java|  74 +++---
 .../scheduler/SchedulerAppUtils.java|  16 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  33 ++-
 .../scheduler/capacity/CapacityScheduler.java   |  11 +-
 .../allocator/RegularContainerAllocator.java|   2 +-
 .../scheduler/fair/FSLeafQueue.java |   2 +-
 .../scheduler/fair/FairScheduler.java   |   8 +-
 .../scheduler/fifo/FifoScheduler.java   |  12 +-
 .../webapp/RMAppAttemptBlock.java   |   9 +-
 .../resourcemanager/webapp/RMAppBlock.java  |  13 +-
 .../resourcemanager/webapp/RMWebServices.java   |  21 +-
 .../webapp/dao/AMBlackListingRequestInfo.java   |  61 -
 .../webapp/dao/AppAttemptInfo.java  |   8 +-
 .../dao/ApplicationSubmissionContextInfo.java   |  13 -
 .../TestNodeBlacklistingOnAMFailures.java   | 251 +++
 .../applicationsmanager/TestAMRestart.java  | 177 +
 .../blacklist/TestBlacklistManager.java |  29 +--
 .../rmapp/TestRMAppTransitions.java |  58 -
 .../scheduler/TestAppSchedulingInfo.java|  12 +-
 .../capacity/TestCapacityScheduler.java |   8 +-
 .../scheduler/fair/TestFSAppAttempt.java|  12 +-
 .../scheduler/fair/TestFairScheduler.java   |   9 +-
 .../TestRMWebServicesAppsModification.java  |  39 ++-
 40 files changed, 536 insertions(+), 891 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aef6e455/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
deleted file mode 100644
index 4aec2ba..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMBlackListingRequest.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.api.records;
-
-import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import 

[1/2] hadoop git commit: YARN-4837. User facing aspects of 'AM blacklisting' feature need fixing. (vinodkv via wangda)

2016-06-09 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 df29f7784 -> aef6e455b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aef6e455/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
index 60b728e..e8c8bca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
@@ -42,7 +42,7 @@ public class AppAttemptInfo {
   protected String nodeId;
   protected String logsLink;
   protected String blacklistedNodes;
-  protected String rmBlacklistedNodesForAMLaunches;
+  private String nodesBlacklistedBySystem;
   protected String appAttemptId;
 
   public AppAttemptInfo() {
@@ -69,9 +69,9 @@ public class AppAttemptInfo {
 + masterContainer.getNodeHttpAddress(),
 ConverterUtils.toString(masterContainer.getId()), user);
 
-rmBlacklistedNodesForAMLaunches = StringUtils.join(
-attempt.getAMBlacklist().getBlacklistUpdates().getAdditions(),
-", ");
+nodesBlacklistedBySystem =
+StringUtils.join(attempt.getAMBlacklistManager()
+  .getBlacklistUpdates().getBlacklistAdditions(), ", ");
 if (rm.getResourceScheduler() instanceof AbstractYarnScheduler) {
   AbstractYarnScheduler ayScheduler =
   (AbstractYarnScheduler) rm.getResourceScheduler();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aef6e455/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
index 4cbe7a8..3d95ca1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java
@@ -87,9 +87,6 @@ public class ApplicationSubmissionContextInfo {
   @XmlElement(name = "reservation-id")
   String reservationId;
 
-  @XmlElement(name = "am-black-listing-requests")
-  AMBlackListingRequestInfo amBlackListingRequestInfo;
-
   public ApplicationSubmissionContextInfo() {
 applicationId = "";
 applicationName = "";
@@ -106,7 +103,6 @@ public class ApplicationSubmissionContextInfo {
 logAggregationContextInfo = null;
 attemptFailuresValidityInterval = -1;
 reservationId = "";
-amBlackListingRequestInfo = null;
   }
 
   public String getApplicationId() {
@@ -173,10 +169,6 @@ public class ApplicationSubmissionContextInfo {
 return attemptFailuresValidityInterval;
   }
 
-  public AMBlackListingRequestInfo getAMBlackListingRequestInfo() {
-return amBlackListingRequestInfo;
-  }
-
   public String getReservationId() {
 return reservationId;
   }
@@ -252,9 +244,4 @@ public class ApplicationSubmissionContextInfo {
   public void setReservationId(String reservationId) {
 this.reservationId = reservationId;
   }
-
-  public void setAMBlackListingRequestInfo(
-  AMBlackListingRequestInfo amBlackListingRequestInfo) {
-this.amBlackListingRequestInfo = amBlackListingRequestInfo;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aef6e455/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
--
diff --git 

[2/3] hadoop git commit: HADOOP-13237: s3a initialization against public bucket fails if caller lacks any credentials. Contributed by Chris Nauroth

2016-06-09 Thread stevel
HADOOP-13237: s3a initialization against public bucket fails if caller lacks 
any credentials. Contributed by Chris Nauroth


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e09601a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e09601a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e09601a

Branch: refs/heads/branch-2.8
Commit: 7e09601a90303874f37c647d860a217bffe85311
Parents: 3b2a25b
Author: Steve Loughran 
Authored: Thu Jun 9 16:36:27 2016 +0100
Committer: Steve Loughran 
Committed: Thu Jun 9 16:36:51 2016 +0100

--
 .../src/main/resources/core-default.xml | 13 -
 .../fs/s3a/AnonymousAWSCredentialsProvider.java | 11 
 .../fs/s3a/BasicAWSCredentialsProvider.java |  8 +++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 22 +---
 .../src/site/markdown/tools/hadoop-aws/index.md | 14 -
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   | 55 
 6 files changed, 113 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e09601a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 3294bb5..7963f33 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -746,7 +746,18 @@
 
 <property>
   <name>fs.s3a.aws.credentials.provider</name>
-  <description>Class name of a credentials provider that implements com.amazonaws.auth.AWSCredentialsProvider. Omit if using access/secret keys or another authentication mechanism.</description>
+  <description>
+    Class name of a credentials provider that implements
+    com.amazonaws.auth.AWSCredentialsProvider.  Omit if using access/secret keys
+    or another authentication mechanism.  The specified class must provide an
+    accessible constructor accepting java.net.URI and
+    org.apache.hadoop.conf.Configuration, or an accessible default constructor.
+    Specifying org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider allows
+    anonymous access to a publicly accessible S3 bucket without any credentials.
+    Please note that allowing anonymous access to an S3 bucket compromises
+    security and therefore is unsuitable for most use cases.  It can be useful
+    for accessing public data sets without requiring AWS credentials.
+  </description>
 </property>
 
 <property>
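
The constructor contract spelled out in that description is the
interesting part of the patch: per the text above, the class named in
fs.s3a.aws.credentials.provider is instantiated reflectively. As a rough
illustration of a user-supplied provider (this sketch is not part of the
patch; the class name and the two configuration keys are invented):

    import java.net.URI;

    import com.amazonaws.auth.AWSCredentials;
    import com.amazonaws.auth.AWSCredentialsProvider;
    import com.amazonaws.auth.BasicAWSCredentials;
    import org.apache.hadoop.conf.Configuration;

    // Hypothetical provider satisfying the documented contract: an
    // accessible constructor taking java.net.URI and
    // org.apache.hadoop.conf.Configuration.
    public class MyCustomCredentialsProvider implements AWSCredentialsProvider {
      private final String accessKey;
      private final String secretKey;

      public MyCustomCredentialsProvider(URI uri, Configuration conf) {
        // Placeholder property names, used here for illustration only.
        this.accessKey = conf.getTrimmed("example.s3a.access.key", "");
        this.secretKey = conf.getTrimmed("example.s3a.secret.key", "");
      }

      @Override
      public AWSCredentials getCredentials() {
        return new BasicAWSCredentials(accessKey, secretKey);
      }

      @Override
      public void refresh() {
        // Static credentials; nothing to refresh.
      }
    }

Wiring such a class in is then just a matter of setting
fs.s3a.aws.credentials.provider to its fully qualified name.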

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e09601a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
index e62ec77..2c863fc 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
@@ -24,6 +24,17 @@ import com.amazonaws.auth.AWSCredentials;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+/**
+ * AnonymousAWSCredentialsProvider supports anonymous access to AWS services
+ * through the AWS SDK.  AWS requests will not be signed.  This is not suitable
+ * for most cases, because allowing anonymous access to an S3 bucket compromises
+ * security.  This can be useful for accessing public data sets without
+ * requiring AWS credentials.
+ *
+ * Please note that users may reference this class name from configuration
+ * property fs.s3a.aws.credentials.provider.  Therefore, changing the class name
+ * would be a backward-incompatible change.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public class AnonymousAWSCredentialsProvider implements AWSCredentialsProvider {
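
For context, opting in to anonymous access with this provider is a single
property in the user's configuration, per the core-default.xml description
above; a minimal sketch:

    <property>
      <name>fs.s3a.aws.credentials.provider</name>
      <value>org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider</value>
    </property>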

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e09601a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
index 2f721e4..3a5ee8c 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
+++ 

[3/3] hadoop git commit: HADOOP-13237: s3a initialization against public bucket fails if caller lacks any credentials. Contributed by Chris Nauroth

2016-06-09 Thread stevel
HADOOP-13237: s3a initialization against public bucket fails if caller lacks any credentials. Contributed by Chris Nauroth


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/656c460c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/656c460c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/656c460c

Branch: refs/heads/trunk
Commit: 656c460c0e79ee144d6ef48d85cec04a1af3b2cc
Parents: 8ea9bbc
Author: Steve Loughran 
Authored: Thu Jun 9 16:36:27 2016 +0100
Committer: Steve Loughran 
Committed: Thu Jun 9 17:28:49 2016 +0100

--
 .../src/main/resources/core-default.xml | 13 -
 .../fs/s3a/AnonymousAWSCredentialsProvider.java | 11 
 .../fs/s3a/BasicAWSCredentialsProvider.java |  8 +++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 22 +---
 .../src/site/markdown/tools/hadoop-aws/index.md | 14 -
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   | 55 
 6 files changed, 113 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/656c460c/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index a65246b..8bb27ea 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -791,7 +791,18 @@
 
 <property>
   <name>fs.s3a.aws.credentials.provider</name>
-  <description>Class name of a credentials provider that implements com.amazonaws.auth.AWSCredentialsProvider. Omit if using access/secret keys or another authentication mechanism.</description>
+  <description>
+    Class name of a credentials provider that implements
+    com.amazonaws.auth.AWSCredentialsProvider.  Omit if using access/secret keys
+    or another authentication mechanism.  The specified class must provide an
+    accessible constructor accepting java.net.URI and
+    org.apache.hadoop.conf.Configuration, or an accessible default constructor.
+    Specifying org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider allows
+    anonymous access to a publicly accessible S3 bucket without any credentials.
+    Please note that allowing anonymous access to an S3 bucket compromises
+    security and therefore is unsuitable for most use cases.  It can be useful
+    for accessing public data sets without requiring AWS credentials.
+  </description>
 </property>
 
 <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/656c460c/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
index e62ec77..2c863fc 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
@@ -24,6 +24,17 @@ import com.amazonaws.auth.AWSCredentials;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+/**
+ * AnonymousAWSCredentialsProvider supports anonymous access to AWS services
+ * through the AWS SDK.  AWS requests will not be signed.  This is not suitable
+ * for most cases, because allowing anonymous access to an S3 bucket compromises
+ * security.  This can be useful for accessing public data sets without
+ * requiring AWS credentials.
+ *
+ * Please note that users may reference this class name from configuration
+ * property fs.s3a.aws.credentials.provider.  Therefore, changing the class name
+ * would be a backward-incompatible change.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public class AnonymousAWSCredentialsProvider implements AWSCredentialsProvider {
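
The hunk above only adds the class comment; the implementation itself is
not shown in this digest. For readers who want the flavor of such an
unsigned-credentials provider, a sketch (written for this note, not a
quote of the Hadoop source):

    import com.amazonaws.auth.AWSCredentials;
    import com.amazonaws.auth.AWSCredentialsProvider;
    import com.amazonaws.auth.AnonymousAWSCredentials;

    // Illustrative anonymous provider: handing the SDK
    // AnonymousAWSCredentials causes requests to go out unsigned.
    public class AnonymousProviderSketch implements AWSCredentialsProvider {
      @Override
      public AWSCredentials getCredentials() {
        return new AnonymousAWSCredentials();
      }

      @Override
      public void refresh() {
        // Nothing to refresh for anonymous access.
      }
    }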

http://git-wip-us.apache.org/repos/asf/hadoop/blob/656c460c/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
index 2f721e4..3a5ee8c 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
+++ 

[1/3] hadoop git commit: HADOOP-13237: s3a initialization against public bucket fails if caller lacks any credentials. Contributed by Chris Nauroth

2016-06-09 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a086fd904 -> df29f7784
  refs/heads/branch-2.8 3b2a25bc4 -> 7e09601a9
  refs/heads/trunk 8ea9bbce2 -> 656c460c0


HADOOP-13237: s3a initialization against public bucket fails if caller lacks any credentials. Contributed by Chris Nauroth


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df29f778
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df29f778
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df29f778

Branch: refs/heads/branch-2
Commit: df29f7784641f8d0f48124567bf4297a53168b64
Parents: a086fd9
Author: Steve Loughran 
Authored: Thu Jun 9 16:36:27 2016 +0100
Committer: Steve Loughran 
Committed: Thu Jun 9 16:36:27 2016 +0100

--
 .../src/main/resources/core-default.xml | 13 -
 .../fs/s3a/AnonymousAWSCredentialsProvider.java | 11 
 .../fs/s3a/BasicAWSCredentialsProvider.java |  8 +++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 22 +---
 .../src/site/markdown/tools/hadoop-aws/index.md | 14 -
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   | 55 
 6 files changed, 113 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df29f778/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index b84741a..95582a8 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -784,7 +784,18 @@
 
 <property>
   <name>fs.s3a.aws.credentials.provider</name>
-  <description>Class name of a credentials provider that implements com.amazonaws.auth.AWSCredentialsProvider. Omit if using access/secret keys or another authentication mechanism.</description>
+  <description>
+    Class name of a credentials provider that implements
+    com.amazonaws.auth.AWSCredentialsProvider.  Omit if using access/secret keys
+    or another authentication mechanism.  The specified class must provide an
+    accessible constructor accepting java.net.URI and
+    org.apache.hadoop.conf.Configuration, or an accessible default constructor.
+    Specifying org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider allows
+    anonymous access to a publicly accessible S3 bucket without any credentials.
+    Please note that allowing anonymous access to an S3 bucket compromises
+    security and therefore is unsuitable for most use cases.  It can be useful
+    for accessing public data sets without requiring AWS credentials.
+  </description>
 </property>
 
 <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df29f778/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
index e62ec77..2c863fc 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AnonymousAWSCredentialsProvider.java
@@ -24,6 +24,17 @@ import com.amazonaws.auth.AWSCredentials;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+/**
+ * AnonymousAWSCredentialsProvider supports anonymous access to AWS services
+ * through the AWS SDK.  AWS requests will not be signed.  This is not suitable
+ * for most cases, because allowing anonymous access to an S3 bucket compromises
+ * security.  This can be useful for accessing public data sets without
+ * requiring AWS credentials.
+ *
+ * Please note that users may reference this class name from configuration
+ * property fs.s3a.aws.credentials.provider.  Therefore, changing the class name
+ * would be a backward-incompatible change.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public class AnonymousAWSCredentialsProvider implements AWSCredentialsProvider {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df29f778/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
index 2f721e4..3a5ee8c 100644
--- 

hadoop git commit: Added missing entries to hadoop-yarn-project/CHANGES.txt for YARN-5069 YARN-5009 YARN-5008 YARN-4751 YARN-3362

2016-06-09 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 ab0679ed3 -> 577466c1f


Added missing entries to hadoop-yarn-project/CHANGES.txt for YARN-5069 YARN-5009 YARN-5008 YARN-4751 YARN-3362


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/577466c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/577466c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/577466c1

Branch: refs/heads/branch-2.7
Commit: 577466c1f20dd0d04892698f65a29400aeeab530
Parents: ab0679e
Author: Eric Payne 
Authored: Thu Jun 9 16:28:37 2016 +
Committer: Eric Payne 
Committed: Thu Jun 9 16:28:37 2016 +

--
 hadoop-yarn-project/CHANGES.txt | 15 +++
 1 file changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/577466c1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f4ee662..eea86db 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -151,6 +151,21 @@ Release 2.7.3 - UNRELEASED
     YARN-4288. Fixed RMProxy to retry on IOException from local host.
     (Junping Du via jianhe)
 
+    YARN-5069. TestFifoScheduler.testResourceOverCommit race condition.
+    (Eric Badger via epayne)
+
+    YARN-5009. NMLeveldbStateStoreService database can grow substantially
+    leading to longer recovery times. (Jason Lowe via jianhe)
+
+    YARN-5008. LeveldbRMStateStore database can grow substantially leading to
+    long recovery times. (Jason Lowe via jianhe)
+
+    YARN-4751. In 2.7, Labeled queue usage not shown properly in capacity
+    scheduler UI. (Eric Payne via naganarasimha_gr)
+
+    YARN-3362. Add node label usage in RM CapacityScheduler web UI.
+    (Eric Payne via naganarasimha_gr)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES





hadoop git commit: HADOOP-12743. Fix git environment check during test-patch (aw)

2016-06-09 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0b7388b5d -> a086fd904


HADOOP-12743. Fix git environment check during test-patch (aw)

(cherry picked from commit d323639686eab28f1510031e52e4390f82d78989)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a086fd90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a086fd90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a086fd90

Branch: refs/heads/branch-2
Commit: a086fd90408c0dca13d22d66ab528a68b4f57f42
Parents: 0b7388b
Author: Allen Wittenauer 
Authored: Tue Jan 26 15:46:57 2016 -0800
Committer: Akira Ajisaka 
Committed: Thu Jun 9 23:27:07 2016 +0900

--
 dev-support/bin/yetus-wrapper | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a086fd90/dev-support/bin/yetus-wrapper
--
diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper
index 4b1e21e..ddcc7a5 100755
--- a/dev-support/bin/yetus-wrapper
+++ b/dev-support/bin/yetus-wrapper
@@ -165,6 +165,7 @@ if [[ $? != 0 ]]; then
 fi
 
 if [[ -x "${HADOOP_PATCHPROCESS}/yetus-${HADOOP_YETUS_VERSION}/bin/${WANTED}" ]]; then
+  popd >/dev/null
   exec "${HADOOP_PATCHPROCESS}/yetus-${HADOOP_YETUS_VERSION}/bin/${WANTED}" "${ARGV[@]}"
 fi
 
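The one-line fix works because the wrapper has evidently pushd'd into
${HADOOP_PATCHPROCESS} earlier in the script to download and unpack the
Yetus release; without a matching popd, the exec'd binary inherits that
scratch directory as its working directory, which is presumably what broke
the git environment check. A hedged sketch of the pattern, with invented
paths:

    #!/usr/bin/env bash
    # Illustration of the pushd/popd pairing restored by this patch;
    # /tmp/yetus-work and the binary path are placeholders.
    pushd /tmp/yetus-work >/dev/null    # wander off to fetch and unpack
    # ... download and extract the release here ...
    popd >/dev/null                     # return BEFORE exec so the tool
                                        # runs from the caller's directory
    exec /tmp/yetus-work/bin/test-patch "$@"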





hadoop git commit: HADOOP-12743. Fix git environment check during test-patch (aw)

2016-06-09 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 f583fa5f4 -> 3b2a25bc4


HADOOP-12743. Fix git environment check during test-patch (aw)

(cherry picked from commit d323639686eab28f1510031e52e4390f82d78989)
(cherry picked from commit a086fd90408c0dca13d22d66ab528a68b4f57f42)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b2a25bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b2a25bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b2a25bc

Branch: refs/heads/branch-2.8
Commit: 3b2a25bc4b040ba87ca6aa17b160e007bb8d931e
Parents: f583fa5
Author: Allen Wittenauer 
Authored: Tue Jan 26 15:46:57 2016 -0800
Committer: Akira Ajisaka 
Committed: Thu Jun 9 23:27:23 2016 +0900

--
 dev-support/bin/yetus-wrapper | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b2a25bc/dev-support/bin/yetus-wrapper
--
diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper
index f0eee9e..760cc53 100755
--- a/dev-support/bin/yetus-wrapper
+++ b/dev-support/bin/yetus-wrapper
@@ -165,6 +165,7 @@ if [[ $? != 0 ]]; then
 fi
 
 if [[ -x "${HADOOP_PATCHPROCESS}/yetus-${HADOOP_YETUS_VERSION}/bin/${WANTED}" ]]; then
+  popd >/dev/null
   exec "${HADOOP_PATCHPROCESS}/yetus-${HADOOP_YETUS_VERSION}/bin/${WANTED}" "${ARGV[@]}"
 fi
 




