[hadoop] Git Push Summary

2017-03-13 Thread junping_du
Repository: hadoop
Updated Tags:  refs/tags/release-2.8.0-RC2 [created] b203c6db5


hadoop git commit: HDFS-11482. Add storage type demand to into DFSNetworkTopology#chooseRandom. Contributed by Chen Liang.

2017-03-13 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk 55796a094 -> 9832ae0ed


HDFS-11482. Add storage type demand to into DFSNetworkTopology#chooseRandom. 
Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9832ae0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9832ae0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9832ae0e

Branch: refs/heads/trunk
Commit: 9832ae0ed8853d29072c9ea7031cd2373e6b16f9
Parents: 55796a0
Author: Chen Liang 
Authored: Mon Mar 13 17:30:10 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Mar 13 17:30:10 2017 -0700

--
 .../org/apache/hadoop/net/InnerNodeImpl.java|   8 +-
 .../net/NetworkTopologyWithNodeGroup.java   |   2 +-
 .../hadoop/hdfs/net/DFSNetworkTopology.java | 289 
 .../hadoop/hdfs/net/DFSTopologyNodeImpl.java| 275 
 .../blockmanagement/DatanodeDescriptor.java |   9 +
 .../apache/hadoop/hdfs/DFSNetworkTopology.java  |  36 --
 .../apache/hadoop/hdfs/DFSTopologyNodeImpl.java | 253 ---
 .../hadoop/hdfs/TestDFSNetworkTopology.java | 260 ---
 .../hadoop/hdfs/net/TestDFSNetworkTopology.java | 449 +++
 9 files changed, 1027 insertions(+), 554 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9832ae0e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
index 81eaf7f..5a2931b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
@@ -63,7 +63,7 @@ public class InnerNodeImpl extends NodeBase implements 
InnerNode {
   /** Judge if this node represents a rack
* @return true if it has no child or its children are not InnerNodes
*/
-  boolean isRack() {
+  public boolean isRack() {
 if (children.isEmpty()) {
   return true;
 }
@@ -81,7 +81,7 @@ public class InnerNodeImpl extends NodeBase implements 
InnerNode {
* @param n a node
* @return true if this node is an ancestor of n
*/
-  protected boolean isAncestor(Node n) {
+  public boolean isAncestor(Node n) {
 return getPath(this).equals(NodeBase.PATH_SEPARATOR_STR) ||
   (n.getNetworkLocation()+NodeBase.PATH_SEPARATOR_STR).
   startsWith(getPath(this)+NodeBase.PATH_SEPARATOR_STR);
@@ -92,12 +92,12 @@ public class InnerNodeImpl extends NodeBase implements 
InnerNode {
* @param n a node
* @return true if this node is the parent of n
*/
-  protected boolean isParent(Node n) {
+  public boolean isParent(Node n) {
 return n.getNetworkLocation().equals(getPath(this));
   }
 
   /* Return a child name of this node who is an ancestor of node n */
-  protected String getNextAncestorName(Node n) {
+  public String getNextAncestorName(Node n) {
 if (!isAncestor(n)) {
   throw new IllegalArgumentException(
  this + "is not an ancestor of " + n);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9832ae0e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
index a20d5fc..bec0fe1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
@@ -308,7 +308,7 @@ public class NetworkTopologyWithNodeGroup extends 
NetworkTopology {
 }
 
 @Override
-boolean isRack() {
+public boolean isRack() {
   // it is node group
   if (getChildren().isEmpty()) {
 return false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9832ae0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
new file mode 100644
index 0000000..ee83dba
--- /dev/null
+++ 

hadoop git commit: HDFS-11395. RequestHedgingProxyProvider#RequestHedgingInvocationHandler hides the Exception thrown from NameNode. Contributed by Nandakumar.

2017-03-13 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0da850a47 -> e03d8ff48


HDFS-11395. RequestHedgingProxyProvider#RequestHedgingInvocationHandler hides 
the Exception thrown from NameNode. Contributed by Nandakumar.

(cherry picked from commit 55796a0946f80a35055701a34379e374399009c5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e03d8ff4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e03d8ff4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e03d8ff4

Branch: refs/heads/branch-2
Commit: e03d8ff488c5818fbe167fa0143c252d7b6287f0
Parents: 0da850a
Author: Jing Zhao 
Authored: Mon Mar 13 14:14:09 2017 -0700
Committer: Jing Zhao 
Committed: Mon Mar 13 14:26:44 2017 -0700

--
 .../hadoop/io/retry/RetryInvocationHandler.java |  17 ++-
 .../ha/RequestHedgingProxyProvider.java |  41 +--
 .../ha/TestRequestHedgingProxyProvider.java | 108 ++-
 3 files changed, 151 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e03d8ff4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index 8487602..ffdd928 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -240,12 +240,15 @@ public class RetryInvocationHandler<T> implements 
RpcInvocationHandler {
 private final long delay;
 private final RetryAction action;
 private final long expectedFailoverCount;
+private final Exception failException;
 
-RetryInfo(long delay, RetryAction action, long expectedFailoverCount) {
+RetryInfo(long delay, RetryAction action, long expectedFailoverCount,
+Exception failException) {
   this.delay = delay;
   this.retryTime = Time.monotonicNow() + delay;
   this.action = action;
   this.expectedFailoverCount = expectedFailoverCount;
+  this.failException = failException;
 }
 
 boolean isFailover() {
@@ -258,11 +261,16 @@ public class RetryInvocationHandler<T> implements 
RpcInvocationHandler {
   && action.action ==  RetryAction.RetryDecision.FAIL;
 }
 
+Exception getFailException() {
+  return failException;
+}
+
 static RetryInfo newRetryInfo(RetryPolicy policy, Exception e,
 Counters counters, boolean idempotentOrAtMostOnce,
 long expectedFailoverCount) throws Exception {
   RetryAction max = null;
   long maxRetryDelay = 0;
+  Exception ex = null;
 
   final Iterable<Exception> exceptions = e instanceof MultiException ?
   ((MultiException) e).getExceptions().values()
@@ -279,10 +287,13 @@ public class RetryInvocationHandler<T> implements 
RpcInvocationHandler {
 
 if (max == null || max.action.compareTo(a.action) < 0) {
   max = a;
+  if (a.action == RetryAction.RetryDecision.FAIL) {
+ex = exception;
+  }
 }
   }
 
-  return new RetryInfo(maxRetryDelay, max, expectedFailoverCount);
+  return new RetryInfo(maxRetryDelay, max, expectedFailoverCount, ex);
 }
   }
 
@@ -359,7 +370,7 @@ public class RetryInvocationHandler<T> implements 
RpcInvocationHandler {
   + ". Not retrying because " + retryInfo.action.reason, e);
 }
   }
-  throw e;
+  throw retryInfo.getFailException();
 }
 
 log(method, retryInfo.isFailover(), counters.failovers, retryInfo.delay, 
e);
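
The net effect of the change above: newRetryInfo remembers the underlying exception whose retry decision was FAIL, and the handler rethrows that exception instead of the wrapping MultiException, so callers see the real error from the NameNode. A rough, self-contained sketch of the selection step (the names and the stand-in policy below are illustrative, not the Hadoop API):

```java
import java.util.LinkedHashMap;
import java.util.Map;

/** Rough sketch of the idea: when every hedged proxy fails, surface the
 *  exception behind the terminal FAIL decision instead of an aggregate
 *  wrapper. Names and the stand-in policy are illustrative. */
public class FailExceptionSketch {
  enum Decision { RETRY, FAILOVER_AND_RETRY, FAIL }

  // Stand-in retry policy: treat this one exception type as terminal.
  static Decision decide(Exception e) {
    return e instanceof UnsupportedOperationException
        ? Decision.FAIL : Decision.FAILOVER_AND_RETRY;
  }

  static Exception pickFailException(Map<String, Exception> perProxy) {
    Exception fail = null;
    for (Exception e : perProxy.values()) {
      if (decide(e) == Decision.FAIL) {
        fail = e;  // remember the real cause so it can be rethrown
      }
    }
    return fail;
  }

  public static void main(String[] args) {
    Map<String, Exception> perProxy = new LinkedHashMap<>();
    perProxy.put("nn1", new IllegalStateException("standby"));
    perProxy.put("nn2", new UnsupportedOperationException("no such method"));
    System.out.println(pickFailException(perProxy));  // nn2's exception
  }
}
```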

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e03d8ff4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 945e92f..a765e95 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import java.io.IOException;
 import 

hadoop git commit: HDFS-11395. RequestHedgingProxyProvider#RequestHedgingInvocationHandler hides the Exception thrown from NameNode. Contributed by Nandakumar.

2017-03-13 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk b8c69557b -> 55796a094


HDFS-11395. RequestHedgingProxyProvider#RequestHedgingInvocationHandler hides 
the Exception thrown from NameNode. Contributed by Nandakumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55796a09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55796a09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55796a09

Branch: refs/heads/trunk
Commit: 55796a0946f80a35055701a34379e374399009c5
Parents: b8c6955
Author: Jing Zhao 
Authored: Mon Mar 13 14:14:09 2017 -0700
Committer: Jing Zhao 
Committed: Mon Mar 13 14:24:51 2017 -0700

--
 .../hadoop/io/retry/RetryInvocationHandler.java |  17 ++-
 .../ha/RequestHedgingProxyProvider.java |  41 +--
 .../ha/TestRequestHedgingProxyProvider.java | 108 ++-
 3 files changed, 151 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55796a09/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index 8487602..ffdd928 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -240,12 +240,15 @@ public class RetryInvocationHandler<T> implements 
RpcInvocationHandler {
 private final long delay;
 private final RetryAction action;
 private final long expectedFailoverCount;
+private final Exception failException;
 
-RetryInfo(long delay, RetryAction action, long expectedFailoverCount) {
+RetryInfo(long delay, RetryAction action, long expectedFailoverCount,
+Exception failException) {
   this.delay = delay;
   this.retryTime = Time.monotonicNow() + delay;
   this.action = action;
   this.expectedFailoverCount = expectedFailoverCount;
+  this.failException = failException;
 }
 
 boolean isFailover() {
@@ -258,11 +261,16 @@ public class RetryInvocationHandler<T> implements 
RpcInvocationHandler {
   && action.action ==  RetryAction.RetryDecision.FAIL;
 }
 
+Exception getFailException() {
+  return failException;
+}
+
 static RetryInfo newRetryInfo(RetryPolicy policy, Exception e,
 Counters counters, boolean idempotentOrAtMostOnce,
 long expectedFailoverCount) throws Exception {
   RetryAction max = null;
   long maxRetryDelay = 0;
+  Exception ex = null;
 
   final Iterable<Exception> exceptions = e instanceof MultiException ?
   ((MultiException) e).getExceptions().values()
@@ -279,10 +287,13 @@ public class RetryInvocationHandler<T> implements 
RpcInvocationHandler {
 
 if (max == null || max.action.compareTo(a.action) < 0) {
   max = a;
+  if (a.action == RetryAction.RetryDecision.FAIL) {
+ex = exception;
+  }
 }
   }
 
-  return new RetryInfo(maxRetryDelay, max, expectedFailoverCount);
+  return new RetryInfo(maxRetryDelay, max, expectedFailoverCount, ex);
 }
   }
 
@@ -359,7 +370,7 @@ public class RetryInvocationHandler<T> implements 
RpcInvocationHandler {
   + ". Not retrying because " + retryInfo.action.reason, e);
 }
   }
-  throw e;
+  throw retryInfo.getFailException();
 }
 
 log(method, retryInfo.isFailover(), counters.failovers, retryInfo.delay, 
e);
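
For context, hedging is selected per nameservice on the client side. A hedged sketch of the relevant settings (the nameservice id, namenode ids, and addresses below are placeholders):

```java
import org.apache.hadoop.conf.Configuration;

/** Client-side settings that select RequestHedgingProxyProvider for an HA
 *  nameservice. The id "mycluster" and the addresses are placeholders. */
public class HedgingClientConfSketch {
  static Configuration hedgedConf() {
    Configuration conf = new Configuration();
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.mycluster.nn1", "host1:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn2", "host2:8020");
    // Invoke all namenodes concurrently and take the first response.
    conf.set("dfs.client.failover.proxy.provider.mycluster",
        "org.apache.hadoop.hdfs.server.namenode.ha."
            + "RequestHedgingProxyProvider");
    return conf;
  }
}
```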

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55796a09/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 945e92f..a765e95 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import java.io.IOException;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Method;
 import 

hadoop git commit: HADOOP-14170. FileSystemContractBaseTest is not cleaning up test directory clearly. Contributed by Mingliang Liu

2017-03-13 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5a40bafda -> b8c69557b


HADOOP-14170. FileSystemContractBaseTest is not cleaning up test directory 
clearly. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8c69557
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8c69557
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8c69557

Branch: refs/heads/trunk
Commit: b8c69557b7a23ff9c4c0b2c9d595338a08b873f1
Parents: 5a40baf
Author: Mingliang Liu 
Authored: Fri Mar 10 18:44:27 2017 -0800
Committer: Mingliang Liu 
Committed: Mon Mar 13 14:15:02 2017 -0700

--
 .../hadoop/fs/FileSystemContractBaseTest.java   | 247 ---
 .../fs/TestRawLocalFileSystemContract.java  |  24 +-
 .../fs/s3a/ITestS3AFileSystemContract.java  |  39 +--
 3 files changed, 178 insertions(+), 132 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8c69557/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index 6247959..78ba1f9 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -24,8 +24,9 @@ import java.util.ArrayList;
 
 import junit.framework.TestCase;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
@@ -45,8 +46,8 @@ import org.apache.hadoop.util.StringUtils;
  * 
  */
 public abstract class FileSystemContractBaseTest extends TestCase {
-  private static final Log LOG =
-LogFactory.getLog(FileSystemContractBaseTest.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(FileSystemContractBaseTest.class);
 
   protected final static String TEST_UMASK = "062";
   protected FileSystem fs;
@@ -54,15 +55,46 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 
   @Override
   protected void tearDown() throws Exception {
-try {
-  if (fs != null) {
-fs.delete(path("/test"), true);
+if (fs != null) {
+  // some cases use this absolute path
+  if (rootDirTestEnabled()) {
+cleanupDir(path("/FileSystemContractBaseTest"));
   }
+  // others use this relative path against test base directory
+  cleanupDir(getTestBaseDir());
+}
+super.tearDown();
+  }
+
+  private void cleanupDir(Path p) {
+try {
+  LOG.info("Deleting " + p);
+  fs.delete(p, true);
 } catch (IOException e) {
-  LOG.error("Error deleting /test: " + e, e);
+  LOG.error("Error deleting test dir: " + p, e);
 }
   }
-  
+
+  /**
+   * Test base directory for resolving relative test paths.
+   *
+   * The default value is /user/$USER/FileSystemContractBaseTest. Subclass may
+   * set specific test base directory.
+   */
+  protected Path getTestBaseDir() {
+return new Path(fs.getWorkingDirectory(), "FileSystemContractBaseTest");
+  }
+
+  /**
+   * For absolute path return the fully qualified path while for relative path
+   * return the fully qualified path against {@link #getTestBaseDir()}.
+   */
+  protected final Path path(String pathString) {
+Path p = new Path(pathString).makeQualified(fs.getUri(), getTestBaseDir());
+LOG.info("Resolving {} -> {}", pathString, p);
+return p;
+  }
+
   protected int getBlockSize() {
 return 1024;
   }
@@ -81,6 +113,17 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
   }
 
   /**
+   * Override this if the filesystem does not enable testing root directories.
+   *
+   * If this returns true, the test will create and delete test directories and
+   * files under root directory, which may have side effects, e.g. fail tests
+   * with PermissionDenied exceptions.
+   */
+  protected boolean rootDirTestEnabled() {
+return true;
+  }
+
+  /**
* Override this if the filesystem is not case sensitive
* @return true if the case detection/preservation tests should run
*/
@@ -102,24 +145,24 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 Path workDir = path(getDefaultWorkingDirectory());
 assertEquals(workDir, fs.getWorkingDirectory());
 

hadoop git commit: HDFS-11499. Decommissioning stuck because of failing recovery. Contributed by Lukas Majercak and Manoj Govindassamy.

2017-03-13 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 ef99e5ed8 -> 830a60237


HDFS-11499. Decommissioning stuck because of failing recovery. Contributed by 
Lukas Majercak and Manoj Govindassamy.

(cherry picked from commit 385d2cb777a0272ac20c62336c944fad295d5d12)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

(cherry picked from commit 60be2e5d8a1a6a8921c68f8b0f428b55152d05db)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/830a6023
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/830a6023
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/830a6023

Branch: refs/heads/branch-2.7
Commit: 830a602375ee4055c84b998734290ded78b68d70
Parents: ef99e5e
Author: Wei-Chiu Chuang 
Authored: Mon Mar 13 13:45:12 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Mar 13 13:45:12 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/BlockManager.java|  7 ++-
 .../apache/hadoop/hdfs/TestDecommission.java| 48 
 3 files changed, 57 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/830a6023/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3234fc2..fb3186f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -229,6 +229,9 @@ Release 2.7.4 - UNRELEASED
 
 HDFS-11379. DFSInputStream may infinite loop requesting block locations. 
Contributed by Daryn Sharp.
 
+HDFS-11499. Decommissioning stuck because of failing recovery.
+Contributed by Lukas Majercak and Manoj Govindassamy.
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/830a6023/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index b4b5b5f..cc6c881 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -640,7 +640,12 @@ public class BlockManager {
 
 final boolean b = commitBlock(
 (BlockInfoContiguousUnderConstruction) lastBlock, commitBlock);
-if(countNodes(lastBlock).liveReplicas() >= minReplication)
+
+// Count replicas on decommissioning nodes, as these will not be
+// decommissioned unless recovery/completing last block has finished
+NumberReplicas numReplicas = countNodes(lastBlock);
+if(numReplicas.liveReplicas() + numReplicas.decommissioning() >=
+minReplication)
   completeBlock(bc, bc.numBlocks()-1, iip, false);
 return b;
   }
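
The fix counts replicas on decommissioning nodes toward the minimum, since those nodes cannot finish decommissioning until the open file's last block completes. A minimal, self-contained sketch of the check, with a record standing in for the real NumberReplicas class:

```java
/** Minimal sketch of the new sufficiency check; Replicas stands in for
 *  the real NumberReplicas class in o.a.h.hdfs.server.blockmanagement. */
public class ReplicaCheckSketch {
  record Replicas(int live, int decommissioning) {}

  static boolean canCompleteLastBlock(Replicas r, int minReplication) {
    // Decommissioning nodes still hold valid replicas; counting them
    // keeps decommission from stalling on an open file's last block.
    return r.live() + r.decommissioning() >= minReplication;
  }

  public static void main(String[] args) {
    // One live plus two decommissioning replicas now satisfies
    // minReplication = 3, so the last block can be completed.
    System.out.println(canCompleteLastBlock(new Replicas(1, 2), 3)); // true
  }
}
```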

http://git-wip-us.apache.org/repos/asf/hadoop/blob/830a6023/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 5e892d7..7d8cc59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -869,6 +869,54 @@ public class TestDecommission {
 
 fdos.close();
   }
+
+  @Test(timeout = 360000)
+  public void testDecommissionWithOpenFileAndBlockRecovery()
+  throws IOException, InterruptedException {
+startCluster(1, 6, conf);
+cluster.waitActive();
+
+Path file = new Path("/testRecoveryDecommission");
+
+// Create a file and never close the output stream to trigger recovery
+DistributedFileSystem dfs = cluster.getFileSystem();
+FSNamesystem ns = cluster.getNamesystem(0);
+FSDataOutputStream out = dfs.create(file, true,
+conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+(short) 3, blockSize);
+
+// Write data to the file
+long 

hadoop git commit: YARN-6318. timeline service schema creator fails if executed from a remote machine (Sangjin Lee via Varun Saxena)

2017-03-13 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355-branch-2 55ca1edb3 -> 62e6d52be


YARN-6318. timeline service schema creator fails if executed from a remote 
machine (Sangjin Lee via Varun Saxena)

(cherry picked from commit 68ec0d9b471356d3adef15d7826f57ee50f8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62e6d52b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62e6d52b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62e6d52b

Branch: refs/heads/YARN-5355-branch-2
Commit: 62e6d52be6be22d39cee46a4e6357eaaef3057ce
Parents: 55ca1ed
Author: Varun Saxena 
Authored: Tue Mar 14 02:05:01 2017 +0530
Committer: Varun Saxena 
Committed: Tue Mar 14 02:09:37 2017 +0530

--
 .../storage/TimelineSchemaCreator.java  |  5 ++-
 .../common/HBaseTimelineStorageUtils.java   | 29 ++---
 .../common/TestHBaseTimelineStorageUtils.java   | 33 
 3 files changed, 54 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62e6d52b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
index dd87169..a4c1bbb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
@@ -67,8 +68,10 @@ public final class TimelineSchemaCreator {
 
   public static void main(String[] args) throws Exception {
 
+LOG.info("Starting the schema creation");
 Configuration hbaseConf =
-HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(null);
+HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(
+new YarnConfiguration());
 // Grab input args and allow for -Dxyz style arguments
 String[] otherArgs = new GenericOptionsParser(hbaseConf, args)
 .getRemainingArgs();
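
The fix matters because YarnConfiguration loads yarn-default.xml and yarn-site.xml, so cluster settings reach the HBase configuration the schema creator derives; passing null skipped that. A hedged sketch of the idea (not the actual getTimelineServiceHBaseConf logic):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class SchemaConfSketch {
  public static void main(String[] args) {
    // YarnConfiguration pulls in yarn-default.xml and yarn-site.xml, so
    // cluster settings are visible; a null seed would leave them out.
    Configuration yarn = new YarnConfiguration();
    // Overlay hbase-default.xml/hbase-site.xml on top of the YARN conf.
    Configuration hbase = HBaseConfiguration.create(yarn);
    System.out.println(hbase.get("hbase.zookeeper.quorum"));
  }
}
```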

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62e6d52b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
index afe4d6a..865a70d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
@@ -17,6 +17,15 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.storage.common;
 
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import 

hadoop git commit: HDFS-11499. Decommissioning stuck because of failing recovery. Contributed by Lukas Majercak and Manoj Govindassamy.

2017-03-13 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 72fc7e052 -> 851ba7d9d


HDFS-11499. Decommissioning stuck because of failing recovery. Contributed by 
Lukas Majercak and Manoj Govindassamy.

(cherry picked from commit 385d2cb777a0272ac20c62336c944fad295d5d12)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

(cherry picked from commit 60be2e5d8a1a6a8921c68f8b0f428b55152d05db)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/851ba7d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/851ba7d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/851ba7d9

Branch: refs/heads/branch-2.8
Commit: 851ba7d9d1a7a0b8a0bd86d3ad14bffc781a0316
Parents: 72fc7e0
Author: Wei-Chiu Chuang 
Authored: Mon Mar 13 13:41:13 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Mar 13 13:43:00 2017 -0700

--
 .../server/blockmanagement/BlockManager.java|  7 ++-
 .../apache/hadoop/hdfs/TestDecommission.java| 48 
 2 files changed, 54 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/851ba7d9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a929c43..858a54f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -736,7 +736,12 @@ public class BlockManager implements BlockStatsMXBean {
   return false; // already completed (e.g. by syncBlock)
 
 final boolean b = commitBlock(lastBlock, commitBlock);
-if (countNodes(lastBlock).liveReplicas() >= minReplication) {
+
+// Count replicas on decommissioning nodes, as these will not be
+// decommissioned unless recovery/completing last block has finished
+NumberReplicas numReplicas = countNodes(lastBlock);
+if (numReplicas.liveReplicas() + numReplicas.decommissioning() >=
+minReplication) {
   if (b) {
 addExpectedReplicasToPending(lastBlock, bc);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/851ba7d9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 1d5ebbf..78f6221 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -873,6 +873,54 @@ public class TestDecommission {
 
 fdos.close();
   }
+
+  @Test(timeout = 360000)
+  public void testDecommissionWithOpenFileAndBlockRecovery()
+  throws IOException, InterruptedException {
+startCluster(1, 6, conf);
+cluster.waitActive();
+
+Path file = new Path("/testRecoveryDecommission");
+
+// Create a file and never close the output stream to trigger recovery
+DistributedFileSystem dfs = cluster.getFileSystem();
+FSNamesystem ns = cluster.getNamesystem(0);
+FSDataOutputStream out = dfs.create(file, true,
+conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+(short) 3, blockSize);
+
+// Write data to the file
+long writtenBytes = 0;
+while (writtenBytes < fileSize) {
+  out.writeLong(writtenBytes);
+  writtenBytes += 8;
+}
+out.hsync();
+
+DatanodeInfo[] lastBlockLocations = NameNodeAdapter.getBlockLocations(
+  cluster.getNameNode(), "/testRecoveryDecommission", 0, fileSize)
+  .getLastLocatedBlock().getLocations();
+
+// Decommission all nodes of the last block
+ArrayList<String> toDecom = new ArrayList<>();
+for (DatanodeInfo dnDecom : lastBlockLocations) {
+  toDecom.add(dnDecom.getXferAddr());
+}
+writeConfigFile(excludeFile, toDecom);
+refreshNodes(ns, conf);
+
+// Make sure hard lease expires to trigger replica recovery
+cluster.setLeasePeriod(300L, 300L);
+Thread.sleep(2 * 

hadoop git commit: YARN-6318. timeline service schema creator fails if executed from a remote machine (Sangjin Lee via Varun Saxena)

2017-03-13 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 ec32c4265 -> 68ec0d9b4


YARN-6318. timeline service schema creator fails if executed from a remote 
machine (Sangjin Lee via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68ec0d9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68ec0d9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68ec0d9b

Branch: refs/heads/YARN-5355
Commit: 68ec0d9b471356d3adef15d7826f57ee50f8
Parents: ec32c42
Author: Varun Saxena 
Authored: Tue Mar 14 02:05:01 2017 +0530
Committer: Varun Saxena 
Committed: Tue Mar 14 02:05:01 2017 +0530

--
 .../storage/TimelineSchemaCreator.java  |  5 ++-
 .../common/HBaseTimelineStorageUtils.java   | 29 ++---
 .../common/TestHBaseTimelineStorageUtils.java   | 33 
 3 files changed, 54 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ec0d9b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
index dd87169..a4c1bbb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
@@ -67,8 +68,10 @@ public final class TimelineSchemaCreator {
 
   public static void main(String[] args) throws Exception {
 
+LOG.info("Starting the schema creation");
 Configuration hbaseConf =
-HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(null);
+HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(
+new YarnConfiguration());
 // Grab input args and allow for -Dxyz style arguments
 String[] otherArgs = new GenericOptionsParser(hbaseConf, args)
 .getRemainingArgs();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ec0d9b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
index afe4d6a..865a70d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
@@ -17,6 +17,15 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.storage.common;
 
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import 

hadoop git commit: HADOOP-14173. Remove unused AdlConfKeys#ADL_EVENTS_TRACKING_SOURCE. Contributed by John Zhuge.

2017-03-13 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.0 e51312e8e -> b457b9ac7


HADOOP-14173. Remove unused AdlConfKeys#ADL_EVENTS_TRACKING_SOURCE. Contributed 
by John Zhuge.

Change-Id: I5dc6f885816b8834f718874542dfa373458b0333
(cherry picked from commit 5a40bafdaeec693e613aa02e79dbaaccfdab6f60)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b457b9ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b457b9ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b457b9ac

Branch: refs/heads/branch-2.8.0
Commit: b457b9ac7fc626ee7c30b4be15eb00e4869626f9
Parents: e51312e
Author: John Zhuge 
Authored: Mon Mar 13 09:15:44 2017 -0700
Committer: John Zhuge 
Committed: Mon Mar 13 09:15:44 2017 -0700

--
 .../src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b457b9ac/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 7d31103..8fc8e00 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -66,7 +66,6 @@ public final class AdlConfKeys {
   static final String ADL_HADOOP_CLIENT_NAME = "hadoop-azure-datalake-";
   static final String ADL_HADOOP_CLIENT_VERSION =
   "2.0.0-SNAPSHOT";
-  static final String ADL_EVENTS_TRACKING_SOURCE = 
"adl.events.tracking.source";
   static final String ADL_EVENTS_TRACKING_CLUSTERNAME =
   "adl.events.tracking.clustername";
 



hadoop git commit: HADOOP-14173. Remove unused AdlConfKeys#ADL_EVENTS_TRACKING_SOURCE. Contributed by John Zhuge.

2017-03-13 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 1f2da39d0 -> 72fc7e052


HADOOP-14173. Remove unused AdlConfKeys#ADL_EVENTS_TRACKING_SOURCE. Contributed 
by John Zhuge.

Change-Id: I5dc6f885816b8834f718874542dfa373458b0333
(cherry picked from commit 5a40bafdaeec693e613aa02e79dbaaccfdab6f60)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72fc7e05
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72fc7e05
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72fc7e05

Branch: refs/heads/branch-2.8
Commit: 72fc7e0520c1a17e578def42261a40a984e9fd4b
Parents: 1f2da39
Author: John Zhuge 
Authored: Mon Mar 13 09:15:09 2017 -0700
Committer: John Zhuge 
Committed: Mon Mar 13 09:15:09 2017 -0700

--
 .../src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72fc7e05/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 7d31103..8fc8e00 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -66,7 +66,6 @@ public final class AdlConfKeys {
   static final String ADL_HADOOP_CLIENT_NAME = "hadoop-azure-datalake-";
   static final String ADL_HADOOP_CLIENT_VERSION =
   "2.0.0-SNAPSHOT";
-  static final String ADL_EVENTS_TRACKING_SOURCE = 
"adl.events.tracking.source";
   static final String ADL_EVENTS_TRACKING_CLUSTERNAME =
   "adl.events.tracking.clustername";
 



hadoop git commit: HADOOP-14173. Remove unused AdlConfKeys#ADL_EVENTS_TRACKING_SOURCE. Contributed by John Zhuge.

2017-03-13 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7a65601dd -> 0da850a47


HADOOP-14173. Remove unused AdlConfKeys#ADL_EVENTS_TRACKING_SOURCE. Contributed 
by John Zhuge.

Change-Id: I5dc6f885816b8834f718874542dfa373458b0333
(cherry picked from commit 5a40bafdaeec693e613aa02e79dbaaccfdab6f60)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0da850a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0da850a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0da850a4

Branch: refs/heads/branch-2
Commit: 0da850a472dd3b2cd877ab52149cbe87233b18d7
Parents: 7a65601
Author: John Zhuge 
Authored: Mon Mar 13 08:36:46 2017 -0700
Committer: John Zhuge 
Committed: Mon Mar 13 08:36:46 2017 -0700

--
 .../src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da850a4/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 7d31103..8fc8e00 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -66,7 +66,6 @@ public final class AdlConfKeys {
   static final String ADL_HADOOP_CLIENT_NAME = "hadoop-azure-datalake-";
   static final String ADL_HADOOP_CLIENT_VERSION =
   "2.0.0-SNAPSHOT";
-  static final String ADL_EVENTS_TRACKING_SOURCE = 
"adl.events.tracking.source";
   static final String ADL_EVENTS_TRACKING_CLUSTERNAME =
   "adl.events.tracking.clustername";
 



hadoop git commit: HADOOP-14173. Remove unused AdlConfKeys#ADL_EVENTS_TRACKING_SOURCE. Contributed by John Zhuge.

2017-03-13 Thread jzhuge
Repository: hadoop
Updated Branches:
  refs/heads/trunk 79924266f -> 5a40bafda


HADOOP-14173. Remove unused AdlConfKeys#ADL_EVENTS_TRACKING_SOURCE. Contributed 
by John Zhuge.

Change-Id: I5dc6f885816b8834f718874542dfa373458b0333


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a40bafd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a40bafd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a40bafd

Branch: refs/heads/trunk
Commit: 5a40bafdaeec693e613aa02e79dbaaccfdab6f60
Parents: 7992426
Author: John Zhuge 
Authored: Fri Mar 10 17:42:30 2017 -0800
Committer: John Zhuge 
Committed: Mon Mar 13 08:11:25 2017 -0700

--
 .../src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a40bafd/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 7d31103..8fc8e00 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -66,7 +66,6 @@ public final class AdlConfKeys {
   static final String ADL_HADOOP_CLIENT_NAME = "hadoop-azure-datalake-";
   static final String ADL_HADOOP_CLIENT_VERSION =
   "2.0.0-SNAPSHOT";
-  static final String ADL_EVENTS_TRACKING_SOURCE = 
"adl.events.tracking.source";
   static final String ADL_EVENTS_TRACKING_CLUSTERNAME =
   "adl.events.tracking.clustername";
 



[43/50] [abbrv] hadoop git commit: HADOOP-14111 cut some obsolete, ignored s3 tests in TestS3Credentials. Contributed by Yuanbo Liu

2017-03-13 Thread stevel
HADOOP-14111 cut some obsolete, ignored s3 tests in TestS3Credentials.
Contributed by Yuanbo Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/092ec39f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/092ec39f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/092ec39f

Branch: refs/heads/HADOOP-13345
Commit: 092ec39fb9d9930d234ed1f0ec507b2f8c6ff4bc
Parents: 4478273
Author: Steve Loughran 
Authored: Fri Mar 10 17:43:22 2017 +
Committer: Steve Loughran 
Committed: Fri Mar 10 17:43:22 2017 +

--
 .../hadoop/fs/s3native/TestS3Credentials.java | 18 --
 1 file changed, 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ec39f/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3Credentials.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3Credentials.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3Credentials.java
index 33d0320..17b78c7 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3Credentials.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3Credentials.java
@@ -28,7 +28,6 @@ import java.io.File;
 import java.net.URI;
 
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
@@ -127,21 +126,4 @@ public class TestS3Credentials {
 s3Credentials.getSecretAccessKey());
   }
 
-  @Test(expected=IllegalArgumentException.class)
-  @Ignore
-  public void noSecretShouldThrow() throws Exception {
-S3Credentials s3Credentials = new S3Credentials();
-Configuration conf = new Configuration();
-conf.set(S3_NATIVE_AWS_ACCESS_KEY_ID, EXAMPLE_ID);
-s3Credentials.initialize(new URI("s3n://foobar"), conf);
-  }
-
-  @Test(expected=IllegalArgumentException.class)
-  @Ignore
-  public void noAccessIdShouldThrow() throws Exception {
-S3Credentials s3Credentials = new S3Credentials();
-Configuration conf = new Configuration();
-conf.set(S3_NATIVE_AWS_SECRET_ACCESS_KEY, EXAMPLE_KEY);
-s3Credentials.initialize(new URI("s3n://foobar"), conf);
-  }
 }



[25/50] [abbrv] hadoop git commit: Revert "HADOOP-14062. ApplicationMasterProtocolPBClientImpl.allocate fails with EOFException when RPC privacy is enabled. Contributed by Steven Rand"

2017-03-13 Thread stevel
Revert "HADOOP-14062. ApplicationMasterProtocolPBClientImpl.allocate fails with 
EOFException when RPC privacy is enabled. Contributed by Steven Rand"

This reverts commit 241c1cc05b71f8b719a85c06e3df930639630726.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2be8947d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2be8947d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2be8947d

Branch: refs/heads/HADOOP-13345
Commit: 2be8947d12714c49ef7a90de82a351d086b435b6
Parents: 241c1cc
Author: Jian He 
Authored: Wed Mar 8 13:20:01 2017 -0800
Committer: Jian He 
Committed: Wed Mar 8 13:20:01 2017 -0800

--
 .../main/java/org/apache/hadoop/ipc/Client.java |  4 +---
 .../yarn/client/api/impl/TestAMRMClient.java| 24 
 2 files changed, 1 insertion(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2be8947d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index c0a5be9..70b902c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1768,9 +1768,7 @@ public class Client implements AutoCloseable {
 }
 
 void setSaslClient(SaslRpcClient client) throws IOException {
-  // Wrap the input stream in a BufferedInputStream to fill the buffer
-  // before reading its length (HADOOP-14062).
-  setInputStream(new BufferedInputStream(client.getInputStream(in)));
+  setInputStream(client.getInputStream(in));
   setOutputStream(client.getOutputStream(out));
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2be8947d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index a52963a..43c0271 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -137,11 +137,6 @@ public class TestAMRMClient {
 // set the minimum allocation so that resource decrease can go under 1024
 conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
 conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
-createClientAndCluster(conf);
-  }
-
-  private static void createClientAndCluster(Configuration conf)
-  throws Exception {
 yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), 
nodeCount, 1, 1);
 yarnCluster.init(conf);
 yarnCluster.start();
@@ -867,25 +862,6 @@ public class TestAMRMClient {
   }
 
  @Test (timeout=60000)
-  public void testAMRMClientWithSaslEncryption() throws Exception {
-conf.set("hadoop.rpc.protection", "privacy");
-// we have to create a new instance of MiniYARNCluster to avoid SASL qop
-// mismatches between client and server
-tearDown();
-createClientAndCluster(conf);
-startApp();
-initAMRMClientAndTest(false);
-
-// recreate the original MiniYARNCluster and YarnClient for other tests
-conf.unset("hadoop.rpc.protection");
-tearDown();
-createClientAndCluster(conf);
-// unless we start an application the cancelApp() method will fail when
-// it runs after this test
-startApp();
-  }
-
-  @Test (timeout=60000)
   public void testAMRMClientAllocReqId() throws YarnException, IOException {
 initAMRMClientAndTest(true);
   }



[38/50] [abbrv] hadoop git commit: HADOOP-14153. ADL module has messed doc structure. Contributed by Mingliang Liu

2017-03-13 Thread stevel
HADOOP-14153. ADL module has messed doc structure. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/881ec4d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/881ec4d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/881ec4d9

Branch: refs/heads/HADOOP-13345
Commit: 881ec4d97bd1db4582027aec3a4204156a4eda17
Parents: a96afae
Author: Mingliang Liu 
Authored: Tue Mar 7 16:29:19 2017 -0800
Committer: Mingliang Liu 
Committed: Fri Mar 10 00:16:09 2017 -0800

--
 .../src/site/markdown/index.md  | 55 
 1 file changed, 21 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/881ec4d9/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
--
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
index 9355241..3a16253 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
@@ -14,28 +14,15 @@
 
 # Hadoop Azure Data Lake Support
 
-* [Introduction](#Introduction)
-* [Features](#Features)
-* [Limitations](#Limitations)
-* [Usage](#Usage)
-* [Concepts](#Concepts)
-* [OAuth2 Support](#OAuth2_Support)
-* [Configuring Credentials and FileSystem](#Configuring_Credentials)
-* [Using Refresh Token](#Refresh_Token)
-* [Using Client Keys](#Client_Credential_Token)
-* [Protecting the Credentials with Credential 
Providers](#Credential_Provider)
-* [Enabling ADL Filesystem](#Enabling_ADL)
-* [Accessing `adl` URLs](#Accessing_adl_URLs)
-* [User/Group Representation](#OIDtoUPNConfiguration)
-* [Testing the `hadoop-azure` Module](#Testing_the_hadoop-azure_Module)
-
-## Introduction
+
+
+## Introduction
 
 The `hadoop-azure-datalake` module provides support for integration with the
 [Azure Data Lake 
Store](https://azure.microsoft.com/en-in/documentation/services/data-lake-store/).
 This support comes via the JAR file `azure-datalake-store.jar`.
 
-## Features
+## Features
 
 * Read and write data stored in an Azure Data Lake Storage account.
 * Reference file system paths using URLs using the `adl` scheme for Secure 
Webhdfs i.e. SSL
@@ -46,7 +33,7 @@ This support comes via the JAR file 
`azure-datalake-store.jar`.
 * API `setOwner()`, `setAcl`, `removeAclEntries()`, `modifyAclEntries()` 
accepts UPN or OID
   (Object ID) as user and group names.
 
-## Limitations
+## Limitations
 
 Partial or no support for the following operations :
 
@@ -62,9 +49,9 @@ Partial or no support for the following operations :
 * User and group information returned as `listStatus()` and `getFileStatus()` 
is
 in the form of the GUID associated in Azure Active Directory.
 
-## Usage
+## Usage
 
-### Concepts
+### Concepts
 Azure Data Lake Storage access path syntax is:
 
 ```
@@ -74,7 +61,7 @@ adl://.azuredatalakestore.net/
 For details on using the store, see
 [**Get started with Azure Data Lake Store using the Azure 
Portal**](https://azure.microsoft.com/en-in/documentation/articles/data-lake-store-get-started-portal/)
 
-### OAuth2 Support
+ OAuth2 Support
 
 Usage of Azure Data Lake Storage requires an OAuth2 bearer token to be present 
as
 part of the HTTPS header as per the OAuth2 specification.
@@ -86,11 +73,11 @@ and identity management service. See [*What is 
ActiveDirectory*](https://azure.m
 
 Following sections describes the OAuth2 configuration in `core-site.xml`.
 
- Configuring Credentials & FileSystem
+### Configuring Credentials and FileSystem
 Credentials can be configured using either a refresh token (associated with a 
user),
 or a client credential (analogous to a service principal).
 
- Using Refresh Tokens
+ Using Refresh Tokens
 
 Add the following properties to the cluster's `core-site.xml`
 
@@ -119,9 +106,9 @@ service associated with the client id. See [*Active 
Directory Library For Java*]
 ```
 
 
-### Using Client Keys
+ Using Client Keys
 
-#### Generating the Service Principal
+##### Generating the Service Principal
 
 1.  Go to [the portal](https://portal.azure.com)
 2.  Under "Browse", look for Active Directory and click on it.
@@ -135,13 +122,13 @@ service associated with the client id. See [*Active 
Directory Library For Java*]
 -  The token endpoint (select "View endpoints" at the bottom of the page 
and copy/paste the OAuth2.0 Token Endpoint value)
 -  Resource: Always https://management.core.windows.net/ , for all 
customers
 
-#### Adding the service principal to your ADL Account
+##### Adding the service principal to your ADL Account
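
(As a quick end-to-end check of the configuration steps the revised page walks
through, here is a minimal listing client. It is an editor's sketch, not part of
the patch: the account name `example` is a placeholder, and it assumes the
OAuth2 credentials described above are already present in core-site.xml.)

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AdlListing {
  public static void main(String[] args) throws Exception {
    // Credentials (refresh token or client keys) are assumed to be
    // configured in core-site.xml as the sections above describe.
    Configuration conf = new Configuration();
    Path root = new Path("adl://example.azuredatalakestore.net/");
    FileSystem fs = root.getFileSystem(conf);
    for (FileStatus status : fs.listStatus(root)) {
      System.out.println(status.getPath());
    }
  }
}
```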
 

[42/50] [abbrv] hadoop git commit: HADOOP-14062. ApplicationMasterProtocolPBClientImpl.allocate fails with EOFException when RPC privacy is enabled. Contributed by Steven Rand

2017-03-13 Thread stevel
HADOOP-14062. ApplicationMasterProtocolPBClientImpl.allocate fails with 
EOFException when RPC privacy is enabled. Contributed by Steven Rand


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4478273e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4478273e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4478273e

Branch: refs/heads/HADOOP-13345
Commit: 4478273e5fb731de93ff12e249a3137c38fcf46f
Parents: deb9f56
Author: Jian He 
Authored: Thu Mar 9 19:28:09 2017 -0800
Committer: Jian He 
Committed: Fri Mar 10 09:25:58 2017 -0800

--
 .../main/java/org/apache/hadoop/ipc/Client.java   |  4 +++-
 .../yarn/client/api/impl/TestAMRMClient.java  | 18 +-
 2 files changed, 20 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4478273e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 70b902c..c0a5be9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1768,7 +1768,9 @@ public class Client implements AutoCloseable {
 }
 
 void setSaslClient(SaslRpcClient client) throws IOException {
-  setInputStream(client.getInputStream(in));
+  // Wrap the input stream in a BufferedInputStream to fill the buffer
+  // before reading its length (HADOOP-14062).
+  setInputStream(new BufferedInputStream(client.getInputStream(in)));
   setOutputStream(client.getOutputStream(out));
 }
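
(Editor's sketch of the pattern, outside Hadoop: reading a length-prefixed
frame can fail in surprising ways when the underlying stream hands back short
reads, e.g. one decrypted SASL chunk at a time; buffering the stream first, as
the fix above does, keeps the length and payload reads well-behaved. The names
here are illustrative, not from the patch.)

```java
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

public class FrameReader {
  // Reads one length-prefixed frame from a raw stream. The
  // BufferedInputStream smooths over short reads from the source,
  // mirroring the wrapping applied in setSaslClient() above.
  static byte[] readFrame(InputStream raw) throws IOException {
    DataInputStream in =
        new DataInputStream(new BufferedInputStream(raw));
    int length = in.readInt();     // 4-byte big-endian length prefix
    byte[] payload = new byte[length];
    in.readFully(payload);         // blocks until the whole frame arrives
    return payload;
  }
}
```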
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4478273e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index 43c0271..06ba137 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -40,6 +40,7 @@ import java.util.Set;
 import java.util.TreeSet;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
@@ -126,8 +127,12 @@ public class TestAMRMClient {
 
   @Before
   public void setup() throws Exception {
-// start minicluster
 conf = new YarnConfiguration();
+createClusterAndStartApplication();
+  }
+
+  private void createClusterAndStartApplication() throws Exception {
+// start minicluster
 conf.set(YarnConfiguration.RM_SCHEDULER, schedulerName);
 conf.setLong(
   YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
@@ -866,6 +871,17 @@ public class TestAMRMClient {
 initAMRMClientAndTest(true);
   }
 
+  @Test (timeout=60000)
+  public void testAMRMClientWithSaslEncryption() throws Exception {
+// we have to create a new instance of MiniYARNCluster to avoid SASL qop
+// mismatches between client and server
+teardown();
+conf = new YarnConfiguration();
+conf.set(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION, "privacy");
+createClusterAndStartApplication();
+initAMRMClientAndTest(false);
+  }
+
   private void initAMRMClientAndTest(boolean useAllocReqId)
   throws YarnException, IOException {
 AMRMClient<ContainerRequest> amClient = null;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[41/50] [abbrv] hadoop git commit: YARN-6310. OutputStreams in AggregatedLogFormat.LogWriter can be left open upon exceptions. Contributed by Haibo Chen

2017-03-13 Thread stevel
YARN-6310. OutputStreams in AggregatedLogFormat.LogWriter can be left open upon 
exceptions. Contributed by Haibo Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/deb9f569
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/deb9f569
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/deb9f569

Branch: refs/heads/HADOOP-13345
Commit: deb9f569465bb760e661e60a313dad1605635236
Parents: e06ff18
Author: Jason Lowe 
Authored: Fri Mar 10 11:07:19 2017 -0600
Committer: Jason Lowe 
Committed: Fri Mar 10 11:08:33 2017 -0600

--
 .../logaggregation/AggregatedLogFormat.java | 52 ++--
 1 file changed, 26 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/deb9f569/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index 02f7782..1b46007 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -486,34 +486,34 @@ public class AggregatedLogFormat {
 }
 
 private void writeVersion() throws IOException {
-  DataOutputStream out = this.writer.prepareAppendKey(-1);
-  VERSION_KEY.write(out);
-  out.close();
-  out = this.writer.prepareAppendValue(-1);
-  out.writeInt(VERSION);
-  out.close();
+  try (DataOutputStream out = this.writer.prepareAppendKey(-1)) {
+VERSION_KEY.write(out);
+  }
+  try (DataOutputStream out = this.writer.prepareAppendValue(-1)) {
+out.writeInt(VERSION);
+  }
 }
 
 public void writeApplicationOwner(String user) throws IOException {
-  DataOutputStream out = this.writer.prepareAppendKey(-1);
-  APPLICATION_OWNER_KEY.write(out);
-  out.close();
-  out = this.writer.prepareAppendValue(-1);
-  out.writeUTF(user);
-  out.close();
+  try (DataOutputStream out = this.writer.prepareAppendKey(-1)) {
+APPLICATION_OWNER_KEY.write(out);
+  }
+  try (DataOutputStream out = this.writer.prepareAppendValue(-1)) {
+out.writeUTF(user);
+  }
 }
 
 public void writeApplicationACLs(Map<ApplicationAccessType, String> appAcls)
 throws IOException {
-  DataOutputStream out = this.writer.prepareAppendKey(-1);
-  APPLICATION_ACL_KEY.write(out);
-  out.close();
-  out = this.writer.prepareAppendValue(-1);
-  for (Entry<ApplicationAccessType, String> entry : appAcls.entrySet()) {
-out.writeUTF(entry.getKey().toString());
-out.writeUTF(entry.getValue());
+  try (DataOutputStream out = this.writer.prepareAppendKey(-1)) {
+APPLICATION_ACL_KEY.write(out);
+  }
+  try (DataOutputStream out = this.writer.prepareAppendValue(-1)) {
+for (Entry<ApplicationAccessType, String> entry : appAcls.entrySet()) {
+  out.writeUTF(entry.getKey().toString());
+  out.writeUTF(entry.getValue());
+}
   }
-  out.close();
 }
 
 public void append(LogKey logKey, LogValue logValue) throws IOException {
@@ -522,12 +522,12 @@ public class AggregatedLogFormat {
   if (pendingUploadFiles.size() == 0) {
 return;
   }
-  DataOutputStream out = this.writer.prepareAppendKey(-1);
-  logKey.write(out);
-  out.close();
-  out = this.writer.prepareAppendValue(-1);
-  logValue.write(out, pendingUploadFiles);
-  out.close();
+  try (DataOutputStream out = this.writer.prepareAppendKey(-1)) {
+logKey.write(out);
+  }
+  try (DataOutputStream out = this.writer.prepareAppendValue(-1)) {
+logValue.write(out, pendingUploadFiles);
+  }
 }
 
 public void close() {
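
(For readers skimming the diff: the change is a mechanical conversion to
try-with-resources, which guarantees close() runs even when a write throws.
A standalone illustration of the idiom, unrelated to the patch itself:)

```java
import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;

public class TryWithResourcesDemo {
  public static void main(String[] args) throws IOException {
    // The stream is closed when the block exits, normally or via an
    // exception, so no descriptor leaks even if a write fails.
    try (DataOutputStream out =
             new DataOutputStream(new FileOutputStream("demo.bin"))) {
      out.writeInt(42);
      out.writeUTF("payload");
    } // out.close() has run by this point
  }
}
```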


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[40/50] [abbrv] hadoop git commit: YARN-6196. Improve Resource Donut chart with better label in Node page of new YARN UI. Contributed by Akhil PB.

2017-03-13 Thread stevel
YARN-6196. Improve Resource Donut chart with better label in Node page of new 
YARN UI. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e06ff18a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e06ff18a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e06ff18a

Branch: refs/heads/HADOOP-13345
Commit: e06ff18ab68d23a0f236df8a0603a42367927f3c
Parents: fd26783
Author: Sunil G 
Authored: Fri Mar 10 16:17:48 2017 +0530
Committer: Sunil G 
Committed: Fri Mar 10 16:17:48 2017 +0530

--
 .../main/webapp/app/helpers/log-files-comma.js  | 10 -
 .../app/serializers/yarn-node-container.js  |  2 +-
 .../main/webapp/app/serializers/yarn-node.js|  4 ++--
 .../main/webapp/app/serializers/yarn-rm-node.js |  4 ++--
 .../src/main/webapp/app/templates/yarn-node.hbs | 23 +---
 .../main/webapp/app/templates/yarn-nodes.hbs|  2 +-
 .../webapp/app/templates/yarn-nodes/table.hbs   | 13 ++-
 7 files changed, 37 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e06ff18a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js
index 78dcf25..026cd7f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js
@@ -35,8 +35,16 @@ export default Ember.Helper.helper(function(params,hash) {
   var containerId = hash.containerId;
   var html = '';
   for (var i = 0; i < logFilesLen; i++) {
+var logFileName = "";
+if (logFiles[i]) {
+  if (typeof logFiles[i] === "object" && logFiles[i].containerLogFiles) {
+logFileName = logFiles[i].containerLogFiles;
+  } else if (typeof logFiles[i] === "string") {
+logFileName = logFiles[i];
+  }
+}
 html = html + '' + logFiles[i] +
+nodeAddr + '/' + containerId + '/' + logFileName + '">' + logFileName +
 '</a>';
 if (i !== logFilesLen - 1) {
   html = html + ",";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e06ff18a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js
index 7e78987..7bcb655 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js
@@ -30,7 +30,7 @@ export default DS.JSONAPISerializer.extend({
 containerId: payload.id,
 state: payload.state,
 user: payload.user,
-diagnostics: payload.diagnostics,
+diagnostics: payload.diagnostics || 'N/A',
 exitCode: payload.exitCode,
 totalMemoryNeeded: payload.totalMemoryNeededMB,
 totalVCoresNeeded: payload.totalVCoresNeeded,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e06ff18a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js
index 0d9faec..10521e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js
@@ -36,8 +36,8 @@ export default DS.JSONAPISerializer.extend({
 pmemCheckEnabled: payload.pmemCheckEnabled,
 nodeHealthy: payload.nodeHealthy,
 lastNodeUpdateTime: 
Converter.timeStampToDate(payload.lastNodeUpdateTime),
-healthReport: payload.healthReport,
-nmStartupTime: Converter.timeStampToDate(payload.nmStartupTime),
+healthReport: payload.healthReport || 'N/A',
+nmStartupTime: payload.nmStartupTime? 
Converter.timeStampToDate(payload.nmStartupTime) : '',
 nodeManagerBuildVersion: payload.nodeManagerBuildVersion,
 hadoopBuildVersion: payload.hadoopBuildVersion
   

[29/50] [abbrv] hadoop git commit: HADOOP-14052. Fix dead link in KMS document. Contributed by Christina Vu.

2017-03-13 Thread stevel
HADOOP-14052. Fix dead link in KMS document. Contributed by Christina Vu.

Change-Id: I7093f443d93927184196f62f02cc106a2c89e9cf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/570827a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/570827a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/570827a8

Branch: refs/heads/HADOOP-13345
Commit: 570827a819c586b31e88621a9bb1d8118d3c7df3
Parents: 33a38a5
Author: John Zhuge 
Authored: Wed Mar 8 23:50:15 2017 -0800
Committer: John Zhuge 
Committed: Wed Mar 8 23:50:15 2017 -0800

--
 hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/570827a8/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
--
diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm 
b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
index c1f9b13..4573b06 100644
--- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
+++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
@@ -956,7 +956,7 @@ $H4 Re-encrypt Encrypted Key With The Latest KeyVersion
 
 This command takes a previously generated encrypted key, and re-encrypts it 
using the latest KeyVersion encryption key in the KeyProvider. If the latest 
KeyVersion is the same as the one used to generate the encrypted key, the same 
encrypted key is returned.
 
-This is usually useful after a [Rollover](Rollover_Key) of an encryption key. 
Re-encrypting the encrypted key will allow it to be encrypted using the latest 
version of the encryption key, but still with the same key material and 
initialization vector.
+This is usually useful after a [Rollover](#Rollover_Key) of an encryption key. 
Re-encrypting the encrypted key will allow it to be encrypted using the latest 
version of the encryption key, but still with the same key material and 
initialization vector.
 
 *REQUEST:*
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[34/50] [abbrv] hadoop git commit: HDFS-11506. Move ErasureCodingPolicyManager#getSystemDefaultPolicy to test code. Contributed by Manoj Govindassamy.

2017-03-13 Thread stevel
HDFS-11506. Move ErasureCodingPolicyManager#getSystemDefaultPolicy to test 
code. Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/819808a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/819808a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/819808a0

Branch: refs/heads/HADOOP-13345
Commit: 819808a016e16325502169e0091a16a6b2ae5387
Parents: e96a0b8
Author: Andrew Wang 
Authored: Thu Mar 9 17:29:11 2017 -0800
Committer: Andrew Wang 
Committed: Thu Mar 9 17:29:11 2017 -0800

--
 .../namenode/ErasureCodingPolicyManager.java| 10 --
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  2 +-
 .../hdfs/ErasureCodeBenchmarkThroughput.java|  5 +--
 .../apache/hadoop/hdfs/StripedFileTestUtil.java | 12 +++
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |  3 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  3 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |  3 +-
 .../hdfs/TestDecommissionWithStriped.java   |  5 ++-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  8 ++---
 .../TestErasureCodingPolicyWithSnapshot.java|  3 +-
 .../apache/hadoop/hdfs/TestFileChecksum.java|  5 ++-
 .../hadoop/hdfs/TestFileStatusWithECPolicy.java |  3 +-
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java   |  3 +-
 .../hdfs/TestReadStripedFileWithDecoding.java   |  5 ++-
 .../TestReadStripedFileWithMissingBlocks.java   |  3 +-
 .../hadoop/hdfs/TestReconstructStripedFile.java |  7 ++---
 .../hdfs/TestSafeModeWithStripedFile.java   |  5 ++-
 .../TestUnsetAndChangeDirectoryEcPolicy.java|  3 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java   |  5 ++-
 .../hdfs/TestWriteStripedFileWithFailure.java   |  5 ++-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java| 12 +++
 .../hdfs/server/balancer/TestBalancer.java  |  5 ++-
 .../blockmanagement/TestBlockInfoStriped.java   |  4 +--
 .../TestBlockTokenWithDFSStriped.java   |  6 ++--
 .../TestLowRedundancyBlockQueues.java   |  4 +--
 ...constructStripedBlocksWithRackAwareness.java | 10 +++---
 .../TestSequentialBlockGroupId.java |  6 ++--
 .../TestSortLocatedStripedBlock.java|  4 +--
 .../hdfs/server/datanode/TestBlockRecovery.java |  3 +-
 .../TestDataNodeErasureCodingMetrics.java   |  5 ++-
 .../hadoop/hdfs/server/mover/TestMover.java |  5 ++-
 .../TestAddOverReplicatedStripedBlocks.java |  6 ++--
 .../namenode/TestAddStripedBlockInFBR.java  |  5 +--
 .../server/namenode/TestAddStripedBlocks.java   |  7 +++--
 .../server/namenode/TestEnabledECPolicies.java  | 12 +++
 .../server/namenode/TestFSEditLogLoader.java|  3 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 33 +---
 .../server/namenode/TestNameNodeMXBean.java | 12 +++
 .../namenode/TestQuotaWithStripedBlocks.java|  3 +-
 .../namenode/TestReconstructStripedBlocks.java  |  6 ++--
 .../server/namenode/TestStripedINodeFile.java   |  5 +--
 ...TestOfflineImageViewerWithStripedBlocks.java |  8 ++---
 42 files changed, 121 insertions(+), 141 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/819808a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 02cbbdf..29af207 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -130,16 +130,6 @@ public final class ErasureCodingPolicyManager {
   }
 
   /**
-   * Get system-wide default policy, which can be used by default
-   * when no policy is specified for a path.
-   * @return ecPolicy
-   */
-  public static ErasureCodingPolicy getSystemDefaultPolicy() {
-// make this configurable?
-return SYS_POLICY1;
-  }
-
-  /**
* Get a policy by policy ID.
* @return ecPolicy, or null if not found
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/819808a0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 7bf5cdc..1329195 100644
--- 

[35/50] [abbrv] hadoop git commit: YARN-1047. Expose # of pre-emptions as a queue counter (Contributed by Karthik Kambatla via Daniel Templeton)

2017-03-13 Thread stevel
YARN-1047. Expose # of pre-emptions as a queue counter (Contributed by Karthik 
Kambatla via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/846a0cd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/846a0cd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/846a0cd6

Branch: refs/heads/HADOOP-13345
Commit: 846a0cd678fba743220f28cef844ac9011a3f934
Parents: 819808a
Author: Daniel Templeton 
Authored: Thu Mar 9 17:51:47 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Mar 9 17:51:47 2017 -0800

--
 .../server/resourcemanager/scheduler/QueueMetrics.java | 13 +
 .../resourcemanager/scheduler/fair/FSAppAttempt.java   |  5 +
 .../scheduler/fair/TestFairSchedulerPreemption.java| 12 +---
 3 files changed, 27 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/846a0cd6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 4e364f7..007d2b3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -71,6 +71,8 @@ public class QueueMetrics implements MetricsSource {
   @Metric("Aggregate # of allocated off-switch containers")
 MutableCounterLong aggregateOffSwitchContainersAllocated;
   @Metric("Aggregate # of released containers") MutableCounterLong 
aggregateContainersReleased;
+  @Metric("Aggregate # of preempted containers") MutableCounterLong
+  aggregateContainersPreempted;
   @Metric("Available memory in MB") MutableGaugeLong availableMB;
   @Metric("Available CPU in virtual cores") MutableGaugeInt availableVCores;
   @Metric("Pending memory allocation in MB") MutableGaugeLong pendingMB;
@@ -476,6 +478,13 @@ public class QueueMetrics implements MetricsSource {
 }
   }
 
+  public void preemptContainer() {
+aggregateContainersPreempted.incr();
+if (parent != null) {
+  parent.preemptContainer();
+}
+  }
+
   public void reserveResource(String user, Resource res) {
 reservedContainers.incr();
 reservedMB.incr(res.getMemorySize());
@@ -640,4 +649,8 @@ public class QueueMetrics implements MetricsSource {
   public long getAggegatedReleasedContainers() {
 return aggregateContainersReleased.value();
   }
+
+  public long getAggregatePreemptedContainers() {
+return aggregateContainersPreempted.value();
+  }
 }
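
(A stripped-down model of the aggregation above, to show why a preemption
counted at a leaf queue is visible at every ancestor; SimpleQueueMetrics is a
made-up stand-in, not YARN code:)

```java
public class SimpleQueueMetrics {
  private final SimpleQueueMetrics parent;
  private long preempted;

  SimpleQueueMetrics(SimpleQueueMetrics parent) { this.parent = parent; }

  void preemptContainer() {
    preempted++;
    if (parent != null) {
      parent.preemptContainer();  // same recursive walk as QueueMetrics above
    }
  }

  long getPreempted() { return preempted; }

  public static void main(String[] args) {
    SimpleQueueMetrics root = new SimpleQueueMetrics(null);
    SimpleQueueMetrics leaf = new SimpleQueueMetrics(root);
    leaf.preemptContainer();
    System.out.println(root.getPreempted()); // 1 -- visible at the root too
  }
}
```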

http://git-wip-us.apache.org/repos/asf/hadoop/blob/846a0cd6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 6c61b45..3a9c94e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -45,6 +45,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import 

[46/50] [abbrv] hadoop git commit: YARN-6069. CORS support in timeline v2 (Rohith Sharma K S via Varun Saxena)

2017-03-13 Thread stevel
YARN-6069. CORS support in timeline v2 (Rohith Sharma K S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/229c7c9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/229c7c9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/229c7c9f

Branch: refs/heads/HADOOP-13345
Commit: 229c7c9f8922f2b1bfd04b09b834e00d359046ff
Parents: 6d356b6
Author: Varun Saxena 
Authored: Sat Mar 11 02:11:49 2017 +0530
Committer: Varun Saxena 
Committed: Sat Mar 11 04:09:01 2017 +0530

--
 .../src/main/resources/yarn-default.xml  | 11 +++
 .../timelineservice/reader/TimelineReaderServer.java |  9 +
 .../src/site/markdown/TimelineServiceV2.md   |  9 +
 3 files changed, 29 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/229c7c9f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 645a342..727e2c9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3068,4 +3068,15 @@
     <value>64</value>
   </property>
 
+  <property>
+    <description>
+      Flag to enable cross-origin (CORS) support for timeline service v1.x or
+      Timeline Reader in timeline service v2. For timeline service v2, also add
+      org.apache.hadoop.security.HttpCrossOriginFilterInitializer to the
+      configuration hadoop.http.filter.initializers in core-site.xml.
+    </description>
+    <name>yarn.timeline-service.http-cross-origin.enabled</name>
+    <value>false</value>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/229c7c9f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
index 2835c1b..2faf4b6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
@@ -32,6 +32,7 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.lib.StaticUserWebFilter;
+import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -134,6 +135,14 @@ public class TimelineReaderServer extends CompositeService 
{
 YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
 WebAppUtils.getTimelineReaderWebAppURL(conf));
 LOG.info("Instantiating TimelineReaderWebApp at " + bindAddress);
+boolean enableCorsFilter = conf.getBoolean(
+YarnConfiguration.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED,
+YarnConfiguration.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT);
+// setup CORS
+if (enableCorsFilter) {
+  conf.setBoolean(HttpCrossOriginFilterInitializer.PREFIX
+  + HttpCrossOriginFilterInitializer.ENABLED_SUFFIX, true);
+}
 try {
   HttpServer2.Builder builder = new HttpServer2.Builder()
 .setName("timeline")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/229c7c9f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index dc16803..bcbe0b7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ 

[32/50] [abbrv] hadoop git commit: YARN-5669. Add support for docker pull command (Contributed by luhuichun)

2017-03-13 Thread stevel
YARN-5669. Add support for docker pull command (Contributed by luhuichun)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e96a0b8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e96a0b8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e96a0b8c

Branch: refs/heads/HADOOP-13345
Commit: e96a0b8c92b46aed7c1f5ccec13abc6c1043edba
Parents: 822a74f
Author: Sidharta S 
Authored: Thu Mar 9 16:22:19 2017 -0800
Committer: Sidharta S 
Committed: Thu Mar 9 16:22:19 2017 -0800

--
 .../linux/runtime/docker/DockerPullCommand.java | 31 +
 .../runtime/docker/TestDockerPullCommand.java   | 49 
 2 files changed, 80 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e96a0b8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
new file mode 100644
index 000..351e09e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerPullCommand.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+/**
+ * Encapsulates the docker pull command and its command
+ * line arguments.
+ */
+public class DockerPullCommand extends DockerCommand {
+  private static final String PULL_COMMAND = "pull";
+
+  public DockerPullCommand(String imageName) {
+super(PULL_COMMAND);
+super.addCommandArguments(imageName);
+  }
+
+}
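
(Hypothetical usage, modeled on the accompanying TestDockerPullCommand; it
assumes the getCommandWithArguments() accessor inherited from DockerCommand
and a placeholder image name:)

```java
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerPullCommand;

public class PullExample {
  public static void main(String[] args) {
    DockerPullCommand pull = new DockerPullCommand("centos:7");
    // Renders as "pull centos:7"; the node manager's container
    // executor supplies the docker binary when executing it.
    System.out.println(pull.getCommandWithArguments());
  }
}
```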

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e96a0b8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
new file mode 100644
index 000..89157ff
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerPullCommand.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing 

[33/50] [abbrv] hadoop git commit: HDFS-11506. Move ErasureCodingPolicyManager#getSystemDefaultPolicy to test code. Contributed by Manoj Govindassamy.

2017-03-13 Thread stevel
http://git-wip-us.apache.org/repos/asf/hadoop/blob/819808a0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
index e7794d6..0bfa054 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
@@ -31,12 +31,12 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -46,7 +46,7 @@ import org.junit.Test;
 
 public class TestOfflineImageViewerWithStripedBlocks {
   private final ErasureCodingPolicy ecPolicy =
-  ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  StripedFileTestUtil.getDefaultECPolicy();
   private int dataBlocks = ecPolicy.getNumDataUnits();
   private int parityBlocks = ecPolicy.getNumParityUnits();
 
@@ -64,7 +64,7 @@ public class TestOfflineImageViewerWithStripedBlocks {
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
 cluster.waitActive();
 cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
-ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
+StripedFileTestUtil.getDefaultECPolicy().getName());
 fs = cluster.getFileSystem();
 Path eczone = new Path("/eczone");
 fs.mkdirs(eczone);
@@ -144,7 +144,7 @@ public class TestOfflineImageViewerWithStripedBlocks {
 // Verify space consumed present in BlockInfoStriped
 FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
 INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
-assertEquals(ErasureCodingPolicyManager.getSystemDefaultPolicy().getId(),
+assertEquals(StripedFileTestUtil.getDefaultECPolicy().getId(),
 fileNode.getErasureCodingPolicyID());
 assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
 long actualFileSize = 0;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[30/50] [abbrv] hadoop git commit: HDFS-11499. Decommissioning stuck because of failing recovery. Contributed by Lukas Majercak and Manoj Govindassamy.

2017-03-13 Thread stevel
HDFS-11499. Decommissioning stuck because of failing recovery. Contributed by 
Lukas Majercak and Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/385d2cb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/385d2cb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/385d2cb7

Branch: refs/heads/HADOOP-13345
Commit: 385d2cb777a0272ac20c62336c944fad295d5d12
Parents: 570827a
Author: Masatake Iwasaki 
Authored: Thu Mar 9 13:30:33 2017 +0900
Committer: Masatake Iwasaki 
Committed: Thu Mar 9 21:13:50 2017 +0900

--
 .../server/blockmanagement/BlockManager.java| 10 +++-
 .../apache/hadoop/hdfs/TestDecommission.java| 48 ++
 .../hadoop/hdfs/TestMaintenanceState.java   | 51 
 3 files changed, 108 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/385d2cb7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9ec28f9..5dc40fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -891,7 +891,15 @@ public class BlockManager implements BlockStatsMXBean {
   lastBlock.getUnderConstructionFeature()
   .updateStorageScheduledSize((BlockInfoStriped) lastBlock);
 }
-if (hasMinStorage(lastBlock)) {
+
+// Count replicas on decommissioning nodes, as these will not be
+// decommissioned unless recovery/completing last block has finished
+NumberReplicas numReplicas = countNodes(lastBlock);
+int numUsableReplicas = numReplicas.liveReplicas() +
+numReplicas.decommissioning() +
+numReplicas.liveEnteringMaintenanceReplicas();
+
+if (hasMinStorage(lastBlock, numUsableReplicas)) {
   if (committed) {
 addExpectedReplicasToPending(lastBlock);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/385d2cb7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 94e8946..dc0edcc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -33,6 +33,7 @@ import java.util.concurrent.ExecutionException;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -646,6 +647,53 @@ public class TestDecommission extends AdminStatesBaseTest {
 
 fdos.close();
   }
+
+  @Test(timeout = 360000)
+  public void testDecommissionWithOpenFileAndBlockRecovery()
+  throws IOException, InterruptedException {
+startCluster(1, 6);
+getCluster().waitActive();
+
+Path file = new Path("/testRecoveryDecommission");
+
+// Create a file and never close the output stream to trigger recovery
+DistributedFileSystem dfs = getCluster().getFileSystem();
+FSDataOutputStream out = dfs.create(file, true,
+getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 
4096),
+(short) 3, blockSize);
+
+// Write data to the file
+long writtenBytes = 0;
+while (writtenBytes < fileSize) {
+  out.writeLong(writtenBytes);
+  writtenBytes += 8;
+}
+out.hsync();
+
+DatanodeInfo[] lastBlockLocations = NameNodeAdapter.getBlockLocations(
+  getCluster().getNameNode(), "/testRecoveryDecommission", 0, fileSize)
+  .getLastLocatedBlock().getLocations();
+
+// Decommission all nodes of the last block
+ArrayList<String> toDecom = new ArrayList<>();
+for (DatanodeInfo dnDecom : lastBlockLocations) {
+  toDecom.add(dnDecom.getXferAddr());
+}
+initExcludeHosts(toDecom);
+refreshNodes(0);
+
+// Make sure hard lease expires to trigger replica recovery
+getCluster().setLeasePeriod(300L, 300L);
+

[31/50] [abbrv] hadoop git commit: YARN-6300. NULL_UPDATE_REQUESTS is redundant in TestFairScheduler (Contributed by Yuanbo Liu via Daniel Templeton)

2017-03-13 Thread stevel
YARN-6300. NULL_UPDATE_REQUESTS is redundant in TestFairScheduler (Contributed 
by Yuanbo Liu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/822a74f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/822a74f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/822a74f2

Branch: refs/heads/HADOOP-13345
Commit: 822a74f2ae955ea0893cc02fb36ceb49ceba8014
Parents: 385d2cb
Author: Daniel Templeton 
Authored: Thu Mar 9 12:12:47 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Mar 9 12:14:33 2017 -0800

--
 .../server/resourcemanager/scheduler/fair/TestFairScheduler.java | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/822a74f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 31dd7fe..028eea6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -96,7 +96,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdate
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 
 
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
@@ -119,7 +118,6 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mock;
 import org.mockito.Mockito;
 import org.xml.sax.SAXException;
 
@@ -130,8 +128,6 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
   private final int GB = 1024;
   private final static String ALLOC_FILE =
   new File(TEST_DIR, "test-queues").getAbsolutePath();
-  private final static ContainerUpdates NULL_UPDATE_REQUESTS =
-  new ContainerUpdates();
 
   @Before
   public void setUp() throws IOException {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[39/50] [abbrv] hadoop git commit: HADOOP-13946. Document how HDFS updates timestamps in the FS spec; compare with object stores. Contributed by Steve Loughran

2017-03-13 Thread stevel
HADOOP-13946. Document how HDFS updates timestamps in the FS spec; compare with 
object stores. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd26783a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd26783a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd26783a

Branch: refs/heads/HADOOP-13345
Commit: fd26783aaf3deea7a4e197439bd1075a6689681f
Parents: 881ec4d
Author: Mingliang Liu 
Authored: Fri Mar 10 00:21:20 2017 -0800
Committer: Mingliang Liu 
Committed: Fri Mar 10 00:21:20 2017 -0800

--
 .../site/markdown/filesystem/introduction.md| 85 
 1 file changed, 85 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd26783a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
index f6db557..12a7967 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
@@ -392,3 +392,88 @@ Object stores with these characteristics, can not be used 
as a direct replacemen
 for HDFS. In terms of this specification, their implementations of the
 specified operations do not match those required. They are considered supported
 by the Hadoop development community, but not to the same extent as HDFS.
+
+#### Timestamps
+
+
+`FileStatus` entries have a modification time and an access time.
+
+1. The exact behavior as to when these timestamps are set and whether or not 
they are valid
+varies between filesystems, and potentially between individual installations 
of a filesystem.
+1. The granularity of the timestamps is again, specific to both a filesystem
+and potentially individual installations.
+
+The HDFS filesystem does not update the modification time while it is being 
written to.
+
+Specifically
+
+* `FileSystem.create()` creation: a zero-byte file is listed; the modification 
time is
+  set to the current time as seen on the NameNode.
+* Writes to a file via the output stream returned in the `create()` call: the 
modification
+  time *does not change*.
+* When `OutputStream.close()` is called, all remaining data is written, the 
file closed and
+  the NameNode updated with the final size of the file. The modification time 
is set to
+  the time the file was closed.
+* Opening a file for appends via an `append()` operation does not change the 
modification
+  time of the file until the `close()` call is made on the output stream.
+* `FileSystem.setTimes()` can be used to explicitly set the time on a file.
+* When a file is renamed, its modification time is not changed, but the source
+  and destination directories have their modification times updated.
+* The rarely used operations:  `FileSystem.concat()`, `createSnapshot()`,
+ `createSymlink()` and `truncate()` all update the modification time.
+* The access time granularity is set in milliseconds 
`dfs.namenode.access.time.precision`;
+  the default granularity is 1 hour. If the precision is set to zero, access 
times
+  are not recorded.
+* If a modification or access time is not set, the value of that `FileStatus`
+field is 0.
+
+Other filesystems may have different behaviors. In particular,
+
+* Access times may or may not be supported; even if the underlying FS may 
support access times,
+  the option it is often disabled for performance reasons.
+* The granularity of the timestamps is an implementation-specific detail.
+
+
+Object stores have an even vaguer view of time, which can be summarized as
+"it varies".
+
+ * The timestamp granularity is likely to be 1 second, that being the 
granularity
+   of timestamps returned in HTTP HEAD and GET requests.
+ * Access times are likely to be unset. That is, `FileStatus.getAccessTime() 
== 0`.
+ * The modification timestamp for a newly created file MAY be that of the
+  `create()` call, or the actual time which the PUT request was initiated.
+   This may be in the  `FileSystem.create()` call, the final
+   `OutputStream.close()` operation, some period in between.
+ * The modification time may not be updated in the `close()` call.
+ * The timestamp is likely to be in UTC or the TZ of the object store. If the
+   client is in a different timezone, the timestamp of objects may be ahead or
+   behind that of the client.
+ * Object stores with cached metadata databases (for example: AWS S3 with
+   an in-memory or a DynamoDB metadata store) may have timestamps generated
+   from the local 
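
(To make the HDFS rules above concrete, an editor's probe program, not part of
the patch; the namenode URI and path are placeholders:)

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TimestampProbe {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(
        new URI("hdfs://namenode:8020/"), new Configuration());
    Path p = new Path("/tmp/timestamp-probe");
    fs.create(p).close();   // mtime is fixed when close() completes
    FileStatus st = fs.getFileStatus(p);
    System.out.println("mtime=" + st.getModificationTime()
        + " atime=" + st.getAccessTime());  // atime may be 0 or coarse
    // Timestamps can also be set explicitly:
    fs.setTimes(p, st.getModificationTime(), System.currentTimeMillis());
  }
}
```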

[49/50] [abbrv] hadoop git commit: HDFS-11512. Increase timeout on TestShortCircuitLocalRead#testSkipWithVerifyChecksum. Contributed by Eric Badger.

2017-03-13 Thread stevel
HDFS-11512. Increase timeout on 
TestShortCircuitLocalRead#testSkipWithVerifyChecksum. Contributed by Eric 
Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79924266
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79924266
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79924266

Branch: refs/heads/HADOOP-13345
Commit: 79924266f8f68e5e7c873e6b12e3b3acfcd708da
Parents: 04a5f5a
Author: Yiqun Lin 
Authored: Mon Mar 13 18:22:30 2017 +0800
Committer: Yiqun Lin 
Committed: Mon Mar 13 18:22:30 2017 +0800

--
 .../apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79924266/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index 55e9795..f2ee48c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -388,7 +388,7 @@ public class TestShortCircuitLocalRead {
 }
   }
 
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testSkipWithVerifyChecksum() throws IOException {
 int size = blockSize;
 Configuration conf = new Configuration();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[47/50] [abbrv] hadoop git commit: YARN-6042. Dump scheduler and queue state information into FairScheduler DEBUG log. (Yufei Gu via rchiang)

2017-03-13 Thread stevel
YARN-6042. Dump scheduler and queue state information into FairScheduler DEBUG 
log. (Yufei Gu via rchiang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4db9cc70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4db9cc70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4db9cc70

Branch: refs/heads/HADOOP-13345
Commit: 4db9cc70d0178703fb28f451eb84d97f2bf63af8
Parents: 229c7c9
Author: Ray Chiang 
Authored: Fri Mar 10 16:13:31 2017 -0800
Committer: Ray Chiang 
Committed: Fri Mar 10 16:13:31 2017 -0800

--
 .../src/main/conf/log4j.properties  |  9 +++
 .../scheduler/fair/FSAppAttempt.java| 49 +++---
 .../scheduler/fair/FSLeafQueue.java | 21 ++
 .../scheduler/fair/FSParentQueue.java   | 21 ++
 .../resourcemanager/scheduler/fair/FSQueue.java | 41 ++-
 .../scheduler/fair/FairScheduler.java   | 28 +---
 .../scheduler/fair/TestFairScheduler.java   | 71 
 7 files changed, 206 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db9cc70/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index b8c84e7..6026763 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -321,3 +321,12 @@ 
log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
 log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
 log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
 log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
+
+# Fair scheduler requests log on state dump
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=DEBUG,FSLOGGER
+log4j.appender.FSLOGGER=org.apache.log4j.RollingFileAppender
+log4j.appender.FSLOGGER.File=${hadoop.log.dir}/fairscheduler-statedump.log
+log4j.appender.FSLOGGER.layout=org.apache.log4j.PatternLayout
+log4j.appender.FSLOGGER.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.FSLOGGER.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.FSLOGGER.MaxBackupIndex=${hadoop.log.maxbackupindex}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db9cc70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 3a9c94e..ccfcffb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -835,25 +835,27 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   return capability;
 }
 
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Resource request: " + capability + " exceeds the available"
+  + " resources of the node.");
+}
+
 // The desired container won't fit here, so reserve
 if (isReservable(capability) &&
 reserve(pendingAsk.getPerAllocationResource(), node, reservedContainer,
 type, schedulerKey)) {
-  if (isWaitingForAMContainer()) {
-updateAMDiagnosticMsg(capability,
-" exceed the available resources of the node and the request is"
-+ " reserved");
+  updateAMDiagnosticMsg(capability, " exceeds the available resources of "
+  + "the node and the request is reserved)");
+  if (LOG.isDebugEnabled()) {
+LOG.debug(getName() + "'s resource request is reserved.");
   }
   return FairScheduler.CONTAINER_RESERVED;
 } else {
-  if (isWaitingForAMContainer()) {
-updateAMDiagnosticMsg(capability,
-" exceed the available resources of the node and the request 
cannot"
-+ " 

[21/50] [abbrv] hadoop git commit: HDFS-11152. Start erasure coding policy ID number from 1 instead of 0 to avoid potential unexpected errors. Contributed by SammiChen.

2017-03-13 Thread stevel
HDFS-11152. Start erasure coding policy ID number from 1 instead of 0 to avoid 
potential unexpected errors. Contributed by SammiChen.
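
The motivation: in the fsimage's protobuf encoding, a numeric field that was never set reads back as 0, so a real policy with ID 0 is indistinguishable from "no policy recorded". A standalone sketch of the resulting invariant (the helper name is illustrative; the loader below enforces the same rule with a Preconditions check):

final class EcPolicyIdCheck {
  // IDs now start at 1, so 0 (the protobuf default for an unset numeric
  // field) can never be mistaken for a legitimate policy.
  static byte requireValidEcPolicyId(byte id) {
    if (id <= 0) {
      throw new IllegalStateException("Invalid erasure coding policy ID "
          + id + "; valid IDs start at 1");
    }
    return id;
  }
}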


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5addacb1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5addacb1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5addacb1

Branch: refs/heads/HADOOP-13345
Commit: 5addacb1e301991a8285a221c726f66330cd6d08
Parents: 4ebe8a6
Author: Andrew Wang 
Authored: Wed Mar 8 08:47:38 2017 -0800
Committer: Andrew Wang 
Committed: Wed Mar 8 08:47:38 2017 -0800

--
 .../org/apache/hadoop/hdfs/protocol/HdfsConstants.java| 10 +-
 .../hadoop/hdfs/server/namenode/FSImageFormatPBINode.java |  3 +++
 2 files changed, 8 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5addacb1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index a9f1839..d2209a4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -144,11 +144,11 @@ public final class HdfsConstants {
 ALL, LIVE, DEAD, DECOMMISSIONING, ENTERING_MAINTENANCE
   }
 
-  public static final byte RS_6_3_POLICY_ID = 0;
-  public static final byte RS_3_2_POLICY_ID = 1;
-  public static final byte RS_6_3_LEGACY_POLICY_ID = 2;
-  public static final byte XOR_2_1_POLICY_ID = 3;
-  public static final byte RS_10_4_POLICY_ID = 4;
+  public static final byte RS_6_3_POLICY_ID = 1;
+  public static final byte RS_3_2_POLICY_ID = 2;
+  public static final byte RS_6_3_LEGACY_POLICY_ID = 3;
+  public static final byte XOR_2_1_POLICY_ID = 4;
+  public static final byte RS_10_4_POLICY_ID = 5;
 
   /* Hidden constructor */
   protected HdfsConstants() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5addacb1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 0ceae78..17b1da7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -342,6 +342,9 @@ public final class FSImageFormatPBINode {
   for (int i = 0; i < bp.size(); ++i) {
 BlockProto b = bp.get(i);
 if (isStriped) {
+  Preconditions.checkState(ecPolicy.getId() > 0,
+  "File with ID " + n.getId() +
+  " has an invalid erasure coding policy ID " + ecPolicy.getId());
   blocks[i] = new BlockInfoStriped(PBHelperClient.convert(b), 
ecPolicy);
 } else {
   blocks[i] = new BlockInfoContiguous(PBHelperClient.convert(b),


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[27/50] [abbrv] hadoop git commit: HDFS-10983. OIV tool should make an EC file explicit. Contributed by Manoj Govindassamy.

2017-03-13 Thread stevel
HDFS-10983. OIV tool should make an EC file explicit. Contributed by Manoj 
Govindassamy.
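
The gist of the change: the OIV XML output for a striped file now carries its erasure coding policy ID explicitly, so a round trip through OfflineImageReconstructor can rebuild the fsimage without guessing. A standalone sketch of the shape of that output (element names are illustrative, not the tool's exact schema):

final class EcInodeXmlSketch {
  static String inodeToXml(boolean isStriped, int ecPolicyId) {
    StringBuilder sb = new StringBuilder("<inode><type>FILE</type>");
    if (isStriped) {
      // EC files are marked explicitly rather than leaving readers to
      // infer the policy from the block type alone.
      sb.append("<blockType>STRIPED</blockType>")
        .append("<ecPolicyId>").append(ecPolicyId).append("</ecPolicyId>");
    }
    return sb.append("</inode>").toString();
  }
}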


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ca6ef0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ca6ef0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ca6ef0c

Branch: refs/heads/HADOOP-13345
Commit: 5ca6ef0c268b1acb3abf12505b9ead6fe7e38a23
Parents: d7762a5
Author: Andrew Wang 
Authored: Wed Mar 8 15:36:19 2017 -0800
Committer: Andrew Wang 
Committed: Wed Mar 8 15:36:19 2017 -0800

--
 .../server/namenode/FSImageFormatPBINode.java   |  1 +
 .../OfflineImageReconstructor.java  |  4 +
 .../offlineImageViewer/PBImageXmlWriter.java| 15 ++-
 .../hdfs/server/namenode/TestFSImage.java   |  1 +
 .../TestOfflineImageViewer.java | 99 +++-
 5 files changed, 112 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ca6ef0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 17b1da7..ef334f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -332,6 +332,7 @@ public final class FSImageFormatPBINode {
   BlockType blockType = PBHelperClient.convert(f.getBlockType());
   LoaderContext state = parent.getLoaderContext();
   boolean isStriped = f.hasErasureCodingPolicyID();
+  assert ((!isStriped) || (isStriped && !f.hasReplication()));
   Short replication = (!isStriped ? (short) f.getReplication() : null);
   ErasureCodingPolicy ecPolicy = isStriped ?
   ErasureCodingPolicyManager.getPolicyByPolicyID(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ca6ef0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
index ed348d3..e80f4d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
@@ -647,6 +647,10 @@ class OfflineImageReconstructor {
 break;
   case "STRIPED":
 bld.setBlockType(HdfsProtos.BlockTypeProto.STRIPED);
+ival = node.removeChildInt(INODE_SECTION_EC_POLICY_ID);
+if (ival != null) {
+  bld.setErasureCodingPolicyID(ival);
+}
 break;
   default:
 throw new IOException("INode XML found with unknown  " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ca6ef0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
index f8734cb..5a42a6b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheD
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
 import 

[15/50] [abbrv] hadoop git commit: YARN-6287. RMCriticalThreadUncaughtExceptionHandler.rmContext should be final (Contributed by Corey Barker via Daniel Templeton)

2017-03-13 Thread stevel
YARN-6287. RMCriticalThreadUncaughtExceptionHandler.rmContext should be final 
(Contributed by Corey Barker via Daniel Templeton)
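
Usage sketch for context (the thread body and name are illustrative, and rmContext is assumed to be in scope). Making the field final also guarantees safe publication: the handler may be invoked on any dying thread, and a final field is visible after construction without extra synchronization.

Thread eventProcessor = new Thread(() -> { /* dispatch loop */ });
eventProcessor.setName("RM Event Processor");
eventProcessor.setUncaughtExceptionHandler(
    new RMCriticalThreadUncaughtExceptionHandler(rmContext));
eventProcessor.start();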


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0c239cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0c239cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0c239cd

Branch: refs/heads/HADOOP-13345
Commit: e0c239cdbda336e09a35d112d451c2e17d74a3fc
Parents: 1f9848d
Author: Daniel Templeton 
Authored: Tue Mar 7 11:58:48 2017 -0800
Committer: Daniel Templeton 
Committed: Tue Mar 7 11:58:48 2017 -0800

--
 .../resourcemanager/RMCriticalThreadUncaughtExceptionHandler.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0c239cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMCriticalThreadUncaughtExceptionHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMCriticalThreadUncaughtExceptionHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMCriticalThreadUncaughtExceptionHandler.java
index c5c6087..a67f81a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMCriticalThreadUncaughtExceptionHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMCriticalThreadUncaughtExceptionHandler.java
@@ -37,7 +37,7 @@ public class RMCriticalThreadUncaughtExceptionHandler
 implements UncaughtExceptionHandler {
   private static final Log LOG = LogFactory.getLog(
   RMCriticalThreadUncaughtExceptionHandler.class);
-  private RMContext rmContext;
+  private final RMContext rmContext;
 
   public RMCriticalThreadUncaughtExceptionHandler(RMContext rmContext) {
 this.rmContext = rmContext;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[23/50] [abbrv] hadoop git commit: YARN-6297. TestAppLogAggregatorImpl.verifyFilesUploaded() should check # of files uploaded with that of files expected (haibochen via rkanter)

2017-03-13 Thread stevel
YARN-6297. TestAppLogAggregatorImpl.verifyFilesUploaded() should check # of 
files uploaded with that of files expected (haibochen via rkanter)
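
The underlying bug was a self-comparison, filesUploaded.size() != filesUploaded.size(), which is false for every input, so the guard could never fire. A generic, runnable illustration of the pitfall:

import java.util.HashSet;
import java.util.Set;

public class SelfComparisonPitfall {
  public static void main(String[] args) {
    Set<String> uploaded = new HashSet<>();
    uploaded.add("syslog");
    Set<String> expected = new HashSet<>(); // empty: sizes clearly differ

    // Broken guard compares a size to itself; fixed guard compares
    // uploaded against expected, as the patch below does.
    System.out.println(uploaded.size() != uploaded.size()); // false (bug)
    System.out.println(uploaded.size() != expected.size()); // true  (fix)
  }
}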


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/287ba4ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/287ba4ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/287ba4ff

Branch: refs/heads/HADOOP-13345
Commit: 287ba4ffa66212c02e1b1edc8fca53f6368a9efc
Parents: 98142d2
Author: Robert Kanter 
Authored: Wed Mar 8 10:45:33 2017 -0800
Committer: Robert Kanter 
Committed: Wed Mar 8 10:45:33 2017 -0800

--
 .../logaggregation/TestAppLogAggregatorImpl.java | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/287ba4ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
index 2602d55..17d527a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
@@ -146,7 +146,7 @@ public class TestAppLogAggregatorImpl {
 
 verifyLogAggregationWithExpectedFiles2DeleteAndUpload(applicationId,
 containerId, logRententionSec, recoveredLogInitedTimeMillis,
-logFiles, new HashSet());
+logFiles, logFiles);
   }
 
   @Test
@@ -170,7 +170,7 @@ public class TestAppLogAggregatorImpl {
 
 final long week = 7 * 24 * 60 * 60;
 final long recoveredLogInitedTimeMillis = System.currentTimeMillis() -
-2*week;
+2 * week * 1000;
 verifyLogAggregationWithExpectedFiles2DeleteAndUpload(
 applicationId, containerId, week, recoveredLogInitedTimeMillis,
 logFiles, new HashSet());
@@ -257,7 +257,7 @@ public class TestAppLogAggregatorImpl {
   Set filesExpected) {
 final String errMsgPrefix = "The set of files uploaded are not the same " +
 "as expected";
-if(filesUploaded.size() != filesUploaded.size()) {
+if(filesUploaded.size() != filesExpected.size()) {
   fail(errMsgPrefix + ": actual size: " + filesUploaded.size() + " vs " +
   "expected size: " + filesExpected.size());
 }
@@ -413,7 +413,7 @@ public class TestAppLogAggregatorImpl {
 FileContext lfs, long recoveredLogInitedTime) throws IOException {
   super(dispatcher, deletionService, conf, appId, ugi, nodeId,
   dirsHandler, remoteNodeLogFileForApp, appAcls,
-  logAggregationContext, context, lfs, recoveredLogInitedTime);
+  logAggregationContext, context, lfs, -1, recoveredLogInitedTime);
   this.applicationId = appId;
   this.deletionService = deletionService;
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[20/50] [abbrv] hadoop git commit: MAPREDUCE-6859. hadoop-mapreduce-client-jobclient.jar sets a main class that isn't in the JAR. Contributed by Daniel Templeton

2017-03-13 Thread stevel
MAPREDUCE-6859. hadoop-mapreduce-client-jobclient.jar sets a main class that 
isn't in the JAR. Contributed by Daniel Templeton


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ebe8a6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ebe8a6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ebe8a6a

Branch: refs/heads/HADOOP-13345
Commit: 4ebe8a6a237258de9a7d8b041d78249bd3cca7a6
Parents: 1eb8186
Author: Jason Lowe 
Authored: Wed Mar 8 10:27:57 2017 -0600
Committer: Jason Lowe 
Committed: Wed Mar 8 10:27:57 2017 -0600

--
 .../hadoop-mapreduce-client-jobclient/pom.xml   | 12 +---
 1 file changed, 5 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ebe8a6a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index 5cecebb..1747f59 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -143,17 +143,15 @@
         <excludes>
           <exclude>**/hdfs-site.xml</exclude>
         </excludes>
+        <archive>
+          <manifest>
+            <mainClass>org.apache.hadoop.test.MapredTestDriver</mainClass>
+          </manifest>
+        </archive>
       </configuration>
       <phase>test-compile</phase>
     </execution>
   </executions>
-  <configuration>
-    <archive>
-      <manifest>
-        <mainClass>org.apache.hadoop.test.MapredTestDriver</mainClass>
-      </manifest>
-    </archive>
-  </configuration>
 </plugin>
 <plugin>
   <groupId>org.apache.maven.plugins</groupId>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[22/50] [abbrv] hadoop git commit: Revert "HADOOP-13606 swift FS to add a service load metadata file. Contributed by Steve Loughran"

2017-03-13 Thread stevel
Revert "HADOOP-13606 swift FS to add a service load metadata file. Contributed 
by Steve Loughran"

This reverts commit 53a12fa721bb431f7d481aac7d245c93efb56153.
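
With the service-loader file gone again, the swift:// scheme binds purely through configuration; the core-default.xml entry restored below supplies the mapping. A usage sketch (the container/service URI is a placeholder, and the hadoop-openstack jar must be on the classpath):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class SwiftBindingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Redundant once core-default.xml carries it; shown explicitly here.
    conf.set("fs.swift.impl",
        "org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem");
    FileSystem fs = FileSystem.get(
        URI.create("swift://container.myprovider/"), conf);
    System.out.println(fs.getUri());
  }
}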


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98142d2f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98142d2f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98142d2f

Branch: refs/heads/HADOOP-13345
Commit: 98142d2f722e82d57b0e2bae6276f7c17fd99598
Parents: 5addacb
Author: John Zhuge 
Authored: Mon Mar 6 11:14:33 2017 -0800
Committer: John Zhuge 
Committed: Wed Mar 8 09:54:22 2017 -0800

--
 .../src/main/resources/core-default.xml |  6 ++
 .../services/org.apache.hadoop.fs.FileSystem| 16 
 2 files changed, 6 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/98142d2f/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 52b58ed..f742ba8 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -850,6 +850,12 @@
 </property>
 
 <property>
+  <name>fs.swift.impl</name>
+  <value>org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem</value>
+  <description>The implementation class of the OpenStack Swift Filesystem</description>
+</property>
+
+<property>
   <name>fs.automatic.close</name>
   <value>true</value>
   <description>By default, FileSystem instances are automatically closed at program

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98142d2f/hadoop-tools/hadoop-openstack/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
--
diff --git 
a/hadoop-tools/hadoop-openstack/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
 
b/hadoop-tools/hadoop-openstack/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 649ea31..0000000
--- 
a/hadoop-tools/hadoop-openstack/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[19/50] [abbrv] hadoop git commit: YARN-6207. Move application across queues should handle delayed event processing. Contributed by Bibin A Chundatt.

2017-03-13 Thread stevel
YARN-6207. Move application across queues should handle delayed event 
processing. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1eb81867
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1eb81867
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1eb81867

Branch: refs/heads/HADOOP-13345
Commit: 1eb81867032b016a59662043cbae50daa52dafa9
Parents: 28daaf0
Author: Sunil G 
Authored: Wed Mar 8 12:04:30 2017 +0530
Committer: Sunil G 
Committed: Wed Mar 8 12:04:30 2017 +0530

--
 .../scheduler/SchedulerApplicationAttempt.java  |   5 +-
 .../scheduler/capacity/CapacityScheduler.java   |  69 ---
 .../capacity/TestCapacityScheduler.java | 200 +++
 3 files changed, 248 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1eb81867/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index f894a40..91e29d5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -1069,6 +1069,7 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
   QueueMetrics newMetrics = newQueue.getMetrics();
   String newQueueName = newQueue.getQueueName();
   String user = getUser();
+
   for (RMContainer liveContainer : liveContainers.values()) {
 Resource resource = liveContainer.getContainer().getResource();
 ((RMContainerImpl) liveContainer).setQueueName(newQueueName);
@@ -1084,7 +1085,9 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
 }
   }
 
-  appSchedulingInfo.move(newQueue);
+  if (!isStopped) {
+appSchedulingInfo.move(newQueue);
+  }
   this.queue = newQueue;
 } finally {
   writeLock.unlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1eb81867/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 20ea607..f6e7942 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -1939,36 +1939,47 @@ public class CapacityScheduler extends
   String targetQueueName) throws YarnException {
 try {
   writeLock.lock();
-  FiCaSchedulerApp app = getApplicationAttempt(
-  ApplicationAttemptId.newInstance(appId, 0));
-  String sourceQueueName = app.getQueue().getQueueName();
-  LeafQueue source = this.queueManager.getAndCheckLeafQueue(
-  sourceQueueName);
+  SchedulerApplication<FiCaSchedulerApp> application =
+  applications.get(appId);
+  if (application == null) {
+throw new YarnException("App to be moved " + appId + " not found.");
+  }
+  String sourceQueueName = application.getQueue().getQueueName();
+  LeafQueue source =
+  this.queueManager.getAndCheckLeafQueue(sourceQueueName);
   String destQueueName = handleMoveToPlanQueue(targetQueueName);
   LeafQueue dest = this.queueManager.getAndCheckLeafQueue(destQueueName);
 
-   

[37/50] [abbrv] hadoop git commit: YARN-6264. AM not launched when a single vcore is available on the cluster. (Yufei Gu via kasha)

2017-03-13 Thread stevel
YARN-6264. AM not launched when a single vcore is available on the cluster. 
(Yufei Gu via kasha)
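
Worked numbers behind the fix, runnable as-is (0.5 is the fair scheduler's default maxAMShare): on a one-vcore cluster, truncating multiplication reserves zero vcores for application masters, so no AM can ever launch; rounding up reserves one.

public class AmShareRounding {
  public static void main(String[] args) {
    int clusterVcores = 1;    // a single available vcore
    double maxAMShare = 0.5;  // fraction of the queue usable by AMs
    System.out.println((int) (clusterVcores * maxAMShare));          // 0
    System.out.println((int) Math.ceil(clusterVcores * maxAMShare)); // 1
  }
}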


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a96afae1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a96afae1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a96afae1

Branch: refs/heads/HADOOP-13345
Commit: a96afae125ba02fb4480542d3fb0891623ee4c37
Parents: c5ee7fde
Author: Karthik Kambatla 
Authored: Thu Mar 9 23:11:54 2017 -0800
Committer: Karthik Kambatla 
Committed: Thu Mar 9 23:11:54 2017 -0800

--
 .../hadoop/yarn/util/resource/Resources.java|  7 +
 .../yarn/util/resource/TestResources.java   | 24 -
 .../scheduler/fair/FSLeafQueue.java |  3 ++-
 .../scheduler/fair/TestFairScheduler.java   | 28 ++--
 4 files changed, 46 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a96afae1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 57b3a46..7020300 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -242,6 +242,13 @@ public class Resources {
 out.setVirtualCores((int)(lhs.getVirtualCores() * by));
 return out;
   }
+
+  public static Resource multiplyAndRoundUp(Resource lhs, double by) {
+Resource out = clone(lhs);
+out.setMemorySize((long)Math.ceil(lhs.getMemorySize() * by));
+out.setVirtualCores((int)Math.ceil(lhs.getVirtualCores() * by));
+return out;
+  }
   
   public static Resource normalize(
   ResourceCalculator calculator, Resource lhs, Resource min,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a96afae1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
index 057214b..f8570a8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.util.resource;
 
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 public class TestResources {
@@ -46,5 +48,25 @@ public class TestResources {
 assertTrue(Resources.none().compareTo(
 createResource(0, 1)) < 0);
   }
-  
+
+  @Test
+  public void testMultipleRoundUp() {
+final double by = 0.5;
+final String memoryErrorMsg = "Invalid memory size.";
+final String vcoreErrorMsg = "Invalid virtual core number.";
+Resource resource = Resources.createResource(1, 1);
+Resource result = Resources.multiplyAndRoundUp(resource, by);
+assertEquals(memoryErrorMsg, result.getMemorySize(), 1);
+assertEquals(vcoreErrorMsg, result.getVirtualCores(), 1);
+
+resource = Resources.createResource(2, 2);
+result = Resources.multiplyAndRoundUp(resource, by);
+assertEquals(memoryErrorMsg, result.getMemorySize(), 1);
+assertEquals(vcoreErrorMsg, result.getVirtualCores(), 1);
+
+resource = Resources.createResource(0, 0);
+result = Resources.multiplyAndRoundUp(resource, by);
+assertEquals(memoryErrorMsg, result.getMemorySize(), 0);
+assertEquals(vcoreErrorMsg, result.getVirtualCores(), 0);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a96afae1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 

[14/50] [abbrv] hadoop git commit: HDFS-11508. Fix bind failure in SimpleTCPServer & Portmap where bind fails because socket is in TIME_WAIT state. Contributed by Mukul Kumar Singh.

2017-03-13 Thread stevel
HDFS-11508. Fix bind failure in SimpleTCPServer & Portmap where bind fails 
because socket is in TIME_WAIT state. Contributed by Mukul Kumar Singh.
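
Plain-JDK illustration of the same remedy: SO_REUSEADDR has to be set before bind(), otherwise an immediate restart can fail while the previous socket lingers in TIME_WAIT. The port number is arbitrary for the sketch:

import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class ReuseAddressSketch {
  public static void main(String[] args) throws Exception {
    ServerSocket server = new ServerSocket();
    server.setReuseAddress(true);              // must precede bind()
    server.bind(new InetSocketAddress(18111));
    System.out.println("bound to " + server.getLocalSocketAddress());
    server.close();
  }
}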


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f9848df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f9848df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f9848df

Branch: refs/heads/HADOOP-13345
Commit: 1f9848dfe1fc9148cbbcfcc3dfed948b9e0f3c3c
Parents: 959940b
Author: Arpit Agarwal 
Authored: Tue Mar 7 11:41:05 2017 -0800
Committer: Arpit Agarwal 
Committed: Tue Mar 7 11:41:05 2017 -0800

--
 .../src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java| 1 +
 .../src/main/java/org/apache/hadoop/portmap/Portmap.java   | 2 ++
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f9848df/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
index 99d1d6f..f7ab52e 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
@@ -81,6 +81,7 @@ public class SimpleTcpServer {
 });
 server.setOption("child.tcpNoDelay", true);
 server.setOption("child.keepAlive", true);
+server.setOption("reuseAddress", true);
 
 // Listen to TCP port
 ch = server.bind(new InetSocketAddress(port));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f9848df/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
index 2b88791..94d76d0 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
@@ -109,12 +109,14 @@ final class Portmap {
 RpcUtil.STAGE_RPC_TCP_RESPONSE);
   }
 });
+tcpServer.setOption("reuseAddress", true);
 
 udpServer = new ConnectionlessBootstrap(new NioDatagramChannelFactory(
 Executors.newCachedThreadPool()));
 
 udpServer.setPipeline(Channels.pipeline(RpcUtil.STAGE_RPC_MESSAGE_PARSER,
 handler, RpcUtil.STAGE_RPC_UDP_RESPONSE));
+udpServer.setOption("reuseAddress", true);
 
 tcpChannel = tcpServer.bind(tcpAddress);
 udpChannel = udpServer.bind(udpAddress);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[45/50] [abbrv] hadoop git commit: HDFS-11340. DataNode reconfigure for disks doesn't remove the failed volumes. (Manoj Govindassamy via lei)

2017-03-13 Thread stevel
HDFS-11340. DataNode reconfigure for disks doesn't remove the failed volumes. 
(Manoj Govindassamy via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d356b6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d356b6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d356b6b

Branch: refs/heads/HADOOP-13345
Commit: 6d356b6b4d8ccb32397cacfb5d0357b21f6035fc
Parents: 9649c27
Author: Lei Xu 
Authored: Fri Mar 10 14:36:51 2017 -0800
Committer: Lei Xu 
Committed: Fri Mar 10 14:37:13 2017 -0800

--
 .../hadoop/hdfs/server/datanode/DataNode.java   |  73 +---
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  14 ++-
 .../datanode/fsdataset/impl/FsVolumeList.java   |  13 ++-
 .../TestDataNodeVolumeFailureReporting.java | 116 +--
 4 files changed, 184 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d356b6b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 6f24858..5a82850 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -652,48 +652,84 @@ public class DataNode extends ReconfigurableBase
   ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException {
 Configuration conf = new Configuration();
 conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
-List<StorageLocation> locations = getStorageLocations(conf);
+List<StorageLocation> newStorageLocations = getStorageLocations(conf);
 
-if (locations.isEmpty()) {
+if (newStorageLocations.isEmpty()) {
   throw new IOException("No directory is specified.");
 }
 
-// Use the existing StorageLocation to detect storage type changes.
-Map<String, StorageLocation> existingLocations = new HashMap<>();
+// Use the existing storage locations from the current conf
+// to detect new storage additions or removals.
+Map<String, StorageLocation> existingStorageLocations = new HashMap<>();
 for (StorageLocation loc : getStorageLocations(getConf())) {
-  existingLocations.put(loc.getNormalizedUri().toString(), loc);
+  existingStorageLocations.put(loc.getNormalizedUri().toString(), loc);
 }
 
 ChangedVolumes results = new ChangedVolumes();
-results.newLocations.addAll(locations);
+results.newLocations.addAll(newStorageLocations);
 
 for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
  it.hasNext(); ) {
   Storage.StorageDirectory dir = it.next();
   boolean found = false;
-  for (Iterator<StorageLocation> sl = results.newLocations.iterator();
-   sl.hasNext(); ) {
-StorageLocation location = sl.next();
-if (location.matchesStorageDirectory(dir)) {
-  sl.remove();
-  StorageLocation old = existingLocations.get(
-  location.getNormalizedUri().toString());
-  if (old != null &&
-  old.getStorageType() != location.getStorageType()) {
+  for (Iterator<StorageLocation> newLocationItr =
+   results.newLocations.iterator(); newLocationItr.hasNext();) {
+StorageLocation newLocation = newLocationItr.next();
+if (newLocation.matchesStorageDirectory(dir)) {
+  StorageLocation oldLocation = existingStorageLocations.get(
+  newLocation.getNormalizedUri().toString());
+  if (oldLocation != null &&
+  oldLocation.getStorageType() != newLocation.getStorageType()) {
 throw new IOException("Changing storage type is not allowed.");
   }
-  results.unchangedLocations.add(location);
+  // Update the unchanged locations as this location
+  // from the new conf is really not a new one.
+  newLocationItr.remove();
+  results.unchangedLocations.add(newLocation);
   found = true;
   break;
 }
   }
 
+  // New conf doesn't have the storage location which available in
+  // the current storage locations. Add to the deactivateLocations list.
   if (!found) {
+LOG.info("Deactivation request received for active volume: "
++ dir.getRoot().toString());
 results.deactivateLocations.add(
 StorageLocation.parse(dir.getRoot().toString()));
   }
 }
 
+// Use the failed storage locations from the current conf
+// to detect removals in the new conf.
+if 

[36/50] [abbrv] hadoop git commit: HADOOP-14123. Remove misplaced ADL service provider config file for FileSystem. Contributed by John Zhuge.

2017-03-13 Thread stevel
HADOOP-14123. Remove misplaced ADL service provider config file for FileSystem. 
Contributed by John Zhuge.

Change-Id: Ic956e2eb8189625916442eaffdc69163d32f730e
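
Why the file was dead weight: java.util.ServiceLoader only consults META-INF/services/<interface-FQCN>, and this copy sat directly under META-INF/, so it was never read. A sketch that lists the FileSystem providers ServiceLoader actually sees:

import java.util.ServiceLoader;
import org.apache.hadoop.fs.FileSystem;

public class ListFileSystemProviders {
  public static void main(String[] args) {
    // Only entries under META-INF/services/org.apache.hadoop.fs.FileSystem
    // on the classpath show up here.
    for (FileSystem fs : ServiceLoader.load(FileSystem.class)) {
      System.out.println(fs.getClass().getName());
    }
  }
}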


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5ee7fde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5ee7fde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5ee7fde

Branch: refs/heads/HADOOP-13345
Commit: c5ee7fded46dcb1ac1ea4c1ada4949c50bc89afb
Parents: 846a0cd
Author: John Zhuge 
Authored: Sun Mar 5 22:34:22 2017 -0800
Committer: John Zhuge 
Committed: Thu Mar 9 18:30:17 2017 -0800

--
 .../META-INF/org.apache.hadoop.fs.FileSystem| 16 
 1 file changed, 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5ee7fde/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
 
b/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 7ec7812..0000000
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.adl.AdlFileSystem
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[18/50] [abbrv] hadoop git commit: HADOOP-14150. Implement getHomeDirectory() method in NativeAzureFileSystem. Contributed by Santhosh G Nayak

2017-03-13 Thread stevel
HADOOP-14150. Implement getHomeDirectory() method in NativeAzureFileSystem. 
Contributed by Santhosh G Nayak
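
Usage sketch (account and container names are placeholders): the home directory is now derived from the UGI short user name, rather than the base FileSystem fallback, which builds the path from the JVM's user.name system property.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WasbHomeDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(
        URI.create("wasb://container@account.blob.core.windows.net/"), conf);
    Path home = fs.getHomeDirectory();
    System.out.println(home); // .../user/<short user name>
  }
}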


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28daaf0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28daaf0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28daaf0e

Branch: refs/heads/HADOOP-13345
Commit: 28daaf0eb206d723d2baf0f9d91e43d98bb2fd26
Parents: 1598fd3
Author: Mingliang Liu 
Authored: Tue Mar 7 14:55:52 2017 -0800
Committer: Mingliang Liu 
Committed: Tue Mar 7 14:55:52 2017 -0800

--
 .../org/apache/hadoop/fs/azure/NativeAzureFileSystem.java | 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28daaf0e/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 6de0a28..9aebbb5 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -84,6 +84,7 @@ import com.microsoft.azure.storage.StorageException;
 @InterfaceStability.Stable
 public class NativeAzureFileSystem extends FileSystem {
   private static final int USER_WX_PERMISION = 0300;
+  private static final String USER_HOME_DIR_PREFIX_DEFAULT = "/user";
   /**
* A description of a folder rename operation, including the source and
* destination keys, and descriptions of the files in the source folder.
@@ -1129,6 +1130,8 @@ public class NativeAzureFileSystem extends FileSystem {
*/
   private WasbAuthorizerInterface authorizer = null;
 
+  private UserGroupInformation ugi;
+
   private String delegationToken = null;
 
   public NativeAzureFileSystem() {
@@ -1247,6 +1250,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
 store.initialize(uri, conf, instrumentation);
 setConf(conf);
+this.ugi = UserGroupInformation.getCurrentUser();
 this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
 this.workingDir = new Path("/user", UserGroupInformation.getCurrentUser()
 .getShortUserName()).makeQualified(getUri(), getWorkingDirectory());
@@ -1276,6 +1280,12 @@ public class NativeAzureFileSystem extends FileSystem {
 }
   }
 
+  @Override
+  public Path getHomeDirectory() {
+return makeQualified(new Path(
+USER_HOME_DIR_PREFIX_DEFAULT + "/" + this.ugi.getShortUserName()));
+  }
+
   @VisibleForTesting
   public void updateWasbAuthorizer(WasbAuthorizerInterface authorizer) {
 this.authorizer = authorizer;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[28/50] [abbrv] hadoop git commit: HDFS-11314. Enforce set of enabled EC policies on the NameNode.

2017-03-13 Thread stevel
HDFS-11314. Enforce set of enabled EC policies on the NameNode.
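
Administratively, the gate is the new dfs.namenode.ec.policies.enabled key (default RS-6-3-64k per the diff below); attempting to set a policy outside the list is rejected by the NameNode. Minimal configuration sketch:

import org.apache.hadoop.conf.Configuration;

public class EnabledEcPoliciesSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Only policies named here may be applied to a directory.
    conf.set("dfs.namenode.ec.policies.enabled", "RS-6-3-64k");
    System.out.println(conf.get("dfs.namenode.ec.policies.enabled"));
  }
}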


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33a38a53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33a38a53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33a38a53

Branch: refs/heads/HADOOP-13345
Commit: 33a38a534110de454662256545a7f4c075d328c8
Parents: 5ca6ef0
Author: Andrew Wang 
Authored: Wed Mar 8 16:41:44 2017 -0800
Committer: Andrew Wang 
Committed: Wed Mar 8 16:41:44 2017 -0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +
 .../namenode/ErasureCodingPolicyManager.java|  97 
 .../server/namenode/FSDirErasureCodingOp.java   |  54 +++
 .../server/namenode/FSImageFormatPBINode.java   |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   4 +-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  24 ++-
 .../src/main/resources/hdfs-default.xml |   9 ++
 .../src/site/markdown/HDFSErasureCoding.md  |  45 +++---
 .../apache/hadoop/cli/TestErasureCodingCLI.java |   3 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  11 ++
 .../TestDFSRSDefault10x4StripedInputStream.java |   2 +-
 ...TestDFSRSDefault10x4StripedOutputStream.java |   2 +-
 ...fault10x4StripedOutputStreamWithFailure.java |   2 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |   2 +
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   1 +
 .../TestDFSStripedOutputStreamWithFailure.java  |   1 +
 .../hdfs/TestDFSXORStripedInputStream.java  |   2 +-
 .../hdfs/TestDFSXORStripedOutputStream.java |   2 +-
 ...estDFSXORStripedOutputStreamWithFailure.java |   2 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  41 -
 .../TestUnsetAndChangeDirectoryEcPolicy.java|   5 +-
 .../server/namenode/TestEnabledECPolicies.java  | 151 +++
 .../hdfs/server/namenode/TestFSImage.java   |   9 +-
 .../TestOfflineImageViewer.java |  11 +-
 .../test/resources/testErasureCodingConf.xml|  21 ++-
 26 files changed, 399 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a38a53/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 82d6073..3fc4980 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -562,6 +562,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT =
   "10m";
 
+  public static final String  DFS_NAMENODE_EC_POLICIES_ENABLED_KEY = 
"dfs.namenode.ec.policies.enabled";
+  public static final String  DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT = 
"RS-6-3-64k";
   public static final String  
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY = 
"dfs.datanode.ec.reconstruction.stripedread.threads";
   public static final int 
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT = 20;
   public static final String  
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY = 
"dfs.datanode.ec.reconstruction.stripedread.buffer.size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a38a53/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index a1b2270..02cbbdf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -18,12 +18,17 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 
+import java.util.Arrays;
 import java.util.Map;
 import java.util.TreeMap;
+import 

[16/50] [abbrv] hadoop git commit: MAPREDUCE-6839. TestRecovery.testCrashed failed (pairg via rkanter)

2017-03-13 Thread stevel
MAPREDUCE-6839. TestRecovery.testCrashed failed (pairg via rkanter)
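
The flakiness came from asserting a state snapshot that races the asynchronous dispatcher; polling until the state is reached removes the race. Beyond the app.waitForState() used in the patch below, the same pattern can be written with Hadoop's GenericTestUtils (a sketch under the assumption that reduceTask is in scope inside the test and that the Supplier-based waitFor overload is available):

GenericTestUtils.waitFor(
    () -> reduceTask.getReport().getTaskState() == TaskState.RUNNING,
    100,     // re-check every 100 ms
    10000);  // fail the test after 10 s without reaching RUNNING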


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38d75dfd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38d75dfd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38d75dfd

Branch: refs/heads/HADOOP-13345
Commit: 38d75dfd3a643f8a1acd52e025a466d65065b60e
Parents: e0c239c
Author: Robert Kanter 
Authored: Tue Mar 7 13:34:46 2017 -0800
Committer: Robert Kanter 
Committed: Tue Mar 7 13:34:46 2017 -0800

--
 .../apache/hadoop/mapreduce/v2/app/TestRecovery.java| 12 +++-
 1 file changed, 3 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38d75dfd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
index 071575a..6332c5d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
@@ -159,9 +159,7 @@ public class TestRecovery {
 app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
 app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
 
-// reduces must be in NEW state
-Assert.assertEquals("Reduce Task state not correct",
-TaskState.RUNNING, reduceTask.getReport().getTaskState());
+app.waitForState(reduceTask, TaskState.RUNNING);
 
 /// Play some games with the TaskAttempts of the first task //
 //send the fail signal to the 1st map task attempt
@@ -1301,9 +1299,7 @@ public class TestRecovery {
 app.waitForState(task1Attempt2, TaskAttemptState.RUNNING);
 app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
 
-// reduces must be in NEW state
-Assert.assertEquals("Reduce Task state not correct",
-TaskState.RUNNING, reduceTask.getReport().getTaskState());
+app.waitForState(reduceTask, TaskState.RUNNING);
 
 //send the done signal to the map 1 attempt 1
 app.getContext().getEventHandler().handle(
@@ -1431,9 +1427,7 @@ public class TestRecovery {
 app.waitForState(task1Attempt, TaskAttemptState.RUNNING);
 app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
 
-// reduces must be in NEW state
-Assert.assertEquals("Reduce Task state not correct",
-TaskState.RUNNING, reduceTask.getReport().getTaskState());
+app.waitForState(reduceTask, TaskState.RUNNING);
 
 //send the done signal to the 1st map attempt
 app.getContext().getEventHandler().handle(


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[17/50] [abbrv] hadoop git commit: YARN-6275. Fail to show real-time tracking charts in SLS (yufeigu via rkanter)

2017-03-13 Thread stevel
YARN-6275. Fail to show real-time tracking charts in SLS (yufeigu via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1598fd3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1598fd3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1598fd3b

Branch: refs/heads/HADOOP-13345
Commit: 1598fd3b7948b3592775e3be3227c4a336122bc9
Parents: 38d75df
Author: Robert Kanter 
Authored: Tue Mar 7 13:47:52 2017 -0800
Committer: Robert Kanter 
Committed: Tue Mar 7 13:47:52 2017 -0800

--
 hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh   | 8 ++--
 .../main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java  | 2 ++
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1598fd3b/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
--
diff --git a/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh 
b/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
index 19b5c34..fb53045 100644
--- a/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
+++ b/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
@@ -103,12 +103,16 @@ function run_simulation() {
   hadoop_java_exec sls org.apache.hadoop.yarn.sls.SLSRunner ${args}
 }
 
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+
+# copy 'html' directory to current directory to make sure web server can access
+cp -r "${bin}/../html" "$(pwd)"
+
 # let's locate libexec...
 if [[ -n "${HADOOP_HOME}" ]]; then
   HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
 else
-  this="${BASH_SOURCE-$0}"
-  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
   HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../../../../../libexec"
 fi
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1598fd3b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
index abdf106..33d4846 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.sls.SLSRunner;
 import org.apache.hadoop.yarn.sls.scheduler.FairSchedulerMetrics;
 import org.apache.hadoop.yarn.sls.scheduler.SchedulerMetrics;
 import org.apache.hadoop.yarn.sls.scheduler.SchedulerWrapper;
+import org.eclipse.jetty.http.MimeTypes;
 import org.eclipse.jetty.server.Handler;
 import org.eclipse.jetty.server.Request;
 import org.eclipse.jetty.server.Server;
@@ -118,6 +119,7 @@ public class SLSWebApp extends HttpServlet {
   public void start() throws Exception {
 // static files
 final ResourceHandler staticHandler = new ResourceHandler();
+staticHandler.setMimeTypes(new MimeTypes());
 staticHandler.setResourceBase("html");
 
 Handler handler = new AbstractHandler() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[24/50] [abbrv] hadoop git commit: HADOOP-14062. ApplicationMasterProtocolPBClientImpl.allocate fails with EOFException when RPC privacy is enabled. Contributed by Steven Rand

2017-03-13 Thread stevel
HADOOP-14062. ApplicationMasterProtocolPBClientImpl.allocate fails with 
EOFException when RPC privacy is enabled. Contributed by Steven Rand
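
Shape of the fix (saslIn stands in for the stream returned by SaslRpcClient.getInputStream, and the helper class is illustrative): RPC responses are length-prefixed, and buffering lets the length and payload be assembled from however many short reads the SASL unwrapping produces.

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.InputStream;

final class SaslStreamWrapping {
  static DataInputStream wrap(InputStream saslIn) {
    // Fill a buffer before the length prefix is read (per the comment
    // added in the diff below), instead of reading the raw SASL stream.
    return new DataInputStream(new BufferedInputStream(saslIn));
  }
}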


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/241c1cc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/241c1cc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/241c1cc0

Branch: refs/heads/HADOOP-13345
Commit: 241c1cc05b71f8b719a85c06e3df930639630726
Parents: 287ba4f
Author: Jian He 
Authored: Wed Mar 8 10:48:27 2017 -0800
Committer: Jian He 
Committed: Wed Mar 8 10:48:27 2017 -0800

--
 .../main/java/org/apache/hadoop/ipc/Client.java |  4 +++-
 .../yarn/client/api/impl/TestAMRMClient.java| 24 
 2 files changed, 27 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/241c1cc0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 70b902c..c0a5be9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1768,7 +1768,9 @@ public class Client implements AutoCloseable {
 }
 
 void setSaslClient(SaslRpcClient client) throws IOException {
-  setInputStream(client.getInputStream(in));
+  // Wrap the input stream in a BufferedInputStream to fill the buffer
+  // before reading its length (HADOOP-14062).
+  setInputStream(new BufferedInputStream(client.getInputStream(in)));
   setOutputStream(client.getOutputStream(out));
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/241c1cc0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index 43c0271..a52963a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -137,6 +137,11 @@ public class TestAMRMClient {
 // set the minimum allocation so that resource decrease can go under 1024
 conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
 conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
+createClientAndCluster(conf);
+  }
+
+  private static void createClientAndCluster(Configuration conf)
+  throws Exception {
 yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), 
nodeCount, 1, 1);
 yarnCluster.init(conf);
 yarnCluster.start();
@@ -862,6 +867,25 @@ public class TestAMRMClient {
   }
 
  @Test (timeout=60000)
+  public void testAMRMClientWithSaslEncryption() throws Exception {
+conf.set("hadoop.rpc.protection", "privacy");
+// we have to create a new instance of MiniYARNCluster to avoid SASL qop
+// mismatches between client and server
+tearDown();
+createClientAndCluster(conf);
+startApp();
+initAMRMClientAndTest(false);
+
+// recreate the original MiniYARNCluster and YarnClient for other tests
+conf.unset("hadoop.rpc.protection");
+tearDown();
+createClientAndCluster(conf);
+// unless we start an application the cancelApp() method will fail when
+// it runs after this test
+startApp();
+  }
+
  @Test (timeout=60000)
   public void testAMRMClientAllocReqId() throws YarnException, IOException {
 initAMRMClientAndTest(true);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HADOOP-13345

2017-03-13 Thread stevel
Merge branch 'trunk' into HADOOP-13345


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/566c8b74
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/566c8b74
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/566c8b74

Branch: refs/heads/HADOOP-13345
Commit: 566c8b74fe7a5a72fc2cc8c8e5a449b0070d6b7c
Parents: 090924a 7992426
Author: Steve Loughran 
Authored: Mon Mar 13 13:55:38 2017 +
Committer: Steve Loughran 
Committed: Mon Mar 13 13:55:38 2017 +

--
 hadoop-client-modules/hadoop-client/pom.xml |   4 -
 .../hadoop-cloud-storage/pom.xml|   4 -
 hadoop-common-project/hadoop-common/pom.xml |   5 -
 .../src/main/conf/log4j.properties  |   9 +
 .../fs/CommonConfigurationKeysPublic.java   |  29 +-
 .../org/apache/hadoop/fs/shell/Command.java |  14 +-
 .../java/org/apache/hadoop/io/SequenceFile.java |  20 +-
 .../main/java/org/apache/hadoop/ipc/Client.java |   4 +-
 .../java/org/apache/hadoop/net/InnerNode.java   |   2 +-
 .../org/apache/hadoop/net/InnerNodeImpl.java|  22 +-
 .../org/apache/hadoop/net/NetworkTopology.java  |  27 +-
 .../java/org/apache/hadoop/util/ConfTest.java   |   2 +-
 .../org/apache/hadoop/util/StringUtils.java |  11 +
 .../src/main/resources/core-default.xml |  35 ++
 .../src/site/markdown/CLIMiniCluster.md.vm  |   1 +
 .../src/site/markdown/ClusterSetup.md   |   4 +-
 .../hadoop-common/src/site/markdown/Metrics.md  |   7 +-
 .../site/markdown/filesystem/introduction.md|  85 
 .../conf/TestCommonConfigurationFields.java |   1 +
 .../org/apache/hadoop/io/TestSequenceFile.java  |  68 ++-
 .../apache/hadoop/net/TestClusterTopology.java  |   5 +-
 hadoop-common-project/hadoop-kms/pom.xml|   4 -
 .../key/kms/server/KMSAuthenticationFilter.java |   3 +-
 .../hadoop-kms/src/site/markdown/index.md.vm|   2 +-
 .../apache/hadoop/oncrpc/SimpleTcpServer.java   |   1 +
 .../apache/hadoop/oncrpc/SimpleUdpServer.java   |   1 +
 .../java/org/apache/hadoop/portmap/Portmap.java |   2 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   4 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  11 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|  10 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java|   5 +-
 .../hadoop/hdfs/protocol/DatanodeInfo.java  |  52 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java |  10 +-
 .../hadoop/hdfs/protocol/HdfsFileStatus.java|   2 +-
 .../ClientNamenodeProtocolTranslatorPB.java |   6 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |   8 +-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |   2 +
 .../hdfs/web/resources/DeleteOpParam.java   |  11 +-
 .../hadoop/hdfs/web/resources/GetOpParam.java   |  11 +-
 .../hadoop/hdfs/web/resources/PostOpParam.java  |  11 +-
 .../hadoop/hdfs/web/resources/PutOpParam.java   |  11 +-
 .../src/main/proto/erasurecoding.proto  |   2 +-
 .../src/main/proto/hdfs.proto   |   2 +
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml  |   8 -
 .../dev-support/findbugsExcludeFile.xml |   6 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  16 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java |   4 +-
 .../server/blockmanagement/BlockManager.java|  22 +-
 .../blockmanagement/DatanodeDescriptor.java |  13 +
 .../server/blockmanagement/SlowPeerTracker.java |   4 +-
 .../apache/hadoop/hdfs/server/common/Util.java  |  17 +
 .../hdfs/server/datanode/BPServiceActor.java|   2 +-
 .../hadoop/hdfs/server/datanode/DNConf.java |  17 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  83 +++-
 .../hdfs/server/datanode/FileIoProvider.java|   2 +-
 .../server/datanode/ProfilingFileIoEvents.java  |  11 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  28 +-
 .../datanode/fsdataset/impl/FsVolumeList.java   |  13 +-
 .../datanode/metrics/DataNodeDiskMetrics.java   | 181 +++
 .../datanode/metrics/DataNodePeerMetrics.java   |   6 +-
 .../datanode/metrics/OutlierDetector.java   | 182 +++
 .../datanode/metrics/SlowNodeDetector.java  | 194 
 .../web/RestCsrfPreventionFilterHandler.java|   6 +-
 .../datanode/web/webhdfs/WebHdfsHandler.java|  33 +-
 .../namenode/ErasureCodingPolicyManager.java| 107 ++--
 .../server/namenode/FSDirErasureCodingOp.java   | 139 +++---
 .../server/namenode/FSDirStatAndListingOp.java  |   4 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  48 +-
 .../hdfs/server/namenode/FSDirectory.java   |   3 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  23 +-
 .../hdfs/server/namenode/FSImageFormat.java |   6 +-
 .../server/namenode/FSImageFormatPBINode.java   |  29 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  28 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  79 ++-
 

[26/50] [abbrv] hadoop git commit: YARN-6165. Intra-queue preemption occurs even when preemption is turned off for a specific queue. Contributed by Eric Payne

2017-03-13 Thread stevel
YARN-6165. Intra-queue preemption occurs even when preemption is turned off for 
a specific queue. Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7762a55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7762a55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7762a55

Branch: refs/heads/HADOOP-13345
Commit: d7762a55113a529abd6f4ecb8e6d9b0a84b56e08
Parents: 2be8947
Author: Jason Lowe 
Authored: Wed Mar 8 16:46:09 2017 -0600
Committer: Jason Lowe 
Committed: Wed Mar 8 16:46:09 2017 -0600

--
 .../capacity/IntraQueueCandidatesSelector.java  |  5 ++
 ...ionalCapacityPreemptionPolicyIntraQueue.java | 55 
 2 files changed, 60 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7762a55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
index 4f2b272..2890414 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
@@ -112,6 +112,11 @@ public class IntraQueueCandidatesSelector extends 
PreemptionCandidatesSelector {
   continue;
 }
 
+// Don't preempt if disabled for this queue.
+if (leafQueue.getPreemptionDisabled()) {
+  continue;
+}
+
 // 5. Calculate the resource to obtain per partition
  Map<String, Resource> resToObtainByPartition = 
fifoPreemptionComputePlugin
 .getResourceDemandFromAppsPerQueue(queueName, partition);
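
For context, getPreemptionDisabled() surfaces the standard per-queue
CapacityScheduler switch. A hedged sketch of setting that switch
programmatically, assuming the example queue path "root.a" used by the test
below:

import org.apache.hadoop.conf.Configuration;

final class PreemptionConfSketch {
  static Configuration disablePreemptionForQueue(Configuration csConf) {
    // Per-queue flag read back through LeafQueue#getPreemptionDisabled();
    // "root.a" is an example queue path, not a required name.
    csConf.setBoolean(
        "yarn.scheduler.capacity.root.a.disable_preemption", true);
    return csConf;
  }
}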

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7762a55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueue.java
index 19fb0d2..bf83e1c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueue.java
@@ -106,6 +106,61 @@ public class 
TestProportionalCapacityPreemptionPolicyIntraQueue
   }
 
   @Test
+  public void testNoIntraQueuePreemptionWithPreemptionDisabledOnQueues()
+  throws IOException {
+/**
+ * This test has the same configuration as testSimpleIntraQueuePreemption
+ * except that preemption is disabled specifically for each queue. The
+ * purpose is to test that disabling preemption on a specific queue will
+ * avoid intra-queue preemption.
+ */
+conf.setPreemptionDisabled("root.a", true);
+conf.setPreemptionDisabled("root.b", true);
+conf.setPreemptionDisabled("root.c", true);
+conf.setPreemptionDisabled("root.d", true);
+
+String labelsConfig = "=100,true;";
+String nodesConfig = // n1 has no label
+"n1= res=100";
+String queuesConfig =
+// guaranteed,max,used,pending,reserved
+"root(=[100 100 80 120 0]);" + // root
+"-a(=[11 100 11 50 0]);" + // a
+"-b(=[40 100 38 60 0]);" + // b
+"-c(=[20 100 10 10 0]);" + // c
+

[48/50] [abbrv] hadoop git commit: HADOOP-14156. Fix grammar error in ConfTest.java.

2017-03-13 Thread stevel
HADOOP-14156. Fix grammar error in ConfTest.java.

This closes #187

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04a5f5a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04a5f5a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04a5f5a6

Branch: refs/heads/HADOOP-13345
Commit: 04a5f5a6dc88769cca8b1a15057a0756712b5013
Parents: 4db9cc7
Author: Andrey Dyatlov 
Authored: Mon Feb 6 19:05:58 2017 +0100
Committer: Akira Ajisaka 
Committed: Mon Mar 13 16:15:53 2017 +0900

--
 .../src/main/java/org/apache/hadoop/util/ConfTest.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04a5f5a6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
index 3f37f5a..1915e79 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
@@ -269,7 +269,7 @@ public final class ConfTest {
 } else {
   String confDirName = System.getenv(HADOOP_CONF_DIR);
   if (confDirName == null) {
-terminate(1, HADOOP_CONF_DIR + " does not defined");
+terminate(1, HADOOP_CONF_DIR + " is not defined");
   }
   File confDir = new File(confDirName);
   if (!confDir.isDirectory()) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[11/50] [abbrv] hadoop git commit: Treat encrypted files as private. Contributed by Daniel Templeton.

2017-03-13 Thread stevel
Treat encrypted files as private. Contributed by Daniel Templeton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f01a69f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f01a69f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f01a69f8

Branch: refs/heads/HADOOP-13345
Commit: f01a69f84f4cc7d925d078a7ce32e5800da4e429
Parents: 1441398
Author: Akira Ajisaka 
Authored: Tue Mar 7 13:22:11 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Mar 7 13:22:11 2017 +0900

--
 .../filecache/ClientDistributedCacheManager.java   | 17 ++---
 1 file changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f01a69f8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
index 73a0330..9f8edb5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
@@ -294,10 +294,21 @@ public class ClientDistributedCacheManager {
   FsAction action, Map<URI, FileStatus> statCache) throws IOException {
 FileStatus status = getFileStatus(fs, path.toUri(), statCache);
 FsPermission perms = status.getPermission();
-FsAction otherAction = perms.getOtherAction();
-if (otherAction.implies(action)) {
-  return true;
+
+// Encrypted files are always treated as private. This stance has two
+// important side effects.  The first is that the encrypted files will be
+// downloaded as the job owner instead of the YARN user, which is required
+// for the KMS ACLs to work as expected.  Second, it prevents a file with
+// world readable permissions that is stored in an encryption zone from
+// being localized as a publicly shared file with world readable
+// permissions.
+if (!perms.getEncryptedBit()) {
+  FsAction otherAction = perms.getOtherAction();
+  if (otherAction.implies(action)) {
+return true;
+  }
 }
+
 return false;
   }
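
The same decision is observable from client code: a file stored in an
encryption zone reports the encrypted bit on its permissions, so
world-readable permission bits no longer imply public visibility. A small
illustrative sketch mirroring the patched check (the path is an example):

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;

final class CacheVisibilitySketch {
  // World-readable is not enough once the encrypted bit is set.
  static boolean publiclyVisible(FileSystem fs, Path p) throws IOException {
    FileStatus st = fs.getFileStatus(p);
    return !st.getPermission().getEncryptedBit()
        && st.getPermission().getOtherAction().implies(FsAction.READ);
  }
}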
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[12/50] [abbrv] hadoop git commit: HADOOP-14087. S3A typo in pom.xml test exclusions. Contributed by Aaron Fabbri.

2017-03-13 Thread stevel
HADOOP-14087. S3A typo in pom.xml test exclusions. Contributed by Aaron Fabbri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f597f4c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f597f4c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f597f4c4

Branch: refs/heads/HADOOP-13345
Commit: f597f4c43e0a6e2304b9bcaf727d6d8d15a365f9
Parents: f01a69f
Author: Akira Ajisaka 
Authored: Tue Mar 7 15:14:55 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Mar 7 15:14:55 2017 +0900

--
 hadoop-tools/hadoop-aws/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f597f4c4/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index e5bbbfd..c188055 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -184,7 +184,7 @@
 **/ITest*Root*.java
 **/ITestS3AFileContextStatistics.java
 **/ITestS3AEncryptionSSE*.java
-**/ITestS3AHuge*.java
+**/ITestS3AHuge*.java
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[44/50] [abbrv] hadoop git commit: YARN-6321. TestResources test timeouts are too aggressive. Contributed by Eric Badger

2017-03-13 Thread stevel
YARN-6321. TestResources test timeouts are too aggressive. Contributed by Eric 
Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9649c278
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9649c278
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9649c278

Branch: refs/heads/HADOOP-13345
Commit: 9649c27864a23ea156bae904368c1d3cf94c6e9d
Parents: 092ec39
Author: Jason Lowe 
Authored: Fri Mar 10 13:05:55 2017 -0600
Committer: Jason Lowe 
Committed: Fri Mar 10 13:06:54 2017 -0600

--
 .../org/apache/hadoop/yarn/util/resource/TestResources.java| 6 +++---
 .../yarn/server/resourcemanager/resource/TestResources.java| 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9649c278/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
index f8570a8..d79179a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
@@ -30,7 +30,7 @@ public class TestResources {
 return Resource.newInstance(memory, vCores);
   }
 
-  @Test(timeout=1000)
+  @Test(timeout=10000)
   public void testCompareToWithUnboundedResource() {
 assertTrue(Resources.unbounded().compareTo(
 createResource(Long.MAX_VALUE, Integer.MAX_VALUE)) == 0);
@@ -40,7 +40,7 @@ public class TestResources {
 createResource(0, Integer.MAX_VALUE)) > 0);
   }
 
-  @Test(timeout=1000)
+  @Test(timeout=10000)
   public void testCompareToWithNoneResource() {
 assertTrue(Resources.none().compareTo(createResource(0, 0)) == 0);
 assertTrue(Resources.none().compareTo(
@@ -49,7 +49,7 @@ public class TestResources {
 createResource(0, 1)) < 0);
   }
 
-  @Test
+  @Test(timeout=10000)
   public void testMultipleRoundUp() {
 final double by = 0.5;
 final String memoryErrorMsg = "Invalid memory size.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9649c278/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java
index ae98660..2a10747 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java
@@ -22,7 +22,7 @@ import static org.junit.Assert.*;
 import org.junit.Test;
 
 public class TestResources {
-  @Test(timeout=1000)
+  @Test(timeout=10000)
   public void testFitsIn() {
 assertTrue(fitsIn(createResource(1, 1), createResource(2, 2)));
 assertTrue(fitsIn(createResource(2, 2), createResource(2, 2)));
@@ -31,7 +31,7 @@ public class TestResources {
 assertFalse(fitsIn(createResource(2, 1), createResource(1, 2)));
   }
   
-  @Test(timeout=1000)
+  @Test(timeout=10000)
   public void testComponentwiseMin() {
 assertEquals(createResource(1, 1),
 componentwiseMin(createResource(1, 1), createResource(2, 2)));


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[13/50] [abbrv] hadoop git commit: HDFS-11477. Simplify file IO profiling configuration. Contributed by Hanisha Koneru.

2017-03-13 Thread stevel
HDFS-11477. Simplify file IO profiling configuration. Contributed by Hanisha 
Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/959940b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/959940b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/959940b0

Branch: refs/heads/HADOOP-13345
Commit: 959940b0ab563b4e42bace44f1dc9a8babcaa889
Parents: f597f4c
Author: Arpit Agarwal 
Authored: Tue Mar 7 10:12:35 2017 -0800
Committer: Arpit Agarwal 
Committed: Tue Mar 7 10:12:35 2017 -0800

--
 .../hadoop-common/src/site/markdown/Metrics.md |  7 ++-
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  6 +-
 .../org/apache/hadoop/hdfs/server/common/Util.java | 17 +
 .../apache/hadoop/hdfs/server/datanode/DNConf.java |  7 ---
 .../hdfs/server/datanode/FileIoProvider.java   |  2 +-
 .../server/datanode/ProfilingFileIoEvents.java | 11 +--
 .../server/datanode/TestDataNodeVolumeMetrics.java |  4 ++--
 .../apache/hadoop/tools/TestHdfsConfigFields.java  |  2 --
 8 files changed, 36 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/959940b0/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 7900692..a8bdbeb 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -332,7 +332,12 @@ Each metrics record contains tags such as SessionId and 
Hostname as additional i
 FsVolume
 
 
-Per-volume metrics contain Datanode Volume IO related statistics. Per-volume 
metrics are off by default. They can be enbabled by setting 
`dfs.datanode.enable.fileio.profiling` to **true**, but enabling per-volume 
metrics may have a performance impact. Each metrics record contains tags such 
as Hostname as additional information along with metrics.
+Per-volume metrics contain Datanode Volume IO related statistics. Per-volume
+metrics are off by default. They can be enabled by setting `dfs.datanode
+.fileio.profiling.sampling.fraction` to a fraction between 0.0 and 1.0.
+Setting this value to 0.0 would mean profiling is not enabled. But enabling
+per-volume metrics may have a performance impact. Each metrics record
+contains tags such as Hostname as additional information along with metrics.
 
 | Name | Description |
 |: |: |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/959940b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index be20829..82d6073 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -718,10 +718,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final long DFS_EDIT_LOG_TRANSFER_RATE_DEFAULT = 0; //no 
throttling
 
   // Datanode File IO Stats
-  public static final String DFS_DATANODE_ENABLE_FILEIO_PROFILING_KEY =
-  "dfs.datanode.enable.fileio.profiling";
-  public static final boolean DFS_DATANODE_ENABLE_FILEIO_PROFILING_DEFAULT =
-  false;
   public static final String DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY =
   "dfs.datanode.enable.fileio.fault.injection";
   public static final boolean
@@ -730,7 +726,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY =
   "dfs.datanode.fileio.profiling.sampling.fraction";
   public static final double
-  DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEAFULT = 1.0;
+  DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT = 0.0;
 
   //Keys with no defaults
   public static final String  DFS_DATANODE_PLUGINS_KEY = 
"dfs.datanode.plugins";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/959940b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
index 9c67f0a..fdb09df 100644
--- 

[08/50] [abbrv] hadoop git commit: Revert "HADOOP-13930. Azure: Add Authorization support to WASB. Contributed by Sivaguru Sankaridurg and Dushyanth"

2017-03-13 Thread stevel
Revert "HADOOP-13930. Azure: Add Authorization support to WASB. Contributed by 
Sivaguru Sankaridurg and Dushyanth"

This reverts commit 6b7cd62b8cf12616b13142f2eb2cfc2f25796f0f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52d7d5aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52d7d5aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52d7d5aa

Branch: refs/heads/HADOOP-13345
Commit: 52d7d5aa1a303cf70519a61487641211f4267c6f
Parents: c571cda
Author: Mingliang Liu 
Authored: Mon Mar 6 17:10:11 2017 -0800
Committer: Mingliang Liu 
Committed: Mon Mar 6 17:10:11 2017 -0800

--
 .../src/main/resources/core-default.xml |  10 -
 .../conf/TestCommonConfigurationFields.java |   2 -
 .../fs/azure/AzureNativeFileSystemStore.java|   4 +-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 155 +--
 .../fs/azure/RemoteSASKeyGeneratorImpl.java | 183 
 .../fs/azure/RemoteWasbAuthorizerImpl.java  | 247 -
 .../fs/azure/SecureStorageInterfaceImpl.java|   6 +-
 .../fs/azure/WasbAuthorizationException.java|  40 ---
 .../fs/azure/WasbAuthorizationOperations.java   |  44 ---
 .../fs/azure/WasbAuthorizerInterface.java   |  47 
 .../hadoop/fs/azure/security/Constants.java |  54 
 .../security/WasbDelegationTokenIdentifier.java |  48 
 .../fs/azure/security/WasbTokenRenewer.java | 124 -
 .../hadoop/fs/azure/security/package.html   |  28 --
 ...apache.hadoop.security.token.TokenIdentifier |  16 --
 ...rg.apache.hadoop.security.token.TokenRenewer |  16 --
 .../hadoop-azure/src/site/markdown/index.md |  34 ---
 .../hadoop/fs/azure/MockWasbAuthorizerImpl.java | 102 ---
 .../TestNativeAzureFileSystemAuthorization.java | 277 ---
 19 files changed, 64 insertions(+), 1373 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52d7d5aa/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 52b58ed..35be56b 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1292,16 +1292,6 @@
 to specify the time (such as 2s, 2m, 1h, etc.).
   
 
-
-  fs.azure.authorization
-  false
-  
-Config flag to enable authorization support in WASB. Setting it to "true" 
enables
-authorization support to WASB. Currently WASB authorization requires a 
remote service
-to provide authorization that needs to be specified via 
fs.azure.authorization.remote.service.url
-configuration
-  
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52d7d5aa/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 7410d29..966a8ac 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -181,8 +181,6 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPropsToSkipCompare.add("io.compression.codec.bzip2.library");
 // - org.apache.hadoop.io.SequenceFile
 xmlPropsToSkipCompare.add("io.seqfile.local.dir");
-// - org.apache.hadoop.fs.azure.NativeAzureFileSystem
-xmlPropsToSkipCompare.add("fs.azure.authorization");
 
 
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52d7d5aa/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 9d7ac80..07c389c 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -303,7 +303,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
   

[06/50] [abbrv] hadoop git commit: HDFS-10838. Last full block report received time for each DN should be easily discoverable. Contributed by Surendra Singh Lilhore.

2017-03-13 Thread stevel
HDFS-10838. Last full block report received time for each DN should be easily 
discoverable. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5adc5c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5adc5c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5adc5c3

Branch: refs/heads/HADOOP-13345
Commit: b5adc5c3011f111f86d232cb33ec522547f68a95
Parents: 5e74196
Author: Arpit Agarwal 
Authored: Mon Mar 6 16:39:53 2017 -0800
Committer: Arpit Agarwal 
Committed: Mon Mar 6 16:39:53 2017 -0800

--
 .../hadoop/hdfs/protocol/DatanodeInfo.java  | 52 +++-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  8 ++-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  2 +
 .../src/main/proto/hdfs.proto   |  2 +
 .../server/blockmanagement/BlockManager.java|  3 ++
 .../hdfs/server/namenode/FSNamesystem.java  |  8 ++-
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|  3 ++
 .../src/main/webapps/hdfs/dfshealth.html|  2 +
 .../src/main/webapps/hdfs/dfshealth.js  |  1 +
 .../server/namenode/TestNameNodeMXBean.java |  1 +
 10 files changed, 78 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5adc5c3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index acbcffa..e1698c9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -85,6 +85,8 @@ public class DatanodeInfo extends DatanodeID implements Node {
 
   protected AdminStates adminState;
   private long maintenanceExpireTimeInMS;
+  private long lastBlockReportTime;
+  private long lastBlockReportMonotonic;
 
   protected DatanodeInfo(DatanodeInfo from) {
 super(from);
@@ -101,6 +103,8 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.location = from.getNetworkLocation();
 this.adminState = from.getAdminState();
 this.upgradeDomain = from.getUpgradeDomain();
+this.lastBlockReportTime = from.getLastBlockReportTime();
+this.lastBlockReportMonotonic = from.getLastBlockReportMonotonic();
   }
 
   protected DatanodeInfo(DatanodeID nodeID) {
@@ -116,6 +120,8 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.lastUpdateMonotonic = 0L;
 this.xceiverCount = 0;
 this.adminState = null;
+this.lastBlockReportTime = 0L;
+this.lastBlockReportMonotonic = 0L;
   }
 
   protected DatanodeInfo(DatanodeID nodeID, String location) {
@@ -131,7 +137,8 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
   final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
   final long lastUpdate, final long lastUpdateMonotonic,
   final int xceiverCount, final String networkLocation,
-  final AdminStates adminState, final String upgradeDomain) {
+  final AdminStates adminState, final String upgradeDomain,
+  final long lastBlockReportTime, final long lastBlockReportMonotonic) {
 super(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
 ipcPort);
 this.capacity = capacity;
@@ -147,6 +154,8 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 this.location = networkLocation;
 this.adminState = adminState;
 this.upgradeDomain = upgradeDomain;
+this.lastBlockReportTime = lastBlockReportTime;
+this.lastBlockReportMonotonic = lastBlockReportMonotonic;
   }
 
   /** Network location name. */
@@ -391,6 +400,11 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 .append(percent2String(cacheRemainingPercent)).append("\n");
 buffer.append("Xceivers: ").append(getXceiverCount()).append("\n");
 buffer.append("Last contact: ").append(new Date(lastUpdate)).append("\n");
+buffer
+.append("Last Block Report: ")
+.append(
+lastBlockReportTime != 0 ? new Date(lastBlockReportTime) : "Never")
+.append("\n");
 return buffer.toString();
   }
 
@@ -503,6 +517,26 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 return this.maintenanceExpireTimeInMS;
   }
 
+  /** Sets the last block report time. */
+  public void setLastBlockReportTime(long lastBlockReportTime) {
+this.lastBlockReportTime = 

[10/50] [abbrv] hadoop git commit: MAPREDUCE-6855. Specify charset when create String in CredentialsTestJob. Contributed by Kai Sasaki.

2017-03-13 Thread stevel
MAPREDUCE-6855. Specify charset when create String in CredentialsTestJob. 
Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14413989
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14413989
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14413989

Branch: refs/heads/HADOOP-13345
Commit: 14413989cac9acc1fa6f8d330fac32f772613325
Parents: 6868235
Author: Akira Ajisaka 
Authored: Tue Mar 7 13:10:59 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Mar 7 13:10:59 2017 +0900

--
 .../org/apache/hadoop/mapreduce/security/CredentialsTestJob.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14413989/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java
index e66fb2f..755e2df 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapreduce.security;
 
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 
 
 import org.apache.hadoop.conf.Configuration;
@@ -64,7 +65,7 @@ public class CredentialsTestJob extends Configured implements 
Tool {
 // fail the test
   }
 
-  String secretValueStr = new String (secretValue);
+  String secretValueStr = new String (secretValue, StandardCharsets.UTF_8);
   System.out.println(secretValueStr);
 
   if  ( !("password"+i).equals(secretValueStr)){
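
The fix matters because new String(byte[]) decodes with the JVM's default
charset (file.encoding), so the comparison against "password"+i could fail on
a non-UTF-8 locale. A minimal illustration:

import java.nio.charset.StandardCharsets;

final class CharsetSketch {
  public static void main(String[] args) {
    byte[] secret = "password0".getBytes(StandardCharsets.UTF_8);
    String platformDependent = new String(secret);  // uses file.encoding
    String deterministic = new String(secret, StandardCharsets.UTF_8);
    // true only when the platform default happens to be UTF-8
    System.out.println(platformDependent.equals(deterministic));
  }
}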


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/50] [abbrv] hadoop git commit: HADOOP-14048. REDO operation of WASB#AtomicRename should create placeholder blob for destination folder. Contributed by NITIN VERMA

2017-03-13 Thread stevel
HADOOP-14048. REDO operation of WASB#AtomicRename should create placeholder 
blob for destination folder. Contributed by NITIN VERMA


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c571cda5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c571cda5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c571cda5

Branch: refs/heads/HADOOP-13345
Commit: c571cda5c7d929477961dfff4176d7de4944d874
Parents: b5adc5c
Author: Mingliang Liu 
Authored: Mon Mar 6 16:53:30 2017 -0800
Committer: Mingliang Liu 
Committed: Mon Mar 6 17:00:13 2017 -0800

--
 .../org/apache/hadoop/fs/azure/NativeAzureFileSystem.java | 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c571cda5/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 0dfefaf..b1956a7 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -566,6 +566,16 @@ public class NativeAzureFileSystem extends FileSystem {
 // Remove the source folder. Don't check explicitly if it exists,
 // to avoid triggering redo recursively.
 try {
+  // Rename the source folder 0-byte root file
+  // as destination folder 0-byte root file.
+  FileMetadata srcMetaData = this.getSourceMetadata();
+  if (srcMetaData.getBlobMaterialization() == 
BlobMaterialization.Explicit) {
+// We already have a lease. So let's just rename the source blob
+// as destination blob under same lease.
+fs.getStoreInterface().rename(this.getSrcKey(), this.getDstKey(), 
false, lease);
+  }
+
+  // Now we can safely delete the source folder.
   fs.getStoreInterface().delete(srcKey, lease);
 } catch (Exception e) {
   LOG.info("Unable to delete source folder during folder rename redo. "


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/50] [abbrv] hadoop git commit: HADOOP-13930. Azure: Add Authorization support to WASB. Contributed by Sivaguru Sankaridurg and Dushyanth

2017-03-13 Thread stevel
HADOOP-13930. Azure: Add Authorization support to WASB. Contributed by Sivaguru 
Sankaridurg and Dushyanth


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68682352
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68682352
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68682352

Branch: refs/heads/HADOOP-13345
Commit: 686823529be09bea2a6cecb3503ef722017475bc
Parents: 52d7d5a
Author: Mingliang Liu 
Authored: Mon Mar 6 17:16:36 2017 -0800
Committer: Mingliang Liu 
Committed: Mon Mar 6 17:16:36 2017 -0800

--
 .../src/main/resources/core-default.xml |  10 +
 .../conf/TestCommonConfigurationFields.java |   1 +
 .../fs/azure/AzureNativeFileSystemStore.java|   5 +-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 116 ++-
 .../fs/azure/RemoteWasbAuthorizerImpl.java  | 190 ++
 .../fs/azure/WasbAuthorizationException.java|  40 +++
 .../fs/azure/WasbAuthorizationOperations.java   |  44 +++
 .../fs/azure/WasbAuthorizerInterface.java   |  53 +++
 .../hadoop/fs/azure/WasbRemoteCallHelper.java   |  71 +++-
 .../hadoop-azure/src/site/markdown/index.md |  34 ++
 .../fs/azure/AzureBlobStorageTestAccount.java   |  61 ++--
 .../hadoop/fs/azure/MockWasbAuthorizerImpl.java | 102 ++
 .../TestNativeAzureFileSystemAuthorization.java | 344 +++
 .../fs/azure/TestWasbRemoteCallHelper.java  | 344 +++
 .../src/test/resources/azure-test.xml   |  28 +-
 15 files changed, 1373 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68682352/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 35be56b..52b58ed 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1292,6 +1292,16 @@
 to specify the time (such as 2s, 2m, 1h, etc.).
   
 
+
+  fs.azure.authorization
+  false
+  
+Config flag to enable authorization support in WASB. Setting it to "true" 
enables
+authorization support to WASB. Currently WASB authorization requires a 
remote service
+to provide authorization that needs to be specified via 
fs.azure.authorization.remote.service.url
+configuration
+  
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68682352/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 966a8ac..cbfb6d1 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -114,6 +114,7 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPropsToSkipCompare.add("fs.azure.sas.expiry.period");
 xmlPropsToSkipCompare.add("fs.azure.local.sas.key.mode");
 xmlPropsToSkipCompare.add("fs.azure.secure.mode");
+xmlPropsToSkipCompare.add("fs.azure.authorization");
 
 // Deprecated properties.  These should eventually be removed from the
 // class.
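
A hedged sketch of what enabling the new flag looks like from client
configuration; the remote-service property name is taken from the
core-default.xml description above, and the endpoint value is a placeholder
supplied by the caller:

import org.apache.hadoop.conf.Configuration;

final class WasbAuthzConfSketch {
  static Configuration withWasbAuthorization(String remoteServiceUrl) {
    Configuration conf = new Configuration();
    conf.setBoolean("fs.azure.authorization", true);
    // WASB consults this remote service for authorization decisions
    // when the flag above is on.
    conf.set("fs.azure.authorization.remote.service.url", remoteServiceUrl);
    return conf;
  }
}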

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68682352/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 07c389c..a8708ec 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -249,7 +249,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
* Default values to control SAS Key mode.
* By default we set the values to false.
*/
-  private static final boolean DEFAULT_USE_SECURE_MODE = false;
+  public static final boolean DEFAULT_USE_SECURE_MODE = false;
   private static final boolean DEFAULT_USE_LOCAL_SAS_KEY_MODE = 

[03/50] [abbrv] hadoop git commit: HDFS-11441. Add escaping to error message in KMS web UI. Contributed by Aaron T. Myers.

2017-03-13 Thread stevel
HDFS-11441. Add escaping to error message in KMS web UI. Contributed by Aaron 
T. Myers.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec839b94
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec839b94
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec839b94

Branch: refs/heads/HADOOP-13345
Commit: ec839b94c0eb3f09e74f8a3b0bc9a08b3f5418b2
Parents: 209ecd1
Author: Andrew Wang 
Authored: Mon Mar 6 10:47:15 2017 -0800
Committer: Andrew Wang 
Committed: Mon Mar 6 10:47:15 2017 -0800

--
 .../hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec839b94/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
index 45e48e9..3e98a25 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
+import org.apache.hadoop.http.HtmlQuoting;
 import 
org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 import 
org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
@@ -105,7 +106,7 @@ public class KMSAuthenticationFilter
 public void sendError(int sc, String msg) throws IOException {
   statusCode = sc;
   this.msg = msg;
-  super.sendError(sc, msg);
+  super.sendError(sc, HtmlQuoting.quoteHtmlChars(msg));
 }
 
 @Override
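
What the one-line change buys, in isolation: quoteHtmlChars escapes the
markup-significant characters, so a reflected error message cannot inject
script into the KMS web UI. A small self-contained example:

import org.apache.hadoop.http.HtmlQuoting;

final class QuotingSketch {
  public static void main(String[] args) {
    String raw = "<script>alert(1)</script>";
    // Prints: &lt;script&gt;alert(1)&lt;/script&gt;
    System.out.println(HtmlQuoting.quoteHtmlChars(raw));
  }
}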


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] [abbrv] hadoop git commit: HDFS-11498. Make RestCsrfPreventionHandler and WebHdfsHandler compatible with Netty 4.0.

2017-03-13 Thread stevel
HDFS-11498. Make RestCsrfPreventionHandler and WebHdfsHandler compatible with 
Netty 4.0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e74196e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e74196e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e74196e

Branch: refs/heads/HADOOP-13345
Commit: 5e74196ede9bfc20eb6d6fe3aa6a0e5c47a40fdd
Parents: d9dc444
Author: Andrew Wang 
Authored: Mon Mar 6 15:04:13 2017 -0800
Committer: Andrew Wang 
Committed: Mon Mar 6 15:04:13 2017 -0800

--
 .../web/RestCsrfPreventionFilterHandler.java|  6 ++--
 .../datanode/web/webhdfs/WebHdfsHandler.java| 33 ++--
 hadoop-project/pom.xml  |  2 +-
 3 files changed, 20 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e74196e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
index f2f0533..4958bb5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.web;
 
-import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION;
-import static io.netty.handler.codec.http.HttpHeaderValues.CLOSE;
+import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
+import static io.netty.handler.codec.http.HttpHeaders.Values.CLOSE;
 import static 
io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR;
 import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
 
@@ -119,7 +119,7 @@ final class RestCsrfPreventionFilterHandler
 
 @Override
 public String getMethod() {
-  return req.method().name();
+  return req.getMethod().name();
 }
 
 @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e74196e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index d2b2ec2..c5fc7ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -29,17 +29,6 @@ import io.netty.handler.codec.http.HttpMethod;
 import io.netty.handler.codec.http.HttpRequest;
 import io.netty.handler.codec.http.QueryStringDecoder;
 import io.netty.handler.stream.ChunkedStream;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.nio.charset.StandardCharsets;
-import java.security.PrivilegedExceptionAction;
-import java.util.EnumSet;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -63,17 +52,27 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.LimitInputStream;
 
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.charset.StandardCharsets;
+import java.security.PrivilegedExceptionAction;
+import java.util.EnumSet;
+
+import static io.netty.handler.codec.http.HttpHeaders.Names.ACCEPT;
+import static 
io.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS;
 import static 
io.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS;
 import static 
io.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_ORIGIN;
+import static 
io.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_MAX_AGE;
 import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
 import static 
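
The mechanical pattern of the patch: Netty 4.1 moved header constants to
HttpHeaderNames/HttpHeaderValues and renamed getMethod() to method(), while
4.1 still carries the 4.0 spellings (deprecated). Reverting to the 4.0
spellings therefore compiles against both lines. A hedged sketch of the
4.0-compatible style the patch adopts:

import io.netty.handler.codec.http.HttpRequest;
import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
import static io.netty.handler.codec.http.HttpHeaders.Values.CLOSE;

final class NettyCompatSketch {
  // getMethod() is the Netty 4.0 accessor; 4.1 offers method() instead.
  static String methodName(HttpRequest req) {
    return req.getMethod().name();
  }

  static boolean wantsClose(HttpRequest req) {
    return CLOSE.equalsIgnoreCase(req.headers().get(CONNECTION));
  }
}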

[01/50] [abbrv] hadoop git commit: HDFS-8741. Proper error msg to be printed when invalid operation type is given to WebHDFS operations. Contributed by Surendra Singh Lilhore.

2017-03-13 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 090924ae4 -> 566c8b74f


HDFS-8741. Proper error msg to be printed when invalid operation type is given 
to WebHDFS operations. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3536ce03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3536ce03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3536ce03

Branch: refs/heads/HADOOP-13345
Commit: 3536ce031ca780d6de83cf67779f571a0142ccc8
Parents: fad766e
Author: Yiqun Lin 
Authored: Mon Mar 6 19:04:03 2017 +0800
Committer: Yiqun Lin 
Committed: Mon Mar 6 19:04:03 2017 +0800

--
 .../hdfs/web/resources/DeleteOpParam.java   | 11 +-
 .../hadoop/hdfs/web/resources/GetOpParam.java   | 11 +-
 .../hadoop/hdfs/web/resources/PostOpParam.java  | 11 +-
 .../hadoop/hdfs/web/resources/PutOpParam.java   | 11 +-
 .../hadoop/hdfs/web/resources/TestParam.java| 41 
 5 files changed, 81 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3536ce03/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
index 25bed1c..e765498 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
@@ -72,7 +72,16 @@ public class DeleteOpParam extends 
HttpOpParam<DeleteOpParam.Op> {
* @param str a string representation of the parameter value.
*/
   public DeleteOpParam(final String str) {
-super(DOMAIN, DOMAIN.parse(str));
+super(DOMAIN, getOp(str));
+  }
+
+  private static Op getOp(String str) {
+try {
+  return DOMAIN.parse(str);
+} catch (IllegalArgumentException e) {
+  throw new IllegalArgumentException(str + " is not a valid " + Type.DELETE
+  + " operation.");
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3536ce03/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index 1321bf6..d32af33 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -111,7 +111,16 @@ public class GetOpParam extends 
HttpOpParam<GetOpParam.Op> {
* @param str a string representation of the parameter value.
*/
   public GetOpParam(final String str) {
-super(DOMAIN, DOMAIN.parse(str));
+super(DOMAIN, getOp(str));
+  }
+
+  private static Op getOp(String str) {
+try {
+  return DOMAIN.parse(str);
+} catch (IllegalArgumentException e) {
+  throw new IllegalArgumentException(str + " is not a valid " + Type.GET
+  + " operation.");
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3536ce03/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
index 56a14c7..305db46 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
@@ -80,7 +80,16 @@ public class PostOpParam extends 
HttpOpParam<PostOpParam.Op> {
* @param str a string representation of the parameter value.
*/
   public PostOpParam(final String str) {
-super(DOMAIN, DOMAIN.parse(str));
+super(DOMAIN, getOp(str));
+  }
+
+  private static Op getOp(String str) {
+try {
+  return DOMAIN.parse(str);
+} catch (IllegalArgumentException e) {
+  throw new IllegalArgumentException(str + " is not a valid " + Type.POST
+  + " 

[04/50] [abbrv] hadoop git commit: YARN-5665. Enhance documentation for yarn.resourcemanager.scheduler.class property. (Yufei Gu via rchiang)

2017-03-13 Thread stevel
YARN-5665. Enhance documentation for yarn.resourcemanager.scheduler.class 
property. (Yufei Gu via rchiang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9dc444d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9dc444d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9dc444d

Branch: refs/heads/HADOOP-13345
Commit: d9dc444dc73fbe23f9e553d63baf83f12c636fa7
Parents: ec839b9
Author: Ray Chiang 
Authored: Mon Mar 6 14:02:49 2017 -0800
Committer: Ray Chiang 
Committed: Mon Mar 6 14:02:49 2017 -0800

--
 .../hadoop-common/src/site/markdown/ClusterSetup.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9dc444d/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
index 1d9e9da..7be6a19 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
@@ -139,7 +139,7 @@ This section deals with important parameters to be 
specified in the given config
 | `yarn.resourcemanager.admin.address` | `ResourceManager` host:port for 
administrative commands. | *host:port* If set, overrides the hostname set in 
`yarn.resourcemanager.hostname`. |
 | `yarn.resourcemanager.webapp.address` | `ResourceManager` web-ui host:port. 
| *host:port* If set, overrides the hostname set in 
`yarn.resourcemanager.hostname`. |
 | `yarn.resourcemanager.hostname` | `ResourceManager` host. | *host* Single 
hostname that can be set in place of setting all `yarn.resourcemanager*address` 
resources. Results in default ports for ResourceManager components. |
-| `yarn.resourcemanager.scheduler.class` | `ResourceManager` Scheduler class. 
| `CapacityScheduler` (recommended), `FairScheduler` (also recommended), or 
`FifoScheduler` |
+| `yarn.resourcemanager.scheduler.class` | `ResourceManager` Scheduler class. 
| `CapacityScheduler` (recommended), `FairScheduler` (also recommended), or 
`FifoScheduler`. Use a fully qualified class name, e.g., 
`org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler`. |
 | `yarn.scheduler.minimum-allocation-mb` | Minimum limit of memory to allocate 
to each container request at the `Resource Manager`. | In MBs |
 | `yarn.scheduler.maximum-allocation-mb` | Maximum limit of memory to allocate 
to each container request at the `Resource Manager`. | In MBs |
 | `yarn.resourcemanager.nodes.include-path` / 
`yarn.resourcemanager.nodes.exclude-path` | List of permitted/excluded 
NodeManagers. | If necessary, use these files to control the list of allowable 
NodeManagers. |
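For readers wiring this up, the setting goes in yarn-site.xml. A sketch using
the FairScheduler class name quoted in the updated row; verify the package
against the Hadoop version in use:

<property>
  <name>yarn.resourcemanager.scheduler.class</name>
  <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
</property>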





[02/50] [abbrv] hadoop git commit: HADOOP-14108. CLI MiniCluster: add an option to specify NameNode HTTP port. Contributed by Takanobu Asanuma.

2017-03-13 Thread stevel
HADOOP-14108. CLI MiniCluster: add an option to specify NameNode HTTP port. 
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/209ecd1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/209ecd1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/209ecd1a

Branch: refs/heads/HADOOP-13345
Commit: 209ecd1a5c056e096a5d804759f88302608d503b
Parents: 3536ce0
Author: Akira Ajisaka 
Authored: Tue Mar 7 01:32:47 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Mar 7 01:34:51 2017 +0900

--
 .../hadoop-common/src/site/markdown/CLIMiniCluster.md.vm  | 1 +
 .../org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java | 7 ++-
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/209ecd1a/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm 
b/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm
index 74c2414..806df0a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm
@@ -55,6 +55,7 @@ There are a number of command line arguments that the users 
can use to control w
 $ -namenode URL of the namenode (default is either the DFS
 $cluster or a temporary dir)
 $ -nnport   NameNode port (default 0--we choose)
+$ -nnhttpport   NameNode HTTP port (default 0--we choose)
 $ -nodemanagers How many nodemanagers to start (default 1)
 $ -nodfs Don't start a mini DFS cluster
 $ -nomr  Don't start a mini MR cluster
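As a usage sketch, the new flag slots in alongside -nnport; the port numbers
below are arbitrary examples, not defaults:

$ bin/mapred minicluster -nnport 8020 -nnhttpport 9870 -datanodes 2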

http://git-wip-us.apache.org/repos/asf/hadoop/blob/209ecd1a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
index 3cc73b5..324f0ca 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
@@ -69,6 +69,7 @@ public class MiniHadoopClusterManager {
   private int numNodeManagers;
   private int numDataNodes;
   private int nnPort;
+  private int nnHttpPort;
   private int rmPort;
   private int jhsPort;
   private StartupOption dfsOpts;
@@ -92,6 +93,8 @@ public class MiniHadoopClusterManager {
 .addOption("datanodes", true, "How many datanodes to start (default 
1)")
 .addOption("format", false, "Format the DFS (default false)")
 .addOption("nnport", true, "NameNode port (default 0--we choose)")
+.addOption("nnhttpport", true,
+"NameNode HTTP port (default 0--we choose)")
 .addOption(
 "namenode",
 true,
@@ -152,7 +155,8 @@ public class MiniHadoopClusterManager {
       URISyntaxException {
     if (!noDFS) {
       dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
-          .numDataNodes(numDataNodes).startupOption(dfsOpts).build();
+          .nameNodeHttpPort(nnHttpPort).numDataNodes(numDataNodes)
+          .startupOption(dfsOpts).build();
       LOG.info("Started MiniDFSCluster -- namenode on port "
           + dfs.getNameNodePort());
     }
@@ -254,6 +258,7 @@ public class MiniHadoopClusterManager {
 noDFS = cli.hasOption("nodfs");
 numDataNodes = intArgument(cli, "datanodes", 1);
 nnPort = intArgument(cli, "nnport", 0);
+nnHttpPort = intArgument(cli, "nnhttpport", 0);
 dfsOpts = cli.hasOption("format") ? StartupOption.FORMAT
 : StartupOption.REGULAR;
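For context, the builder chain above can also be driven directly from code. A
minimal sketch, assuming the hadoop-hdfs test artifacts are on the classpath;
the port values are arbitrary (0 keeps the pick-a-free-port behavior):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Same builder calls as the patch, with literal ports instead of CLI args.
    MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf)
        .nameNodePort(8020)        // NameNode RPC port
        .nameNodeHttpPort(9870)    // the newly plumbed-through HTTP port
        .numDataNodes(1)
        .build();
    System.out.println("Started MiniDFSCluster -- namenode on port "
        + dfs.getNameNodePort());
    dfs.shutdown();
  }
}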
 





hadoop git commit: HDFS-11512. Increase timeout on TestShortCircuitLocalRead#testSkipWithVerifyChecksum. Contributed by Eric Badger.

2017-03-13 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 b4d768123 -> 1f2da39d0


HDFS-11512. Increase timeout on 
TestShortCircuitLocalRead#testSkipWithVerifyChecksum. Contributed by Eric 
Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f2da39d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f2da39d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f2da39d

Branch: refs/heads/branch-2.8
Commit: 1f2da39d058f19220244143bc2d3b38584582e2e
Parents: b4d7681
Author: Yiqun Lin 
Authored: Mon Mar 13 18:22:30 2017 +0800
Committer: Yiqun Lin 
Committed: Mon Mar 13 18:29:57 2017 +0800

--
 .../apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f2da39d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index 69eeb9f..93486c0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -388,7 +388,7 @@ public class TestShortCircuitLocalRead {
     }
   }
 
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testSkipWithVerifyChecksum() throws IOException {
     int size = blockSize;
     Configuration conf = new Configuration();
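For context, the JUnit 4 timeout attribute is wall-clock milliseconds per test
method; the runner fails the test if the body runs longer. A sketch with a
hypothetical test class:

import org.junit.Test;

public class TimeoutSketch {
  // Fails if the method takes longer than 60 seconds, as in the patch above.
  @Test(timeout = 60000)
  public void finishesQuickly() throws Exception {
    Thread.sleep(100);  // stand-in for the short-circuit read under test
  }
}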





hadoop git commit: HDFS-11512. Increase timeout on TestShortCircuitLocalRead#testSkipWithVerifyChecksum. Contributed by Eric Badger.

2017-03-13 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ee1d3105c -> 7a65601dd


HDFS-11512. Increase timeout on 
TestShortCircuitLocalRead#testSkipWithVerifyChecksum. Contributed by Eric 
Badger.

(cherry picked from commit 79924266f8f68e5e7c873e6b12e3b3acfcd708da)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a65601d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a65601d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a65601d

Branch: refs/heads/branch-2
Commit: 7a65601dd9e2549c678f50287296d8c453187e05
Parents: ee1d310
Author: Yiqun Lin 
Authored: Mon Mar 13 18:22:30 2017 +0800
Committer: Yiqun Lin 
Committed: Mon Mar 13 18:24:50 2017 +0800

--
 .../apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a65601d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index 69eeb9f..93486c0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -388,7 +388,7 @@ public class TestShortCircuitLocalRead {
     }
   }
 
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testSkipWithVerifyChecksum() throws IOException {
     int size = blockSize;
     Configuration conf = new Configuration();





hadoop git commit: HDFS-11512. Increase timeout on TestShortCircuitLocalRead#testSkipWithVerifyChecksum. Contributed by Eric Badger.

2017-03-13 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk 04a5f5a6d -> 79924266f


HDFS-11512. Increase timeout on 
TestShortCircuitLocalRead#testSkipWithVerifyChecksum. Contributed by Eric 
Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79924266
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79924266
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79924266

Branch: refs/heads/trunk
Commit: 79924266f8f68e5e7c873e6b12e3b3acfcd708da
Parents: 04a5f5a
Author: Yiqun Lin 
Authored: Mon Mar 13 18:22:30 2017 +0800
Committer: Yiqun Lin 
Committed: Mon Mar 13 18:22:30 2017 +0800

--
 .../apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79924266/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index 55e9795..f2ee48c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -388,7 +388,7 @@ public class TestShortCircuitLocalRead {
     }
   }
 
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testSkipWithVerifyChecksum() throws IOException {
     int size = blockSize;
     Configuration conf = new Configuration();





hadoop git commit: HADOOP-14156. Fix grammar error in ConfTest.java.

2017-03-13 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4db9cc70d -> 04a5f5a6d


HADOOP-14156. Fix grammar error in ConfTest.java.

This closes #187

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04a5f5a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04a5f5a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04a5f5a6

Branch: refs/heads/trunk
Commit: 04a5f5a6dc88769cca8b1a15057a0756712b5013
Parents: 4db9cc7
Author: Andrey Dyatlov 
Authored: Mon Feb 6 19:05:58 2017 +0100
Committer: Akira Ajisaka 
Committed: Mon Mar 13 16:15:53 2017 +0900

--
 .../src/main/java/org/apache/hadoop/util/ConfTest.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04a5f5a6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
index 3f37f5a..1915e79 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
@@ -269,7 +269,7 @@ public final class ConfTest {
     } else {
       String confDirName = System.getenv(HADOOP_CONF_DIR);
       if (confDirName == null) {
-        terminate(1, HADOOP_CONF_DIR + " does not defined");
+        terminate(1, HADOOP_CONF_DIR + " is not defined");
       }
       File confDir = new File(confDirName);
       if (!confDir.isDirectory()) {

