hadoop git commit: HDFS-13184. RBF: Improve the unit test TestRouterRPCClientRetries. Contributed by Yiqun Lin.

2018-02-26 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 6024b3a2a -> 13e86a7ab


HDFS-13184. RBF: Improve the unit test TestRouterRPCClientRetries. Contributed by Yiqun Lin.

(cherry picked from commit 1e85a995d1c7fe3dbf4b36a481feb5fdeeb6015b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13e86a7a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13e86a7a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13e86a7a

Branch: refs/heads/branch-2.9
Commit: 13e86a7ab8216105ea64090380fcfbc72b0d0a1e
Parents: 6024b3a
Author: Yiqun Lin 
Authored: Tue Feb 27 10:48:52 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Feb 27 10:55:39 2018 +0800

--
 .../federation/router/TestRouterRPCClientRetries.java| 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13e86a7a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
index dddcb5a..61e7657 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
@@ -25,6 +25,7 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -64,8 +65,16 @@ public class TestRouterRPCClientRetries {
 .rpc()
 .build();
 
+// reduce IPC client connection retry times and interval time
+Configuration clientConf = new Configuration(false);
+clientConf.setInt(
+CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
+clientConf.setInt(
+CommonConfigurationKeys.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY, 100);
+
 cluster.addRouterOverrides(routerConf);
-cluster.startCluster();
+// override some settings for the client
+cluster.startCluster(clientConf);
 cluster.startRouters();
 cluster.waitClusterUp();
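
For quick reference, a minimal standalone sketch of the client-side override above. It assumes the two CommonConfigurationKeys constants resolve to the properties ipc.client.connect.max.retries and ipc.client.connect.retry.interval (milliseconds); the 1 retry / 100 ms values simply mirror the patch so that a refused connection surfaces in the test almost immediately instead of after the default retry schedule.

  import org.apache.hadoop.conf.Configuration;

  // Sketch only: the same override written with literal property names
  // (assumed to match the CommonConfigurationKeys constants used above).
  Configuration clientConf = new Configuration(false);
  clientConf.setInt("ipc.client.connect.max.retries", 1);       // assumed key name
  clientConf.setInt("ipc.client.connect.retry.interval", 100);  // assumed key name, ms
  // startCluster(clientConf) hands these overrides to the DFS clients the
  // test creates, so a failed RPC connection surfaces almost immediately.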
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13184. RBF: Improve the unit test TestRouterRPCClientRetries. Contributed by Yiqun Lin.

2018-02-26 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 762125b86 -> 1b0aa0e87


HDFS-13184. RBF: Improve the unit test TestRouterRPCClientRetries. Contributed by Yiqun Lin.

(cherry picked from commit 1e85a995d1c7fe3dbf4b36a481feb5fdeeb6015b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b0aa0e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b0aa0e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b0aa0e8

Branch: refs/heads/branch-2
Commit: 1b0aa0e87aca0d6b7e5e3e7597b90ec068e43145
Parents: 762125b
Author: Yiqun Lin 
Authored: Tue Feb 27 10:48:52 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Feb 27 10:54:37 2018 +0800

--
 .../federation/router/TestRouterRPCClientRetries.java| 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b0aa0e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
index dddcb5a..61e7657 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
@@ -25,6 +25,7 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -64,8 +65,16 @@ public class TestRouterRPCClientRetries {
 .rpc()
 .build();
 
+// reduce IPC client connection retry times and interval time
+Configuration clientConf = new Configuration(false);
+clientConf.setInt(
+CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
+clientConf.setInt(
+CommonConfigurationKeys.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY, 100);
+
 cluster.addRouterOverrides(routerConf);
-cluster.startCluster();
+// override some settings for the client
+cluster.startCluster(clientConf);
 cluster.startRouters();
 cluster.waitClusterUp();
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13184. RBF: Improve the unit test TestRouterRPCClientRetries. Contributed by Yiqun Lin.

2018-02-26 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 26395aef6 -> 09017aee9


HDFS-13184. RBF: Improve the unit test TestRouterRPCClientRetries. Contributed by Yiqun Lin.

(cherry picked from commit 1e85a995d1c7fe3dbf4b36a481feb5fdeeb6015b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09017aee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09017aee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09017aee

Branch: refs/heads/branch-3.0
Commit: 09017aee903bed4d09918b6176757d30a8cd3b03
Parents: 26395ae
Author: Yiqun Lin 
Authored: Tue Feb 27 10:48:52 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Feb 27 10:52:49 2018 +0800

--
 .../federation/router/TestRouterRPCClientRetries.java| 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09017aee/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
index dddcb5a..61e7657 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
@@ -25,6 +25,7 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -64,8 +65,16 @@ public class TestRouterRPCClientRetries {
 .rpc()
 .build();
 
+// reduce IPC client connection retry times and interval time
+Configuration clientConf = new Configuration(false);
+clientConf.setInt(
+CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
+clientConf.setInt(
+CommonConfigurationKeys.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY, 100);
+
 cluster.addRouterOverrides(routerConf);
-cluster.startCluster();
+// override some settings for the client
+cluster.startCluster(clientConf);
 cluster.startRouters();
 cluster.waitClusterUp();
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13184. RBF: Improve the unit test TestRouterRPCClientRetries. Contributed by Yiqun Lin.

2018-02-26 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 c58cec250 -> e54c76625


HDFS-13184. RBF: Improve the unit test TestRouterRPCClientRetries. Contributed by Yiqun Lin.

(cherry picked from commit 1e85a995d1c7fe3dbf4b36a481feb5fdeeb6015b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e54c7662
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e54c7662
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e54c7662

Branch: refs/heads/branch-3.1
Commit: e54c76625f140997142e9fabb0e2b5984858524f
Parents: c58cec2
Author: Yiqun Lin 
Authored: Tue Feb 27 10:48:52 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Feb 27 10:51:32 2018 +0800

--
 .../federation/router/TestRouterRPCClientRetries.java| 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e54c7662/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
index dddcb5a..61e7657 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
@@ -25,6 +25,7 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -64,8 +65,16 @@ public class TestRouterRPCClientRetries {
 .rpc()
 .build();
 
+// reduce IPC client connection retry times and interval time
+Configuration clientConf = new Configuration(false);
+clientConf.setInt(
+CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
+clientConf.setInt(
+CommonConfigurationKeys.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY, 100);
+
 cluster.addRouterOverrides(routerConf);
-cluster.startCluster();
+// override some settings for the client
+cluster.startCluster(clientConf);
 cluster.startRouters();
 cluster.waitClusterUp();
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13184. RBF: Improve the unit test TestRouterRPCClientRetries. Contributed by Yiqun Lin.

2018-02-26 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk ae290a4bb -> 1e85a995d


HDFS-13184. RBF: Improve the unit test TestRouterRPCClientRetries. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e85a995
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e85a995
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e85a995

Branch: refs/heads/trunk
Commit: 1e85a995d1c7fe3dbf4b36a481feb5fdeeb6015b
Parents: ae290a4
Author: Yiqun Lin 
Authored: Tue Feb 27 10:48:52 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Feb 27 10:48:52 2018 +0800

--
 .../federation/router/TestRouterRPCClientRetries.java| 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e85a995/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
index dddcb5a..61e7657 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
@@ -25,6 +25,7 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -64,8 +65,16 @@ public class TestRouterRPCClientRetries {
 .rpc()
 .build();
 
+// reduce IPC client connection retry times and interval time
+Configuration clientConf = new Configuration(false);
+clientConf.setInt(
+CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
+clientConf.setInt(
+CommonConfigurationKeys.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY, 100);
+
 cluster.addRouterOverrides(routerConf);
-cluster.startCluster();
+// override some settings for the client
+cluster.startCluster(clientConf);
 cluster.startRouters();
 cluster.waitClusterUp();
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7856. Validate Node Attributes from NM. Contributed by Weiwei Yang.

2018-02-26 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3409 0c3bf98c2 -> 47cd0d9ab


YARN-7856. Validate Node Attributes from NM. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47cd0d9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47cd0d9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47cd0d9a

Branch: refs/heads/YARN-3409
Commit: 47cd0d9abbeb1f4976d82cc60a26d462e10b08a0
Parents: 0c3bf98
Author: Sunil G 
Authored: Tue Feb 27 08:15:42 2018 +0530
Committer: Sunil G 
Committed: Tue Feb 27 08:15:42 2018 +0530

--
 .../hadoop/yarn/api/records/NodeAttribute.java  |  2 ++
 .../hadoop/yarn/nodelabels/NodeLabelUtil.java   | 31 
 .../ScriptBasedNodeAttributesProvider.java  | 25 ++--
 .../TestScriptBasedNodeAttributesProvider.java  | 27 +
 4 files changed, 83 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47cd0d9a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java
index 01c70b2..4f6846b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java
@@ -46,6 +46,8 @@ import org.apache.hadoop.yarn.util.Records;
 public abstract class NodeAttribute {
 
   public static final String DEFAULT_PREFIX = "";
+  public static final String PREFIX_DISTRIBUTED = "nm.yarn.io";
+  public static final String PREFIX_CENTRALIZED = "rm.yarn.io";
 
   public static NodeAttribute newInstance(String attributeName,
   NodeAttributeType attributeType, String attributeValue) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47cd0d9a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtil.java
index d918712..fdfd0ce 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtil.java
@@ -17,7 +17,11 @@
  */
 package org.apache.hadoop.yarn.nodelabels;
 
+import com.google.common.base.Strings;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+
 import java.io.IOException;
+import java.util.Set;
 import java.util.regex.Pattern;
 
 /**
@@ -94,4 +98,31 @@ public final class NodeLabelUtil {
   + ", now it is= " + prefix);
 }
   }
+
+  /**
+   * Validate if a given set of attributes are valid. Attributes could be
+   * invalid if any of following conditions is met:
+   *
+   * 
+   *   Missing prefix: the attribute doesn't have prefix defined
+   *   Malformed attribute prefix: the prefix is not in valid format
+   * 
+   * @param attributeSet
+   * @throws IOException
+   */
+  public static void validateNodeAttributes(Set attributeSet)
+  throws IOException {
+if (attributeSet != null && !attributeSet.isEmpty()) {
+  for (NodeAttribute nodeAttribute : attributeSet) {
+String prefix = nodeAttribute.getAttributePrefix();
+if (Strings.isNullOrEmpty(prefix)) {
+  throw new IOException("Attribute prefix must be set");
+}
+// Verify attribute prefix format.
+checkAndThrowAttributePrefix(prefix);
+// Verify attribute name format.
+checkAndThrowLabelName(nodeAttribute.getAttributeName());
+  }
+}
+  }
 }
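
As a hedged usage sketch of the new helper (generics are stripped in this archive; the parameter is a Set of NodeAttribute, and NodeAttributeType.STRING is assumed from the YARN node-attributes API), an attribute built without an explicit prefix keeps the empty DEFAULT_PREFIX and is expected to be rejected:

  import java.io.IOException;
  import java.util.Collections;
  import org.apache.hadoop.yarn.api.records.NodeAttribute;
  import org.apache.hadoop.yarn.api.records.NodeAttributeType;
  import org.apache.hadoop.yarn.nodelabels.NodeLabelUtil;

  NodeAttribute attr = NodeAttribute.newInstance(
      "hostname", NodeAttributeType.STRING, "host1234");
  try {
    NodeLabelUtil.validateNodeAttributes(Collections.singleton(attr));
  } catch (IOException e) {
    // Expected here: "Attribute prefix must be set". A provider would attach
    // a prefix such as NodeAttribute.PREFIX_DISTRIBUTED before reporting.
  }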

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47cd0d9a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ScriptBasedNodeAttributesProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ScriptBasedNodeAttributesProvider.java
 

[2/2] hadoop git commit: Backport HADOOP-13514 (surefire upgrade) to branch-2

2018-02-26 Thread cdouglas
Backport HADOOP-13514 (surefire upgrade) to branch-2

(cherry picked from commit 762125b864ab812512bad9a59344ca79af7f43ac)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6024b3a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6024b3a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6024b3a2

Branch: refs/heads/branch-2.9
Commit: 6024b3a2add1247b7b6e82d50f5267a8772e14e3
Parents: 39e1f96
Author: Chris Douglas 
Authored: Mon Feb 26 16:32:06 2018 -0800
Committer: Chris Douglas 
Committed: Mon Feb 26 16:32:19 2018 -0800

--
 BUILDING.txt  | 4 ++--
 hadoop-project/pom.xml| 5 +++--
 hadoop-tools/hadoop-aws/pom.xml   | 2 ++
 hadoop-tools/hadoop-azure/pom.xml | 3 +++
 4 files changed, 10 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6024b3a2/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 9c1fbd6..e7701a5 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -267,10 +267,10 @@ If the build process fails with an out of memory error, you should be able to fi
 it by increasing the memory used by maven which can be done via the environment
 variable MAVEN_OPTS.
 
-Here is an example setting to allocate between 256 and 512 MB of heap space to
+Here is an example setting to allocate between 256 MB and 1 GB of heap space to
 Maven
 
-export MAVEN_OPTS="-Xms256m -Xmx512m"
+export MAVEN_OPTS="-Xms256m -Xmx1g"
 
 
--
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6024b3a2/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 6441dc9..9f80749 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -111,7 +111,7 @@
 
 
 -Xmx2048m -XX:MaxPermSize=768m 
-XX:+HeapDumpOnOutOfMemoryError
-2.17
+2.20.1
 
${maven-surefire-plugin.version}
 
${maven-surefire-plugin.version}
 
@@ -1509,6 +1509,7 @@
 
${env.DYLD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib
 4
   
+  false
   
 
 ${project.build.directory}/log
@@ -1519,7 +1520,7 @@
 ${test.build.data}
 ${test.build.webapps}
 ${test.cache.data}
-${test.build.classes}
+
${project.build.directory}/test-classes
 
 true
 
${project.build.directory}/test-classes/krb5.conf

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6024b3a2/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 9343af2..920f985 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -153,6 +153,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
${fs.s3a.scale.test.timeout}
+  false
   
 
 true
@@ -209,6 +210,7 @@
 
 
   
${fs.s3a.scale.test.timeout}
+  false
   
 
 false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6024b3a2/hadoop-tools/hadoop-azure/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure/pom.xml 
b/hadoop-tools/hadoop-azure/pom.xml
index 81ea1ff..8b8a584 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -326,6 +326,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
${fs.azure.scale.test.timeout}
+  false
   
 
 true
@@ -381,6 +382,7 @@
 
 
   
${fs.azure.scale.test.timeout}
+  false
   
 false
 
${fs.azure.scale.test.enabled}
@@ -431,6 +433,7 @@
 
${fs.azure.scale.test.timeout}
   
   
${fs.azure.scale.test.timeout}
+  false
 
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: 

[1/2] hadoop git commit: Backport HADOOP-13514 (surefire upgrade) to branch-2

2018-02-26 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 54803ebe4 -> 762125b86
  refs/heads/branch-2.9 39e1f963b -> 6024b3a2a


Backport HADOOP-13514 (surefire upgrade) to branch-2


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/762125b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/762125b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/762125b8

Branch: refs/heads/branch-2
Commit: 762125b864ab812512bad9a59344ca79af7f43ac
Parents: 54803eb
Author: Chris Douglas 
Authored: Mon Feb 26 16:32:06 2018 -0800
Committer: Chris Douglas 
Committed: Mon Feb 26 16:32:06 2018 -0800

--
 BUILDING.txt  | 4 ++--
 hadoop-project/pom.xml| 5 +++--
 hadoop-tools/hadoop-aws/pom.xml   | 2 ++
 hadoop-tools/hadoop-azure/pom.xml | 3 +++
 4 files changed, 10 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/762125b8/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 9c1fbd6..e7701a5 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -267,10 +267,10 @@ If the build process fails with an out of memory error, you should be able to fi
 it by increasing the memory used by maven which can be done via the environment
 variable MAVEN_OPTS.
 
-Here is an example setting to allocate between 256 and 512 MB of heap space to
+Here is an example setting to allocate between 256 MB and 1 GB of heap space to
 Maven
 
-export MAVEN_OPTS="-Xms256m -Xmx512m"
+export MAVEN_OPTS="-Xms256m -Xmx1g"
 
 
--
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/762125b8/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 6ba1ced..f560088 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -111,7 +111,7 @@
 
 
 -Xmx2048m -XX:MaxPermSize=768m 
-XX:+HeapDumpOnOutOfMemoryError
-2.17
+2.20.1
 
${maven-surefire-plugin.version}
 
${maven-surefire-plugin.version}
 
@@ -1521,6 +1521,7 @@
 
${env.DYLD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib
 4
   
+  false
   
 
 ${project.build.directory}/log
@@ -1531,7 +1532,7 @@
 ${test.build.data}
 ${test.build.webapps}
 ${test.cache.data}
-${test.build.classes}
+
${project.build.directory}/test-classes
 
 true
 
${project.build.directory}/test-classes/krb5.conf

http://git-wip-us.apache.org/repos/asf/hadoop/blob/762125b8/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index d8b54c4..659af93 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -153,6 +153,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
${fs.s3a.scale.test.timeout}
+  false
   
 
 true
@@ -209,6 +210,7 @@
 
 
   
${fs.s3a.scale.test.timeout}
+  false
   
 
 false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/762125b8/hadoop-tools/hadoop-azure/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure/pom.xml 
b/hadoop-tools/hadoop-azure/pom.xml
index 244f432..c9325ff 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -326,6 +326,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
${fs.azure.scale.test.timeout}
+  false
   
 
 true
@@ -381,6 +382,7 @@
 
 
   
${fs.azure.scale.test.timeout}
+  false
   
 false
 
${fs.azure.scale.test.enabled}
@@ -431,6 +433,7 @@
 
${fs.azure.scale.test.timeout}
   
   
${fs.azure.scale.test.timeout}
+  false
 
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org

[3/3] hadoop git commit: HDFS-13145. SBN crash when transition to ANN with in-progress edit tailing enabled. Contributed by Chao Sun.

2018-02-26 Thread shv
HDFS-13145. SBN crash when transition to ANN with in-progress edit tailing enabled. Contributed by Chao Sun.

(cherry picked from commit ae290a4bb4e514e2fe9b40d28426a7589afe2a3f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26395aef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26395aef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26395aef

Branch: refs/heads/branch-3.0
Commit: 26395aef674face8bf73feb91b88c2ae5fb9a400
Parents: 1fb87df
Author: Chao Sun 
Authored: Mon Feb 26 15:37:27 2018 -0800
Committer: Konstantin V Shvachko 
Committed: Mon Feb 26 16:18:51 2018 -0800

--
 .../qjournal/client/QuorumJournalManager.java   |  4 ++-
 .../client/TestQuorumJournalManager.java| 26 +++-
 2 files changed, 28 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26395aef/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index 7dff9b4..7a70a3d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -496,7 +496,9 @@ public class QuorumJournalManager implements JournalManager {
 
 // If it's bounded by durable Txns, endTxId could not be larger
 // than committedTxnId. This ensures the consistency.
-if (onlyDurableTxns && inProgressOk) {
+// We don't do the following for finalized log segments, since all
+// edits in those are guaranteed to be committed.
+if (onlyDurableTxns && inProgressOk && remoteLog.isInProgress()) {
   endTxId = Math.min(endTxId, committedTxnId);
   if (endTxId < remoteLog.getStartTxId()) {
 LOG.warn("Found endTxId (" + endTxId + ") that is less than " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/26395aef/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index ce1d404..34a0348 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -933,7 +933,31 @@ public class TestQuorumJournalManager {
 
 verifyEdits(streams, 25, 50);
   }
-  
+
+  @Test
+  public void testInProgressRecovery() throws Exception {
+// Test the case when in-progress edit log tailing is on, and
+// new active performs recovery when the old active crashes
+// without closing the last log segment.
+// See HDFS-13145 for more details.
+
+// Write two batches of edits. After these, the commitId on the
+// journals should be 5, and endTxnId should be 8.
+EditLogOutputStream stm = qjm.startLogSegment(1,
+NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
+writeTxns(stm, 1, 5);
+writeTxns(stm, 6, 3);
+
+// Do recovery from a separate QJM, just like in failover.
+QuorumJournalManager qjm2 = createSpyingQJM();
+qjm2.recoverUnfinalizedSegments();
+checkRecovery(cluster, 1, 8);
+
+// When selecting input stream, we should see all txns up to 8.
+List streams = new ArrayList<>();
+qjm2.selectInputStreams(streams, 1, true, true);
+verifyEdits(streams, 1, 8);
+  }
   
   private QuorumJournalManager createSpyingQJM()
   throws IOException, URISyntaxException {
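
To make the comment in the QuorumJournalManager hunk concrete, a purely hypothetical illustration (plain numbers, not QJM code) of why the committedTxnId cap is now applied only to in-progress segments, matching the scenario exercised by the new testInProgressRecovery above (txns 1..8 written, quorum commit at 5):

  // Hypothetical values: the crashed writer wrote txns 1..8 to this journal,
  // but only 1..5 are committed on a quorum (committedTxnId = 5).
  long committedTxnId = 5;
  long segmentEndTxId = 8;
  boolean segmentInProgress = true;   // the last segment was never finalized

  long endTxId = segmentEndTxId;
  if (segmentInProgress) {
    // Only durable edits may be served from an unfinalized segment.
    endTxId = Math.min(endTxId, committedTxnId);   // -> 5
  }
  // For a finalized segment, segmentInProgress is false and endTxId stays 8;
  // that is exactly what the added remoteLog.isInProgress() check preserves.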


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/3] hadoop git commit: HDFS-13145. SBN crash when transition to ANN with in-progress edit tailing enabled. Contributed by Chao Sun.

2018-02-26 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 1fb87df87 -> 26395aef6
  refs/heads/branch-3.1 9b37d1869 -> c58cec250
  refs/heads/trunk b4f1ba141 -> ae290a4bb


HDFS-13145. SBN crash when transition to ANN with in-progress edit tailing enabled. Contributed by Chao Sun.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae290a4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae290a4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae290a4b

Branch: refs/heads/trunk
Commit: ae290a4bb4e514e2fe9b40d28426a7589afe2a3f
Parents: b4f1ba1
Author: Chao Sun 
Authored: Mon Feb 26 15:37:27 2018 -0800
Committer: Konstantin V Shvachko 
Committed: Mon Feb 26 16:15:00 2018 -0800

--
 .../qjournal/client/QuorumJournalManager.java   |  4 ++-
 .../client/TestQuorumJournalManager.java| 26 +++-
 2 files changed, 28 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae290a4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index 7dff9b4..7a70a3d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -496,7 +496,9 @@ public class QuorumJournalManager implements JournalManager {
 
 // If it's bounded by durable Txns, endTxId could not be larger
 // than committedTxnId. This ensures the consistency.
-if (onlyDurableTxns && inProgressOk) {
+// We don't do the following for finalized log segments, since all
+// edits in those are guaranteed to be committed.
+if (onlyDurableTxns && inProgressOk && remoteLog.isInProgress()) {
   endTxId = Math.min(endTxId, committedTxnId);
   if (endTxId < remoteLog.getStartTxId()) {
 LOG.warn("Found endTxId (" + endTxId + ") that is less than " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae290a4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index ce1d404..34a0348 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -933,7 +933,31 @@ public class TestQuorumJournalManager {
 
 verifyEdits(streams, 25, 50);
   }
-  
+
+  @Test
+  public void testInProgressRecovery() throws Exception {
+// Test the case when in-progress edit log tailing is on, and
+// new active performs recovery when the old active crashes
+// without closing the last log segment.
+// See HDFS-13145 for more details.
+
+// Write two batches of edits. After these, the commitId on the
+// journals should be 5, and endTxnId should be 8.
+EditLogOutputStream stm = qjm.startLogSegment(1,
+NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
+writeTxns(stm, 1, 5);
+writeTxns(stm, 6, 3);
+
+// Do recovery from a separate QJM, just like in failover.
+QuorumJournalManager qjm2 = createSpyingQJM();
+qjm2.recoverUnfinalizedSegments();
+checkRecovery(cluster, 1, 8);
+
+// When selecting input stream, we should see all txns up to 8.
+List streams = new ArrayList<>();
+qjm2.selectInputStreams(streams, 1, true, true);
+verifyEdits(streams, 1, 8);
+  }
   
   private QuorumJournalManager createSpyingQJM()
   throws IOException, URISyntaxException {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/3] hadoop git commit: HDFS-13145. SBN crash when transition to ANN with in-progress edit tailing enabled. Contributed by Chao Sun.

2018-02-26 Thread shv
HDFS-13145. SBN crash when transition to ANN with in-progress edit tailing enabled. Contributed by Chao Sun.

(cherry picked from commit ae290a4bb4e514e2fe9b40d28426a7589afe2a3f)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c58cec25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c58cec25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c58cec25

Branch: refs/heads/branch-3.1
Commit: c58cec25072e0c9fd714c4d5d8867f43894e9a19
Parents: 9b37d18
Author: Chao Sun 
Authored: Mon Feb 26 15:37:27 2018 -0800
Committer: Konstantin V Shvachko 
Committed: Mon Feb 26 16:17:36 2018 -0800

--
 .../qjournal/client/QuorumJournalManager.java   |  4 ++-
 .../client/TestQuorumJournalManager.java| 26 +++-
 2 files changed, 28 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58cec25/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index 7dff9b4..7a70a3d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -496,7 +496,9 @@ public class QuorumJournalManager implements JournalManager {
 
 // If it's bounded by durable Txns, endTxId could not be larger
 // than committedTxnId. This ensures the consistency.
-if (onlyDurableTxns && inProgressOk) {
+// We don't do the following for finalized log segments, since all
+// edits in those are guaranteed to be committed.
+if (onlyDurableTxns && inProgressOk && remoteLog.isInProgress()) {
   endTxId = Math.min(endTxId, committedTxnId);
   if (endTxId < remoteLog.getStartTxId()) {
 LOG.warn("Found endTxId (" + endTxId + ") that is less than " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58cec25/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index ce1d404..34a0348 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -933,7 +933,31 @@ public class TestQuorumJournalManager {
 
 verifyEdits(streams, 25, 50);
   }
-  
+
+  @Test
+  public void testInProgressRecovery() throws Exception {
+// Test the case when in-progress edit log tailing is on, and
+// new active performs recovery when the old active crashes
+// without closing the last log segment.
+// See HDFS-13145 for more details.
+
+// Write two batches of edits. After these, the commitId on the
+// journals should be 5, and endTxnId should be 8.
+EditLogOutputStream stm = qjm.startLogSegment(1,
+NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
+writeTxns(stm, 1, 5);
+writeTxns(stm, 6, 3);
+
+// Do recovery from a separate QJM, just like in failover.
+QuorumJournalManager qjm2 = createSpyingQJM();
+qjm2.recoverUnfinalizedSegments();
+checkRecovery(cluster, 1, 8);
+
+// When selecting input stream, we should see all txns up to 8.
+List streams = new ArrayList<>();
+qjm2.selectInputStreams(streams, 1, true, true);
+verifyEdits(streams, 1, 8);
+  }
   
   private QuorumJournalManager createSpyingQJM()
   throws IOException, URISyntaxException {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7963. Updated MockServiceAM unit test to prevent test hang. Contributed by Chandni Singh

2018-02-26 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 6ca6ae791 -> 9b37d1869


YARN-7963. Updated MockServiceAM unit test to prevent test hang. Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b37d186
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b37d186
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b37d186

Branch: refs/heads/branch-3.1
Commit: 9b37d18693ea14d68e81ec73dccf838c315c5258
Parents: 6ca6ae7
Author: Eric Yang 
Authored: Mon Feb 26 18:49:01 2018 -0500
Committer: Eric Yang 
Committed: Mon Feb 26 18:53:14 2018 -0500

--
 .../org/apache/hadoop/yarn/service/MockServiceAM.java | 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b37d186/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
index 3e1582d..4373893 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
@@ -196,7 +196,15 @@ public class MockServiceAM extends ServiceMaster {
 
   @Override
   public RegisterApplicationMasterResponse registerApplicationMaster(
-  String appHostName, int appHostPort, String appTrackingUrl) {
+  String appHostName, int appHostPort, String appTrackingUrl,
+  Map placementConstraintsMap) throws YarnException, IOException {
+return this.registerApplicationMaster(appHostName, appHostPort,
+appTrackingUrl);
+  }
+
+  @Override
+public RegisterApplicationMasterResponse registerApplicationMaster(
+String appHostName, int appHostPort, String appTrackingUrl) {
 RegisterApplicationMasterResponse response = mock(
 RegisterApplicationMasterResponse.class);
 when(response.getResourceTypes()).thenReturn(


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7963. Updated MockServiceAM unit test to prevent test hang. Contributed by Chandni Singh

2018-02-26 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6ce9f79cc -> b4f1ba141


YARN-7963. Updated MockServiceAM unit test to prevent test hang. Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4f1ba14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4f1ba14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4f1ba14

Branch: refs/heads/trunk
Commit: b4f1ba14133568f663da080adf644149253b5b05
Parents: 6ce9f79
Author: Eric Yang 
Authored: Mon Feb 26 18:49:01 2018 -0500
Committer: Eric Yang 
Committed: Mon Feb 26 18:49:01 2018 -0500

--
 .../org/apache/hadoop/yarn/service/MockServiceAM.java | 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4f1ba14/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
index 3e1582d..4373893 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
@@ -196,7 +196,15 @@ public class MockServiceAM extends ServiceMaster {
 
   @Override
   public RegisterApplicationMasterResponse registerApplicationMaster(
-  String appHostName, int appHostPort, String appTrackingUrl) {
+  String appHostName, int appHostPort, String appTrackingUrl,
+  Map placementConstraintsMap) throws YarnException, IOException {
+return this.registerApplicationMaster(appHostName, appHostPort,
+appTrackingUrl);
+  }
+
+  @Override
+public RegisterApplicationMasterResponse registerApplicationMaster(
+String appHostName, int appHostPort, String appTrackingUrl) {
 RegisterApplicationMasterResponse response = mock(
 RegisterApplicationMasterResponse.class);
 when(response.getResourceTypes()).thenReturn(


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13187. RBF: Fix Routers information shown in the web UI. Contributed by Inigo Goiri and Wei Yan.

2018-02-26 Thread weiy
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 a6343ff80 -> 39e1f963b


HDFS-13187. RBF: Fix Routers information shown in the web UI. Contributed by Inigo Goiri and Wei Yan.

(cherry picked from commit 6ce9f79cc9b2107e5953a39d05b22966aff0b7ff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39e1f963
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39e1f963
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39e1f963

Branch: refs/heads/branch-2.9
Commit: 39e1f963bb0ae13ee2a32bc01fed727c31eb0aee
Parents: a6343ff
Author: Wei Yan 
Authored: Mon Feb 26 15:13:41 2018 -0800
Committer: weiy 
Committed: Mon Feb 26 15:36:21 2018 -0800

--
 .../main/webapps/router/federationhealth.html   |  4 ++
 .../src/main/webapps/router/federationhealth.js | 64 
 2 files changed, 57 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39e1f963/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
index 2c6a6da..526c092 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
@@ -35,6 +35,7 @@
   
 Overview
 Subclusters
+Routers
 Datanodes
 Mount table
 
@@ -62,6 +63,7 @@
 
   
   
+  
   
   
 
@@ -245,6 +247,8 @@
 
 
 
+

hadoop git commit: HDFS-13187. RBF: Fix Routers information shown in the web UI. Contributed by Inigo Goiri and Wei Yan.

2018-02-26 Thread weiy
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0945f0eb2 -> 54803ebe4


HDFS-13187. RBF: Fix Routers information shown in the web UI. Contributed by Inigo Goiri and Wei Yan.

(cherry picked from commit 6ce9f79cc9b2107e5953a39d05b22966aff0b7ff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54803ebe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54803ebe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54803ebe

Branch: refs/heads/branch-2
Commit: 54803ebe492e04e5b531904233e60e7abcea4698
Parents: 0945f0e
Author: Wei Yan 
Authored: Mon Feb 26 15:13:41 2018 -0800
Committer: weiy 
Committed: Mon Feb 26 15:32:27 2018 -0800

--
 .../main/webapps/router/federationhealth.html   |  4 ++
 .../src/main/webapps/router/federationhealth.js | 64 
 2 files changed, 57 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54803ebe/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
index b89e5aa..23d42d3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
@@ -35,6 +35,7 @@
   
 Overview
 Subclusters
+Routers
 Datanodes
 Mount table
 
@@ -62,6 +63,7 @@
 
   
   
+  
   
   
 
@@ -245,6 +247,8 @@
 
 
 
+

hadoop git commit: HDFS-13187. RBF: Fix Routers information shown in the web UI. Contributed by Inigo Goiri and Wei Yan.

2018-02-26 Thread weiy
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 1087b9af8 -> 1fb87df87


HDFS-13187. RBF: Fix Routers information shown in the web UI. Contributed by Inigo Goiri and Wei Yan.

(cherry picked from commit 6ce9f79cc9b2107e5953a39d05b22966aff0b7ff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fb87df8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fb87df8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fb87df8

Branch: refs/heads/branch-3.0
Commit: 1fb87df87e6f422ce16edaa190b00536f564bfc8
Parents: 1087b9a
Author: Wei Yan 
Authored: Mon Feb 26 15:13:41 2018 -0800
Committer: weiy 
Committed: Mon Feb 26 15:29:11 2018 -0800

--
 .../main/webapps/router/federationhealth.html   |  4 ++
 .../src/main/webapps/router/federationhealth.js | 64 
 2 files changed, 57 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fb87df8/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
index 2c6a6da..526c092 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
@@ -35,6 +35,7 @@
   
 Overview
 Subclusters
+Routers
 Datanodes
 Mount table
 
@@ -62,6 +63,7 @@
 
   
   
+  
   
   
 
@@ -245,6 +247,8 @@
 
 
 
+

hadoop git commit: HDFS-13187. RBF: Fix Routers information shown in the web UI. Contributed by Inigo Goiri and Wei Yan.

2018-02-26 Thread weiy
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 0ccd7138a -> 6ca6ae791


HDFS-13187. RBF: Fix Routers information shown in the web UI. Contributed by Inigo Goiri and Wei Yan.

(cherry picked from commit 6ce9f79cc9b2107e5953a39d05b22966aff0b7ff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ca6ae79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ca6ae79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ca6ae79

Branch: refs/heads/branch-3.1
Commit: 6ca6ae791080c74f9927f41be30724f65f95570c
Parents: 0ccd713
Author: Wei Yan 
Authored: Mon Feb 26 15:13:41 2018 -0800
Committer: Wei Yan 
Committed: Mon Feb 26 15:20:50 2018 -0800

--
 .../main/webapps/router/federationhealth.html   |  4 ++
 .../src/main/webapps/router/federationhealth.js | 64 
 2 files changed, 57 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ca6ae79/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
index b89e5aa..23d42d3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
@@ -35,6 +35,7 @@
   
 Overview
 Subclusters
+Routers
 Datanodes
 Mount table
 
@@ -62,6 +63,7 @@
 
   
   
+  
   
   
 
@@ -245,6 +247,8 @@
 
 
 
+

hadoop git commit: HDFS-13187. RBF: Fix Routers information shown in the web UI. Contributed by Inigo Goiri and Wei Yan.

2018-02-26 Thread weiy
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7dd385098 -> 6ce9f79cc


HDFS-13187. RBF: Fix Routers information shown in the web UI. Contributed by Inigo Goiri and Wei Yan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ce9f79c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ce9f79c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ce9f79c

Branch: refs/heads/trunk
Commit: 6ce9f79cc9b2107e5953a39d05b22966aff0b7ff
Parents: 7dd3850
Author: Wei Yan 
Authored: Mon Feb 26 15:13:41 2018 -0800
Committer: Wei Yan 
Committed: Mon Feb 26 15:13:41 2018 -0800

--
 .../main/webapps/router/federationhealth.html   |  4 ++
 .../src/main/webapps/router/federationhealth.js | 64 
 2 files changed, 57 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ce9f79c/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
index b89e5aa..23d42d3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/federationhealth.html
@@ -35,6 +35,7 @@
   
 Overview
 Subclusters
+Routers
 Datanodes
 Mount table
 
@@ -62,6 +63,7 @@
 
   
   
+  
   
   
 
@@ -245,6 +247,8 @@
 
 
 
+

[hadoop] Git Push Summary

2018-02-26 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-3 [deleted] 692260754

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7010. Make Job History File Permissions configurable. Contributed by Gergely Novák

2018-02-26 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/trunk 78a10029e -> 7dd385098


MAPREDUCE-7010. Make Job History File Permissions configurable. Contributed by Gergely Novák


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7dd38509
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7dd38509
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7dd38509

Branch: refs/heads/trunk
Commit: 7dd385098c7a3046e6b049e70669d5b726de79c9
Parents: 78a1002
Author: Billie Rinaldi 
Authored: Mon Feb 26 14:32:46 2018 -0800
Committer: Billie Rinaldi 
Committed: Mon Feb 26 14:32:46 2018 -0800

--
 .../jobhistory/JobHistoryEventHandler.java  |  4 +--
 .../mapreduce/v2/jobhistory/JHAdminConfig.java  |  4 +++
 .../v2/jobhistory/JobHistoryUtils.java  | 38 
 .../v2/jobhistory/TestJobHistoryUtils.java  | 24 +
 .../src/main/resources/mapred-default.xml   |  9 +
 5 files changed, 70 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dd38509/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index ae46129..fd93d07 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -231,8 +231,8 @@ public class JobHistoryEventHandler extends AbstractService
 try {
   doneDirPrefixPath =
   FileContext.getFileContext(conf).makeQualified(new 
Path(userDoneDirStr));
-  mkdir(doneDirFS, doneDirPrefixPath, new FsPermission(
-  JobHistoryUtils.HISTORY_INTERMEDIATE_USER_DIR_PERMISSIONS));
+  mkdir(doneDirFS, doneDirPrefixPath, JobHistoryUtils.
+  getConfiguredHistoryIntermediateUserDoneDirPermissions(conf));
 } catch (IOException e) {
   LOG.error("Error creating user intermediate history done directory: [ "
   + doneDirPrefixPath + "]", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dd38509/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
index 5097946..1cadf84 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
@@ -93,6 +93,10 @@ public class JHAdminConfig {
**/
   public static final String MR_HISTORY_INTERMEDIATE_DONE_DIR =
 MR_HISTORY_PREFIX + "intermediate-done-dir";
+  public static final String MR_HISTORY_INTERMEDIATE_USER_DONE_DIR_PERMISSIONS =
+      MR_HISTORY_PREFIX + "intermediate-user-done-dir.permissions";
+  public static final short
+      DEFAULT_MR_HISTORY_INTERMEDIATE_USER_DONE_DIR_PERMISSIONS = 0770;
   
   /** Size of the job list cache.*/
   public static final String MR_HISTORY_JOBLIST_CACHE_SIZE =
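
For reference, a hedged sketch of reading the new setting programmatically. It assumes MR_HISTORY_PREFIX resolves to "mapreduce.jobhistory." (so the full property is mapreduce.jobhistory.intermediate-user-done-dir.permissions), that the value is an octal permission string, and that the helper returns an FsPermission, as its use in the mkdir call above suggests:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.permission.FsPermission;
  import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
  import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;

  Configuration conf = new Configuration();
  // Restrict each user's intermediate done dir to the owner only
  // (the default stays 0770 when the property is unset).
  conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_USER_DONE_DIR_PERMISSIONS, "0700");
  FsPermission perm =
      JobHistoryUtils.getConfiguredHistoryIntermediateUserDoneDirPermissions(conf);
  // JobHistoryEventHandler uses this permission when it creates the user's
  // intermediate history directory (see the hunk above).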

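For illustration only, a hedged sketch of exercising the new JHAdminConfig knob added above. It assumes MR_HISTORY_PREFIX resolves to "mapreduce.jobhistory.", so the key expands to mapreduce.jobhistory.intermediate-user-done-dir.permissions; the helper name comes from the JobHistoryEventHandler hunk above.

```
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;

public class IntermediateDonePermsSketch {
  public static void main(String[] args) {
    // Assumption: MR_HISTORY_PREFIX is "mapreduce.jobhistory.", so the constant
    // added in JHAdminConfig expands to the literal key used here.
    Configuration conf = new Configuration();
    conf.set("mapreduce.jobhistory.intermediate-user-done-dir.permissions", "775");
    // The new helper (used by JobHistoryEventHandler in this patch) is expected
    // to fall back to the 0770 default when the value is absent or unparsable.
    FsPermission perms =
        JobHistoryUtils.getConfiguredHistoryIntermediateUserDoneDirPermissions(conf);
    System.out.println("Intermediate user done dir permissions: " + perms);
  }
}
```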
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dd38509/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
index 58ac1db..5160ce8 100644
--- 

[44/59] [abbrv] hadoop git commit: YARN-7934. [GQ] Refactor preemption calculators to allow overriding for Federation Global Algos. (Contributed by curino)

2018-02-26 Thread xyao
YARN-7934. [GQ] Refactor preemption calculators to allow overriding for 
Federation Global Algos. (Contributed by curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/514794e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/514794e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/514794e1

Branch: refs/heads/HDFS-7240
Commit: 514794e1a5a39ca61de3981d53a05547ae17f5e4
Parents: 95904f6
Author: Carlo Curino 
Authored: Thu Feb 22 18:12:12 2018 -0800
Committer: Carlo Curino 
Committed: Thu Feb 22 18:12:12 2018 -0800

--
 .../AbstractPreemptableResourceCalculator.java  |  38 +--
 .../capacity/AbstractPreemptionEntity.java  |   4 +
 .../CapacitySchedulerPreemptionContext.java |   6 +-
 .../capacity/PreemptableResourceCalculator.java |  21 ++--
 .../monitor/capacity/TempQueuePerPartition.java | 106 +++
 .../webapp/dao/ResourceInfo.java|   5 +-
 6 files changed, 139 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/514794e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
index 5196831..2589970 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
@@ -18,6 +18,12 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.PriorityQueue;
+
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy.PriorityUtilizationQueueOrderingPolicy;
@@ -26,12 +32,6 @@ import 
org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.PriorityQueue;
-
 /**
  * Calculate how much resources need to be preempted for each queue,
  * will be used by {@link PreemptionCandidatesSelector}.
@@ -126,11 +126,18 @@ public class AbstractPreemptableResourceCalculator {
   TempQueuePerPartition q = i.next();
   Resource used = q.getUsed();
 
+  Resource initIdealAssigned;
   if (Resources.greaterThan(rc, totGuarant, used, q.getGuaranteed())) {
-q.idealAssigned = Resources.add(q.getGuaranteed(), q.untouchableExtra);
+initIdealAssigned =
+Resources.add(q.getGuaranteed(), q.untouchableExtra);
   } else {
-q.idealAssigned = Resources.clone(used);
+initIdealAssigned = Resources.clone(used);
   }
+
+  // perform initial assignment
+  initIdealAssignment(totGuarant, q, initIdealAssigned);
+
+
   Resources.subtractFrom(unassigned, q.idealAssigned);
   // If idealAssigned < (allocated + used + pending), q needs more
   // resources, so
@@ -188,6 +195,21 @@ public class AbstractPreemptableResourceCalculator {
 }
   }
 
+
+  /**
+   * This method is visible to allow sub-classes to override the initialization
+   * behavior.
+   *
+   * @param totGuarant total resources (useful for {@code ResourceCalculator}
+   *  operations)
+   * @param q the {@code TempQueuePerPartition} being initialized
+   * @param initIdealAssigned the proposed initialization value.
+   */
+  protected void initIdealAssignment(Resource totGuarant,
+  TempQueuePerPartition q, Resource initIdealAssigned) {
+q.idealAssigned = initIdealAssigned;
+  }
+
   /**
* Computes a normalizedGuaranteed capacity based on active queues.
*

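A hedged, illustrative sketch (not part of the patch) of how a federation-oriented calculator might use the new hook: only initIdealAssignment and the idealAssigned field come from the diff above; the class name, the override body, and the superclass constructor arguments (assumed to mirror PreemptableResourceCalculator) are hypothetical.

```
package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

/** Hypothetical global-algorithm calculator built on the new extension point. */
public class GlobalPreemptableResourceCalculatorSketch
    extends AbstractPreemptableResourceCalculator {

  public GlobalPreemptableResourceCalculatorSketch(
      CapacitySchedulerPreemptionContext preemptionContext,
      boolean isReservedPreemptionCandidatesSelector) {
    // Assumption: the superclass constructor takes the preemption context and
    // the reserved-selector flag, as PreemptableResourceCalculator's does.
    super(preemptionContext, isReservedPreemptionCandidatesSelector);
  }

  @Override
  protected void initIdealAssignment(Resource totGuarant,
      TempQueuePerPartition q, Resource initIdealAssigned) {
    // Illustrative policy only: start from a defensive copy so that later
    // federation-wide adjustments cannot alias the proposed value.
    q.idealAssigned = Resources.clone(initIdealAssigned);
  }
}
```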

[57/59] [abbrv] hadoop git commit: HADOOP-15254. Correct the wrong word spelling 'intialize'. Contributed by fang zhenyi.

2018-02-26 Thread xyao
HADOOP-15254. Correct the wrong word spelling 'intialize'. Contributed by fang 
zhenyi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fa7963c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fa7963c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fa7963c

Branch: refs/heads/HDFS-7240
Commit: 2fa7963c3d8cdfc65f90efabc6fe51a160be5c78
Parents: c30a26a
Author: Arpit Agarwal 
Authored: Sat Feb 24 14:41:55 2018 -0800
Committer: Arpit Agarwal 
Committed: Sat Feb 24 14:41:55 2018 -0800

--
 .../src/main/java/org/apache/hadoop/log/Log4Json.java   |  2 +-
 .../apache/hadoop/crypto/key/kms/server/KMSWebApp.java  |  2 +-
 .../federation/store/driver/StateStoreDriver.java   |  2 +-
 .../namenode/web/resources/NamenodeWebHdfsMethods.java  |  2 +-
 .../namenode/web/resources/TestWebHdfsDataLocality.java |  2 +-
 .../org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java |  2 +-
 .../org/apache/hadoop/mapreduce/v2/hs/JobHistory.java   |  2 +-
 .../java/org/apache/hadoop/streaming/StreamJob.java |  2 +-
 .../QueuePriorityContainerCandidateSelector.java|  4 ++--
 .../resourcemanager/TestResourceTrackerService.java | 12 ++--
 10 files changed, 16 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fa7963c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/Log4Json.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/Log4Json.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/Log4Json.java
index a2bbbfc..68cf680 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/Log4Json.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/Log4Json.java
@@ -103,7 +103,7 @@ public class Log4Json extends Layout {
   /**
* Jackson factories are thread safe when constructing parsers and 
generators.
* They are not thread safe in configure methods; if there is to be any
-   * configuration it must be done in a static intializer block.
+   * configuration it must be done in a static initializer block.
*/
   private static final JsonFactory factory = new MappingJsonFactory();
   private static final ObjectReader READER = new 
ObjectMapper(factory).reader();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fa7963c/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index 9a71fa2..1817a13 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -152,7 +152,7 @@ public class KMSWebApp implements ServletContextListener {
 
   kmsAudit = new KMSAudit(kmsConf);
 
-  // intializing the KeyProvider
+  // initializing the KeyProvider
   String providerString = kmsConf.get(KMSConfiguration.KEY_PROVIDER_URI);
   if (providerString == null) {
 throw new IllegalStateException("No KeyProvider has been defined");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fa7963c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java
index c9b1ce6..d595a97 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java
@@ -73,7 +73,7 @@ public abstract class StateStoreDriver implements 
StateStoreRecordOperations {
 
 boolean success = initDriver();
 if (!success) {
-  LOG.error("Cannot intialize driver for {}", getDriverName());
+  LOG.error("Cannot initialize driver for {}", getDriverName());
   return false;
 }
 


[53/59] [abbrv] hadoop git commit: HADOOP-15255. Upper/Lower case conversion support for group names in LdapGroupsMapping. Contributed by Nanda kumar.

2018-02-26 Thread xyao
HADOOP-15255. Upper/Lower case conversion support for group names in 
LdapGroupsMapping. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/033f9c68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/033f9c68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/033f9c68

Branch: refs/heads/HDFS-7240
Commit: 033f9c68ea82b6293e760df457bb6eb23c6889c8
Parents: 68ce193
Author: Arpit Agarwal 
Authored: Fri Feb 23 15:37:17 2018 -0800
Committer: Arpit Agarwal 
Committed: Fri Feb 23 15:37:17 2018 -0800

--
 .../security/RuleBasedLdapGroupsMapping.java| 91 ++
 .../src/main/resources/core-default.xml | 13 +++
 .../conf/TestCommonConfigurationFields.java |  4 +-
 .../TestRuleBasedLdapGroupsMapping.java | 99 
 4 files changed, 206 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/033f9c68/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java
new file mode 100644
index 000..6accf2f
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * This class uses {@link LdapGroupsMapping} for group lookup and applies the
+ * rule configured on the group names.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+@InterfaceStability.Evolving
+public class RuleBasedLdapGroupsMapping extends LdapGroupsMapping {
+
+  public static final String CONVERSION_RULE_KEY = LDAP_CONFIG_PREFIX +
+  ".conversion.rule";
+
+  private static final String CONVERSION_RULE_DEFAULT = "none";
+  private static final Logger LOG =
+  LoggerFactory.getLogger(RuleBasedLdapGroupsMapping.class);
+
+  private Rule rule;
+
+  /**
+   * Supported rules applicable for group name modification.
+   */
+  private enum Rule {
+TO_UPPER, TO_LOWER, NONE
+  }
+
+  @Override
+  public synchronized void setConf(Configuration conf) {
+super.setConf(conf);
+String value = conf.get(CONVERSION_RULE_KEY, CONVERSION_RULE_DEFAULT);
+try {
+  rule = Rule.valueOf(value.toUpperCase());
+} catch (IllegalArgumentException iae) {
+  LOG.warn("Invalid {} configured: '{}'. Using default value: '{}'",
+  CONVERSION_RULE_KEY, value, CONVERSION_RULE_DEFAULT);
+}
+  }
+
+/**
+ * Returns list of groups for a user.
+ * This calls {@link LdapGroupsMapping}'s getGroups and applies the
+ * configured rules on group names before returning.
+ *
+ * @param user get groups for this user
+ * @return list of groups for a given user
+ */
+  @Override
+  public synchronized List<String> getGroups(String user) {
+List<String> groups = super.getGroups(user);
+switch (rule) {
+case TO_UPPER:
+  return groups.stream().map(StringUtils::toUpperCase).collect(
+  Collectors.toList());
+case TO_LOWER:
+  return groups.stream().map(StringUtils::toLowerCase).collect(
+  Collectors.toList());
+case NONE:
+default:
+  return groups;
+}
+  }
+
+}

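A minimal usage sketch for the new mapper, assuming LDAP_CONFIG_PREFIX in LdapGroupsMapping resolves to "hadoop.security.group.mapping.ldap", so CONVERSION_RULE_KEY expands to hadoop.security.group.mapping.ldap.conversion.rule; the key names below are that assumption, not taken verbatim from the patch.

```
import org.apache.hadoop.conf.Configuration;

public class GroupMappingConfigSketch {
  public static void main(String[] args) {
    // Sketch only: the conversion-rule key name is derived from
    // LDAP_CONFIG_PREFIX + ".conversion.rule" in the class above.
    Configuration conf = new Configuration();
    conf.set("hadoop.security.group.mapping",
        "org.apache.hadoop.security.RuleBasedLdapGroupsMapping");
    // Accepted values (case-insensitive): to_upper, to_lower, none.
    conf.set("hadoop.security.group.mapping.ldap.conversion.rule", "to_lower");
    System.out.println(
        conf.get("hadoop.security.group.mapping.ldap.conversion.rule"));
  }
}
```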

[55/59] [abbrv] hadoop git commit: HDFS-13052. WebHDFS: Add support for snapshot diff. Contributed by Lokesh Jain.

2018-02-26 Thread xyao
HDFS-13052. WebHDFS: Add support for snapshot diff. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e84e46f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e84e46f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e84e46f

Branch: refs/heads/HDFS-7240
Commit: 1e84e46f1621fe694f806bfc41d3b2a06c9500b6
Parents: 329a4fd
Author: Xiaoyu Yao 
Authored: Fri Feb 23 16:56:29 2018 -0800
Committer: Xiaoyu Yao 
Committed: Fri Feb 23 19:35:12 2018 -0800

--
 .../hadoop/hdfs/DFSOpsCountStatistics.java  |  1 +
 .../hdfs/protocol/SnapshotDiffReport.java   |  4 ++
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 49 +
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 15 
 .../hadoop/hdfs/web/resources/GetOpParam.java   |  3 +-
 .../web/resources/NamenodeWebHdfsMethods.java   | 33 +++--
 .../org/apache/hadoop/hdfs/web/JsonUtil.java| 33 +
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 72 +++-
 8 files changed, 201 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e84e46f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
index 4b2a761..bbd1bd7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
@@ -87,6 +87,7 @@ public class DFSOpsCountStatistics extends StorageStatistics {
 SET_STORAGE_POLICY("op_set_storagePolicy"),
 SET_TIMES(CommonStatisticNames.OP_SET_TIMES),
 SET_XATTR("op_set_xattr"),
+GET_SNAPSHOT_DIFF("op_get_snapshot_diff"),
 TRUNCATE(CommonStatisticNames.OP_TRUNCATE),
 UNSET_STORAGE_POLICY("op_unset_storage_policy");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e84e46f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
index be3b94d..8ee4ec7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
@@ -69,6 +69,10 @@ public class SnapshotDiffReport {
   }
   return null;
 }
+
+public static DiffType parseDiffType(String s){
+  return DiffType.valueOf(s.toUpperCase());
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e84e46f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index c274c49..2725e9c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -49,6 +50,7 @@ import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
 
@@ -693,4 +695,51 @@ class JsonUtilClient {
 encryptDataTransfer, trashInterval, type, 

[50/59] [abbrv] hadoop git commit: YARN-5714. ContainerExecutor does not order environment map. Contributed by Remi Catherinot and Jim Brennan

2018-02-26 Thread xyao
YARN-5714. ContainerExecutor does not order environment map. Contributed by 
Remi Catherinot and Jim Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e728f39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e728f39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e728f39

Branch: refs/heads/HDFS-7240
Commit: 8e728f39c961f034369b43e087d68d01aa4a0e7d
Parents: 59cf758
Author: Jason Lowe 
Authored: Fri Feb 23 15:46:35 2018 -0600
Committer: Jason Lowe 
Committed: Fri Feb 23 15:46:35 2018 -0600

--
 .../server/nodemanager/ContainerExecutor.java   |   3 +-
 .../launcher/ContainerLaunch.java   | 194 ++-
 .../launcher/TestContainerLaunch.java   | 337 +++
 3 files changed, 531 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e728f39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index f4279a3..d9f2ea3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -361,7 +361,8 @@ public abstract class ContainerExecutor implements 
Configurable {
 
 if (environment != null) {
   sb.echo("Setting up env variables");
-  for (Map.Entry<String, String> env : environment.entrySet()) {
+  for (Map.Entry<String, String> env :
+  sb.orderEnvByDependencies(environment).entrySet()) {
 sb.env(env.getKey(), env.getValue());
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e728f39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 112f54a..b89cf6f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -30,11 +30,16 @@ import java.io.PrintStream;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -1121,8 +1126,14 @@ public class ContainerLaunch implements 
Callable {
 
   public static abstract class ShellScriptBuilder {
 public static ShellScriptBuilder create() {
-  return Shell.WINDOWS ? new WindowsShellScriptBuilder() :
-new UnixShellScriptBuilder();
+  return create(Shell.osType);
+}
+
+@VisibleForTesting
+public static ShellScriptBuilder create(Shell.OSType osType) {
+  return (osType == Shell.OSType.OS_TYPE_WIN) ?
+  new WindowsShellScriptBuilder() :
+  new UnixShellScriptBuilder();
 }
 
 private static final String LINE_SEPARATOR =
@@ -1248,6 +1259,72 @@ public class ContainerLaunch implements 
Callable {
   return redirectStdErr;
 }
 
+/**
+ * Parses an environment value and returns all environment keys it uses.
+ * @param envVal an environment variable's value
+ * @return all environment variable names used in 

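A hedged illustration (not part of the patch) of the dependency problem this change addresses: if a variable is exported before the variable it references, the reference expands to an empty string in the launch script. Only the orderEnvByDependencies name comes from the diff above; the variable names below are invented.

```
import java.util.LinkedHashMap;
import java.util.Map;

public class EnvOrderSketch {
  public static void main(String[] args) {
    // If BAR is written to the launch script before FOO, "$FOO" expands to
    // nothing; with this patch, ShellScriptBuilder#orderEnvByDependencies is
    // used to emit FOO first. This sketch only shows the problematic input.
    Map<String, String> env = new LinkedHashMap<>();
    env.put("BAR", "$FOO/bin");   // depends on FOO
    env.put("FOO", "/opt/tool");
    env.forEach((k, v) -> System.out.println("export " + k + "=" + v));
  }
}
```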
[51/59] [abbrv] hadoop git commit: HDFS-13164. File not closed if streamer fail with DSQuotaExceededException.

2018-02-26 Thread xyao
HDFS-13164. File not closed if streamer fail with DSQuotaExceededException.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51088d32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51088d32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51088d32

Branch: refs/heads/HDFS-7240
Commit: 51088d323359587dca7831f74c9d065c2fccc60d
Parents: 8e728f3
Author: Xiao Chen 
Authored: Fri Feb 23 13:47:39 2018 -0800
Committer: Xiao Chen 
Committed: Fri Feb 23 13:49:09 2018 -0800

--
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  63 +--
 .../hadoop/hdfs/client/impl/LeaseRenewer.java   |   2 +-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  | 107 +++
 3 files changed, 163 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51088d32/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 7849796..9734752 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -852,7 +852,19 @@ public class DFSOutputStream extends FSOutputSummer
 
   protected synchronized void closeImpl() throws IOException {
 if (isClosed()) {
-  getStreamer().getLastException().check(true);
+  LOG.debug("Closing an already closed stream. [Stream:{}, streamer:{}]",
+  closed, getStreamer().streamerClosed());
+  try {
+getStreamer().getLastException().check(true);
+  } catch (IOException ioe) {
+cleanupAndRethrowIOException(ioe);
+  } finally {
+if (!closed) {
+  // If stream is not closed but streamer closed, clean up the stream.
+  // Most importantly, end the file lease.
+  closeThreads(true);
+}
+  }
   return;
 }
 
@@ -867,14 +879,12 @@ public class DFSOutputStream extends FSOutputSummer
 setCurrentPacketToEmpty();
   }
 
-  flushInternal(); // flush all data to Datanodes
-  // get last block before destroying the streamer
-  ExtendedBlock lastBlock = getStreamer().getBlock();
-
-  try (TraceScope ignored =
-   dfsClient.getTracer().newScope("completeFile")) {
-completeFile(lastBlock);
+  try {
+flushInternal(); // flush all data to Datanodes
+  } catch (IOException ioe) {
+cleanupAndRethrowIOException(ioe);
   }
+  completeFile();
 } catch (ClosedChannelException ignored) {
 } finally {
   // Failures may happen when flushing data.
@@ -886,6 +896,43 @@ public class DFSOutputStream extends FSOutputSummer
 }
   }
 
+  private void completeFile() throws IOException {
+// get last block before destroying the streamer
+ExtendedBlock lastBlock = getStreamer().getBlock();
+try (TraceScope ignored =
+dfsClient.getTracer().newScope("completeFile")) {
+  completeFile(lastBlock);
+}
+  }
+
+  /**
+   * Determines whether an IOException thrown needs extra cleanup on the 
stream.
+   * Space quota exceptions will be thrown when getting new blocks, so the
+   * open HDFS file needs to be closed.
+   *
+   * @param ioe the IOException
+   * @return whether the stream needs cleanup for the given IOException
+   */
+  private boolean exceptionNeedsCleanup(IOException ioe) {
+return ioe instanceof DSQuotaExceededException
+|| ioe instanceof QuotaByStorageTypeExceededException;
+  }
+
+  private void cleanupAndRethrowIOException(IOException ioe)
+  throws IOException {
+if (exceptionNeedsCleanup(ioe)) {
+  final MultipleIOException.Builder b = new MultipleIOException.Builder();
+  b.add(ioe);
+  try {
+completeFile();
+  } catch (IOException e) {
+b.add(e);
+throw b.build();
+  }
+}
+throw ioe;
+  }
+
   // should be called holding (this) lock since setTestFilename() may
   // be called during unit tests
   protected void completeFile(ExtendedBlock last) throws IOException {

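A hedged sketch of the failure mode this patch addresses, from the client's point of view. The directory name and write sizes are invented; the point is that close() now completes the file and releases the lease even when the streamer already failed with a space-quota exception.

```
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;

public class QuotaCloseSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Assumption: /quota-limited is a hypothetical directory with a small
    // space quota, so writing past it makes the streamer fail.
    try (FSDataOutputStream out = fs.create(new Path("/quota-limited/file"))) {
      byte[] chunk = new byte[1 << 20];
      for (int i = 0; i < 64; i++) {
        out.write(chunk);
      }
    } catch (DSQuotaExceededException e) {
      // Before this patch, hitting the earlier streamer failure in close()
      // could leave the file open (lease never released); now the file is
      // still completed and the lease ends.
    }
  }
}
```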
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51088d32/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
 

[58/59] [abbrv] hadoop git commit: HDFS-12070. Failed block recovery leaves files open indefinitely and at risk for data loss. Contributed by Kihwal Lee.

2018-02-26 Thread xyao
HDFS-12070. Failed block recovery leaves files open indefinitely and at risk 
for data loss. Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/451265a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/451265a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/451265a8

Branch: refs/heads/HDFS-7240
Commit: 451265a83d8798624ae2a144bc58fa41db826704
Parents: 2fa7963
Author: Kihwal Lee 
Authored: Mon Feb 26 10:28:04 2018 -0600
Committer: Kihwal Lee 
Committed: Mon Feb 26 10:28:04 2018 -0600

--
 .../server/datanode/BlockRecoveryWorker.java|  6 +--
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   | 44 
 2 files changed, 46 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/451265a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index 2ecd986..94835e2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -307,10 +307,8 @@ public class BlockRecoveryWorker {
 }
   }
 
-  // If any of the data-nodes failed, the recovery fails, because
-  // we never know the actual state of the replica on failed data-nodes.
-  // The recovery should be started over.
-  if (!failedList.isEmpty()) {
+  // Abort if all failed.
+  if (successList.isEmpty()) {
 throw new IOException("Cannot recover " + block
 + ", the following datanodes failed: " + failedList);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/451265a8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index d62194c..c82b47c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -228,6 +228,50 @@ public class TestLeaseRecovery {
   }
 
   /**
+   * Block/lease recovery should be retried with failed nodes from the second
+   * stage removed to avoid perpetual recovery failures.
+   */
+  @Test
+  public void testBlockRecoveryRetryAfterFailedRecovery() throws Exception {
+Configuration conf = new Configuration();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+Path file = new Path("/testBlockRecoveryRetryAfterFailedRecovery");
+DistributedFileSystem dfs = cluster.getFileSystem();
+
+// Create a file.
+FSDataOutputStream out = dfs.create(file);
+final int FILE_SIZE = 128 * 1024;
+int count = 0;
+while (count < FILE_SIZE) {
+  out.writeBytes("DE K9SUL");
+  count += 8;
+}
+out.hsync();
+
+// Abort the original stream.
+((DFSOutputStream) out.getWrappedStream()).abort();
+
+LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
+file.toString(), 0, count);
+ExtendedBlock block = locations.get(0).getBlock();
+
+// Finalize one replica to simulate a partial close failure.
+cluster.getDataNodes().get(0).getFSDataset().finalizeBlock(block, false);
+// Delete the meta file to simulate a rename/move failure.
+cluster.deleteMeta(0, block);
+
+// Try to recover the lease.
+DistributedFileSystem newDfs = (DistributedFileSystem) FileSystem
+.newInstance(cluster.getConfiguration(0));
+count = 0;
+while (count++ < 15 && !newDfs.recoverLease(file)) {
+  Thread.sleep(1000);
+}
+// The lease should have been recovered.
+assertTrue("File should be closed", newDfs.recoverLease(file));
+  }
+
+  /**
* Recover the lease on a file and append file from another client.
*/
   @Test


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[42/59] [abbrv] hadoop git commit: YARN-7836. Added error check for updating service components. (Contributed by Gour Saha)

2018-02-26 Thread xyao
YARN-7836.  Added error check for updating service components.
(Contributed by Gour Saha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19096900
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19096900
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19096900

Branch: refs/heads/HDFS-7240
Commit: 190969006d4a7f9ef86d67bba472f7dc5642668a
Parents: 84a1321
Author: Eric Yang 
Authored: Thu Feb 22 16:08:30 2018 -0500
Committer: Eric Yang 
Committed: Thu Feb 22 16:08:30 2018 -0500

--
 .../hadoop/yarn/service/webapp/ApiServer.java   | 23 +--
 .../hadoop/yarn/service/TestApiServer.java  | 69 
 2 files changed, 75 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19096900/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index e58938e..1528596 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -280,14 +280,25 @@ public class ApiServer {
   @PathParam(COMPONENT_NAME) String componentName, Component component) {
 
 try {
-  UserGroupInformation ugi = getProxyUser(request);
+  if (component == null) {
+throw new YarnException("No component data provided");
+  }
+  if (component.getName() != null
+  && !component.getName().equals(componentName)) {
+String msg = "Component name in the request object ("
++ component.getName() + ") does not match that in the URI path ("
++ componentName + ")";
+throw new YarnException(msg);
+  }
+  if (component.getNumberOfContainers() == null) {
+throw new YarnException("No container count provided");
+  }
   if (component.getNumberOfContainers() < 0) {
-String message =
-"Service = " + appName + ", Component = " + component.getName()
-+ ": Invalid number of containers specified " + component
-.getNumberOfContainers();
+String message = "Invalid number of containers specified "
++ component.getNumberOfContainers();
 throw new YarnException(message);
   }
+  UserGroupInformation ugi = getProxyUser(request);
  Map<String, Long> original = ugi
  .doAs(new PrivilegedExceptionAction<Map<String, Long>>() {
 @Override
@@ -296,7 +307,7 @@ public class ApiServer {
   sc.init(YARN_CONFIG);
   sc.start();
  Map<String, Long> original = sc.flexByRestService(appName,
-  Collections.singletonMap(component.getName(),
+  Collections.singletonMap(componentName,
   component.getNumberOfContainers()));
   sc.close();
   return original;

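A hedged sketch of a flex request body that passes the new checks. The setter names on Component are assumptions based on the generated service API records; only the validation rules themselves come from the hunk above.

```
import org.apache.hadoop.yarn.service.api.records.Component;

public class FlexRequestSketch {
  public static void main(String[] args) {
    // Assumed setters: setName(String), setNumberOfContainers(Long).
    Component flexReq = new Component();
    flexReq.setName("worker");          // must match the component name in the URI path
    flexReq.setNumberOfContainers(3L);  // must be present and non-negative
    System.out.println("Flexing " + flexReq.getName() + " to "
        + flexReq.getNumberOfContainers() + " containers");
    // A null body, a mismatched name, a missing count, or a negative count now
    // fails fast with a YarnException before any proxy-user work is attempted.
  }
}
```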
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19096900/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
index 52057db..4629d28 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
@@ -17,6 +17,16 @@
 
 package org.apache.hadoop.yarn.service;
 
+import static org.junit.Assert.*;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.Path;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
 import org.apache.hadoop.conf.Configuration;
 import 

[33/59] [abbrv] hadoop git commit: HADOOP-12897. KerberosAuthenticator.authenticate to include URL on IO failures. Contributed by Ajay Kumar.

2018-02-26 Thread xyao
HADOOP-12897. KerberosAuthenticator.authenticate to include URL on IO failures. 
Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0d3c877
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0d3c877
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0d3c877

Branch: refs/heads/HDFS-7240
Commit: b0d3c877e30312820124cac2eff737fddac9e484
Parents: 324e5a7
Author: Arpit Agarwal 
Authored: Tue Feb 20 18:18:58 2018 -0800
Committer: Arpit Agarwal 
Committed: Tue Feb 20 18:18:58 2018 -0800

--
 .../client/KerberosAuthenticator.java   | 80 +---
 .../client/TestKerberosAuthenticator.java   | 29 +++
 .../hadoop/http/TestHttpServerWithSpengo.java   |  5 +-
 .../org/apache/hadoop/log/TestLogLevel.java | 18 -
 .../delegation/web/TestWebDelegationToken.java  |  4 +-
 5 files changed, 101 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0d3c877/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index 942d13c..64d4330 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -13,6 +13,8 @@
  */
 package org.apache.hadoop.security.authentication.client;
 
+import com.google.common.annotations.VisibleForTesting;
+import java.lang.reflect.Constructor;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.security.authentication.server.HttpConstants;
 import org.apache.hadoop.security.authentication.util.AuthToken;
@@ -177,41 +179,65 @@ public class KerberosAuthenticator implements 
Authenticator {
*/
   @Override
   public void authenticate(URL url, AuthenticatedURL.Token token)
-throws IOException, AuthenticationException {
+  throws IOException, AuthenticationException {
 if (!token.isSet()) {
   this.url = url;
   base64 = new Base64(0);
-  HttpURLConnection conn = token.openConnection(url, connConfigurator);
-  conn.setRequestMethod(AUTH_HTTP_METHOD);
-  conn.connect();
-  
-  boolean needFallback = false;
-  if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
-LOG.debug("JDK performed authentication on our behalf.");
-// If the JDK already did the SPNEGO back-and-forth for
-// us, just pull out the token.
-AuthenticatedURL.extractToken(conn, token);
-if (isTokenKerberos(token)) {
-  return;
+  try {
+HttpURLConnection conn = token.openConnection(url, connConfigurator);
+conn.setRequestMethod(AUTH_HTTP_METHOD);
+conn.connect();
+
+boolean needFallback = false;
+if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
+  LOG.debug("JDK performed authentication on our behalf.");
+  // If the JDK already did the SPNEGO back-and-forth for
+  // us, just pull out the token.
+  AuthenticatedURL.extractToken(conn, token);
+  if (isTokenKerberos(token)) {
+return;
+  }
+  needFallback = true;
 }
-needFallback = true;
-  }
-  if (!needFallback && isNegotiate(conn)) {
-LOG.debug("Performing our own SPNEGO sequence.");
-doSpnegoSequence(token);
-  } else {
-LOG.debug("Using fallback authenticator sequence.");
-Authenticator auth = getFallBackAuthenticator();
-// Make sure that the fall back authenticator have the same
-// ConnectionConfigurator, since the method might be overridden.
-// Otherwise the fall back authenticator might not have the information
-// to make the connection (e.g., SSL certificates)
-auth.setConnectionConfigurator(connConfigurator);
-auth.authenticate(url, token);
+if (!needFallback && isNegotiate(conn)) {
+  LOG.debug("Performing our own SPNEGO sequence.");
+  doSpnegoSequence(token);
+} else {
+  LOG.debug("Using fallback authenticator sequence.");
+  Authenticator auth = getFallBackAuthenticator();
+  // Make sure that the fall back authenticator have the same
+  // ConnectionConfigurator, since the method might be overridden.
+  

[48/59] [abbrv] hadoop git commit: HADOOP-15007. Stabilize and document Configuration <Tags> element. Contributed by Ajay Kumar.

2018-02-26 Thread xyao
HADOOP-15007. Stabilize and document Configuration <Tags> element. Contributed 
by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3688e491
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3688e491
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3688e491

Branch: refs/heads/HDFS-7240
Commit: 3688e491d528edb9efe54c4ef110d2ded62db3e8
Parents: d1cd573
Author: Anu Engineer 
Authored: Fri Feb 23 10:26:22 2018 -0800
Committer: Anu Engineer 
Committed: Fri Feb 23 10:26:22 2018 -0800

--
 .../org/apache/hadoop/conf/Configuration.java   | 145 +++
 .../org/apache/hadoop/conf/CorePropertyTag.java |  37 -
 .../org/apache/hadoop/conf/HDFSPropertyTag.java |  41 --
 .../org/apache/hadoop/conf/PropertyTag.java |  30 
 .../org/apache/hadoop/conf/YarnPropertyTag.java |  39 -
 .../fs/CommonConfigurationKeysPublic.java   |   2 +
 .../src/main/resources/core-default.xml |   8 +
 .../apache/hadoop/conf/TestConfiguration.java   | 121 
 8 files changed, 153 insertions(+), 270 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3688e491/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index f8e4638..00b4702 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -83,7 +83,6 @@ import javax.xml.transform.stream.StreamResult;
 
 import com.google.common.base.Charsets;
 import org.apache.commons.collections.map.UnmodifiableMap;
-import org.apache.commons.io.FilenameUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -195,6 +194,30 @@ import static 
org.apache.commons.lang3.StringUtils.isNotBlank;
  * parameters and these are suppressible by configuring
  * log4j.logger.org.apache.hadoop.conf.Configuration.deprecation in
  * log4j.properties file.
+ *
+ * Tags
+ *
+ * Optionally we can tag related properties together by using tag
+ * attributes. System tags are defined by the hadoop.system.tags property. Users
+ * can define their own custom tags in the hadoop.custom.tags property.
+ *
+ * For example, we can tag existing property as:
+ * <pre>
+ *  <property>
+ *    <name>dfs.replication</name>
+ *    <value>3</value>
+ *    <tag>HDFS,REQUIRED</tag>
+ *  </property>
+ *
+ *  <property>
+ *    <name>dfs.data.transfer.protection</name>
+ *    <value>3</value>
+ *    <tag>HDFS,SECURITY</tag>
+ *  </property>
+ * </pre>
+ *  Properties marked with tags can be retrieved with conf
+ * .getAllPropertiesByTag("HDFS") or conf.getAllPropertiesByTags
+ * (Arrays.asList("YARN","SECURITY")).
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
@@ -206,6 +229,7 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
   private static final Logger LOG_DEPRECATION =
   LoggerFactory.getLogger(
   "org.apache.hadoop.conf.Configuration.deprecation");
+  private static final Set<String> TAGS = new HashSet<>();
 
   private boolean quietmode = true;
 
@@ -297,14 +321,9 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
 new WeakHashMap();
 
   /**
-   * Map to register all classes holding property tag enums.
-   */
-  private static final Map
-  REGISTERED_TAG_CLASS = new HashMap<>();
-  /**
* Map to hold properties by there tag groupings.
*/
-  private final Map propertyTagsMap =
+  private final Map propertyTagsMap =
   new ConcurrentHashMap<>();
 
   /**
@@ -785,11 +804,6 @@ public class Configuration implements 
Iterable>,
   public Configuration(boolean loadDefaults) {
 this.loadDefaults = loadDefaults;
 
-// Register all classes holding property tags with
-REGISTERED_TAG_CLASS.put("core", CorePropertyTag.class);
-REGISTERED_TAG_CLASS.put("hdfs", HDFSPropertyTag.class);
-REGISTERED_TAG_CLASS.put("yarn", YarnPropertyTag.class);
-
 synchronized(Configuration.class) {
   REGISTRY.put(this, null);
 }
@@ -820,7 +834,6 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
   this.finalParameters = Collections.newSetFromMap(
   new ConcurrentHashMap());
   

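A hedged usage sketch of the tag lookups described in the new javadoc above. The method names conf.getAllPropertiesByTag and conf.getAllPropertiesByTags come from that javadoc; the Properties return type is an assumption.

```
import java.util.Arrays;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;

public class TagLookupSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Assumption: both lookups return a Properties view of the matching entries.
    Properties hdfsProps = conf.getAllPropertiesByTag("HDFS");
    Properties secured =
        conf.getAllPropertiesByTags(Arrays.asList("YARN", "SECURITY"));
    System.out.println(hdfsProps.size() + " HDFS-tagged properties, "
        + secured.size() + " YARN/SECURITY-tagged properties");
  }
}
```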
[30/59] [abbrv] hadoop git commit: YARN-7732. Support Generic AM Simulator from SynthGenerator. (Contributed by Young Chen via curino)

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSynthJobGeneration.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSynthJobGeneration.java
 
b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSynthJobGeneration.java
index 2b1971a..794cd47 100644
--- 
a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSynthJobGeneration.java
+++ 
b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSynthJobGeneration.java
@@ -17,20 +17,25 @@
  */
 package org.apache.hadoop.yarn.sls;
 
+import org.apache.commons.math3.random.JDKRandomGenerator;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.tools.rumen.TaskAttemptInfo;
 import org.apache.hadoop.yarn.sls.synthetic.SynthJob;
 import org.apache.hadoop.yarn.sls.synthetic.SynthTraceJobProducer;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.log4j.Logger;
 import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import static org.junit.Assert.assertTrue;
 
+import static org.codehaus.jackson.JsonParser.Feature.INTERN_FIELD_NAMES;
+import static 
org.codehaus.jackson.map.DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES;
+
 /**
  * Simple test class driving the {@code SynthTraceJobProducer}, and validating
  * jobs produce are within expected range.
@@ -38,10 +43,60 @@ import static org.junit.Assert.assertTrue;
 public class TestSynthJobGeneration {
 
   public final static Logger LOG =
-  Logger.getLogger(TestSynthJobGeneration.class);
+  LoggerFactory.getLogger(TestSynthJobGeneration.class);
 
   @Test
-  public void test() throws IllegalArgumentException, IOException {
+  public void testWorkloadGenerateTime()
+  throws IllegalArgumentException, IOException {
+
+String workloadJson = "{\"job_classes\": [], \"time_distribution\":["
++ "{\"time\": 0, \"weight\": 1}, " + "{\"time\": 30, \"weight\": 0},"
++ "{\"time\": 60, \"weight\": 2}," + "{\"time\": 90, \"weight\": 1}"
++ "]}";
+
+ObjectMapper mapper = new ObjectMapper();
+mapper.configure(INTERN_FIELD_NAMES, true);
+mapper.configure(FAIL_ON_UNKNOWN_PROPERTIES, false);
+SynthTraceJobProducer.Workload wl =
+mapper.readValue(workloadJson, SynthTraceJobProducer.Workload.class);
+
+JDKRandomGenerator rand = new JDKRandomGenerator();
+rand.setSeed(0);
+
+wl.init(rand);
+
+int bucket0 = 0;
+int bucket1 = 0;
+int bucket2 = 0;
+int bucket3 = 0;
+for (int i = 0; i < 1000; ++i) {
+  long time = wl.generateSubmissionTime();
+  LOG.info("Generated time " + time);
+  if (time < 30) {
+bucket0++;
+  } else if (time < 60) {
+bucket1++;
+  } else if (time < 90) {
+bucket2++;
+  } else {
+bucket3++;
+  }
+}
+
+Assert.assertTrue(bucket0 > 0);
+Assert.assertTrue(bucket1 == 0);
+Assert.assertTrue(bucket2 > 0);
+Assert.assertTrue(bucket3 > 0);
+Assert.assertTrue(bucket2 > bucket0);
+Assert.assertTrue(bucket2 > bucket3);
+
+LOG.info("bucket0 {}, bucket1 {}, bucket2 {}, bucket3 {}", bucket0, 
bucket1,
+bucket2, bucket3);
+
+  }
+
+  @Test
+  public void testMapReduce() throws IllegalArgumentException, IOException {
 
 Configuration conf = new Configuration();
 
@@ -50,47 +105,155 @@ public class TestSynthJobGeneration {
 
 SynthTraceJobProducer stjp = new SynthTraceJobProducer(conf);
 
+LOG.info(stjp.toString());
+
 SynthJob js = (SynthJob) stjp.getNextJob();
 
 int jobCount = 0;
 
 while (js != null) {
-  LOG.info((jobCount++) + " " + js.getQueueName() + " -- "
-  + js.getJobClass().getClassName() + " (conf: "
-  + js.getJobConf().get(MRJobConfig.QUEUE_NAME) + ") " + " submission: 
"
-  + js.getSubmissionTime() + ", " + " duration: " + js.getDuration()
-  + " numMaps: " + js.getNumberMaps() + " numReduces: "
-  + js.getNumberReduces());
+  LOG.info(js.toString());
+  validateJob(js);
+  js = (SynthJob) stjp.getNextJob();
+  jobCount++;
+}
 
+Assert.assertEquals(stjp.getNumJobs(), jobCount);
+  }
+
+  @Test
+  public void testGeneric() throws IllegalArgumentException, IOException {
+Configuration conf = new Configuration();
+
+conf.set(SynthTraceJobProducer.SLS_SYNTHETIC_TRACE_FILE,
+"src/test/resources/syn_generic.json");
+
+SynthTraceJobProducer stjp = new SynthTraceJobProducer(conf);
+
+LOG.info(stjp.toString());
+
+SynthJob js = (SynthJob) stjp.getNextJob();
+
+int jobCount = 0;
+

[08/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
deleted file mode 100644
index f938185..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
+++ /dev/null
@@ -1,593 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.service.AbstractService;
-import  org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
-import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
-import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
-import 
org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-import 

[35/59] [abbrv] hadoop git commit: YARN-7223. Document GPU isolation feature. Contributed by Wangda Tan.

2018-02-26 Thread xyao
YARN-7223. Document GPU isolation feature. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86b227a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86b227a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86b227a1

Branch: refs/heads/HDFS-7240
Commit: 86b227a1fbe26b992c5498cfdd3b1691b4362ee9
Parents: 121e1e1
Author: Sunil G 
Authored: Wed Feb 21 14:16:45 2018 +0530
Committer: Sunil G 
Committed: Wed Feb 21 14:16:45 2018 +0530

--
 .../src/site/markdown/UsingGpus.md  | 230 +++
 1 file changed, 230 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86b227a1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/UsingGpus.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/UsingGpus.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/UsingGpus.md
new file mode 100644
index 000..f6000e7
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/UsingGpus.md
@@ -0,0 +1,230 @@
+
+
+
+# Using GPU On YARN
+# Prerequisites
+
+- As of now, only Nvidia GPUs are supported by YARN.
+- YARN node managers have to be pre-installed with Nvidia drivers.
+- When Docker is used as the container runtime, nvidia-docker 1.0 needs to 
be installed (the nvidia-docker version currently supported by YARN).
+
+# Configs
+
+## GPU scheduling
+
+In `resource-types.xml`
+
+Add the following properties:
+
+```
+<configuration>
+  <property>
+     <name>yarn.resource-types</name>
+     <value>yarn.io/gpu</value>
+  </property>
+</configuration>
+```
+
+In `yarn-site.xml`
+
+`DominantResourceCalculator` MUST be configured to enable GPU 
scheduling/isolation.
+
+For `Capacity Scheduler`, use the following property to configure 
`DominantResourceCalculator` (in `capacity-scheduler.xml`):
+
+| Property | Default value |
+| --- | --- |
+|  yarn.scheduler.capacity.resource-calculator | 
org.apache.hadoop.yarn.util.resource.DominantResourceCalculator |
+
+
+## GPU Isolation
+
+### In `yarn-site.xml`
+
+```
+  <property>
+    <name>yarn.nodemanager.resource-plugins</name>
+    <value>yarn.io/gpu</value>
+  </property>
+```
+
+This enables the GPU isolation module on the NodeManager side.
+
+By default, YARN will automatically detect and configure GPUs when the above config 
is set. The following configs need to be set in `yarn-site.xml` only if the admin has 
specialized requirements.
+
+**1) Allowed GPU Devices**
+
+| Property | Default value |
+| --- | --- |
+| yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices | auto |
+
+  Specifies the GPU devices which can be managed by the YARN NodeManager
+  (comma-separated). The number of GPU devices will be reported to the RM to
+  make scheduling decisions. Set to auto (default) to let YARN automatically
+  discover the GPU resources from the system.
+
+  Manually specify GPU devices if auto detection failed or if the admin only
+  wants a subset of the GPU devices to be managed by YARN. A GPU device is
+  identified by its minor device number and its index. A common approach to get
+  the minor device numbers of GPUs is to run `nvidia-smi -q` and search for the
+  `Minor Number` output.
+
+  When minor numbers are specified manually, the admin needs to include the
+  indices of the GPUs as well; the format is
+  `index:minor_number[,index:minor_number...]`. An example of a manual
+  specification is `0:0,1:1,2:2,3:4`, which allows the YARN NodeManager to
+  manage the GPU devices with indices `0/1/2/3` and minor numbers `0/1/2/4`.
+
+**2) Executable to discover GPUs**
+
+| Property | value |
+| --- | --- |
+| yarn.nodemanager.resource-plugins.gpu.path-to-discovery-executables | 
/absolute/path/to/nvidia-smi |
+
+When `yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices=auto` is specified,
+the YARN NodeManager needs to run a GPU discovery binary (currently only
+`nvidia-smi` is supported) to get GPU-related information.
+When the value is empty (default), the YARN NodeManager will try to locate the
+discovery executable itself.
+An example of the config value is: `/usr/local/bin/nvidia-smi`
+
+**3) Docker Plugin Related Configs**
+
+The following configs can be customized when users need to run GPU applications 
inside Docker containers. They are not required if the admin follows the default 
installation/configuration of `nvidia-docker`.
+
+| Property | Default value |
+| --- | --- |
+| yarn.nodemanager.resource-plugins.gpu.docker-plugin | nvidia-docker-v1 |
+
+Specify the Docker command plugin for GPU. By default, Nvidia Docker V1.0 is used.
+
+| Property | Default value |
+| --- | --- |
+| 
yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidia-docker-v1.endpoint | 
http://localhost:3476/v1.0/docker/cli |
+
+Specify the endpoint of `nvidia-docker-plugin`. See 
https://github.com/NVIDIA/nvidia-docker/wiki for more details.
+
+**4) CGroups mount**
+

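For illustration of the `index:minor_number[,index:minor_number...]` device list described above, here is a minimal, hypothetical Java sketch of how such a value could be parsed; the class and method names are invented and this is not code from the commit.

```java
import java.util.LinkedHashMap;
import java.util.Map;

/** Illustrative parser for the "index:minor_number[,...]" GPU device list. */
public final class GpuDeviceSpecParser {

  /** Returns a map of GPU index -> minor device number, e.g. for "0:0,1:1,2:2,3:4". */
  public static Map<Integer, Integer> parse(String spec) {
    Map<Integer, Integer> devices = new LinkedHashMap<>();
    for (String pair : spec.split(",")) {
      String[] parts = pair.trim().split(":");
      if (parts.length != 2) {
        throw new IllegalArgumentException("Expected index:minor_number, got: " + pair);
      }
      devices.put(Integer.parseInt(parts[0]), Integer.parseInt(parts[1]));
    }
    return devices;
  }

  public static void main(String[] args) {
    // Allows YARN to manage GPUs with indices 0/1/2/3 and minor numbers 0/1/2/4.
    System.out.println(parse("0:0,1:1,2:2,3:4"));
  }
}
```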
[12/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java
new file mode 100644
index 000..0857980
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Encodes a value by interpreting it as a Long and converting it to bytes and
+ * decodes a set of bytes as a Long.
+ */
+public final class LongConverter implements NumericValueConverter,
+Serializable {
+
+  /**
+   * Added because we implement Comparator.
+   */
+  private static final long serialVersionUID = 1L;
+
+  public LongConverter() {
+  }
+
+  @Override
+  public byte[] encodeValue(Object value) throws IOException {
+if (!HBaseTimelineSchemaUtils.isIntegralValue(value)) {
+  throw new IOException("Expected integral value");
+}
+return Bytes.toBytes(((Number)value).longValue());
+  }
+
+  @Override
+  public Object decodeValue(byte[] bytes) throws IOException {
+if (bytes == null) {
+  return null;
+}
+return Bytes.toLong(bytes);
+  }
+
+  /**
+   * Compares two numbers as longs. If either number is null, it will be taken
+   * as 0.
+   *
+   * @param num1 the first {@code Long} to compare.
+   * @param num2 the second {@code Long} to compare.
+   * @return -1 if num1 is less than num2, 0 if num1 is equal to num2 and 1 if
+   * num1 is greater than num2.
+   */
+  @Override
+  public int compare(Number num1, Number num2) {
+return Long.compare((num1 == null) ? 0L : num1.longValue(),
+(num2 == null) ? 0L : num2.longValue());
+  }
+
+  @Override
+  public Number add(Number num1, Number num2, Number...numbers) {
+long sum = ((num1 == null) ? 0L : num1.longValue()) +
+((num2 == null) ? 0L : num2.longValue());
+for (Number num : numbers) {
+  sum = sum + ((num == null) ? 0L : num.longValue());
+}
+return sum;
+  }
+
+  /**
+   * Converts a timestamp into it's inverse timestamp to be used in (row) keys
+   * where we want to have the most recent timestamp in the top of the table
+   * (scans start at the most recent timestamp first).
+   *
+   * @param key value to be inverted so that the latest version will be first 
in
+   *  a scan.
+   * @return inverted long
+   */
+  public static long invertLong(long key) {
+return Long.MAX_VALUE - key;
+  }
+}
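
A hedged usage sketch of the `LongConverter` added above (not part of the patch); it only exercises the methods shown in the diff.

```java
import java.io.IOException;

import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;

/** Hypothetical usage sketch for LongConverter; not part of the commit. */
public class LongConverterDemo {
  public static void main(String[] args) throws IOException {
    LongConverter converter = new LongConverter();

    byte[] bytes = converter.encodeValue(42L);             // 8-byte encoding via HBase Bytes
    long roundTrip = (Long) converter.decodeValue(bytes);  // 42

    // Row keys store inverted timestamps so scans return the most recent entry first.
    long now = System.currentTimeMillis();
    long inverted = LongConverter.invertLong(now);          // Long.MAX_VALUE - now
    System.out.println(roundTrip + " " + (LongConverter.invertLong(inverted) == now));
  }
}
```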

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongKeyConverter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongKeyConverter.java
 

[03/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
deleted file mode 100644
index 0edd6a5..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
+++ /dev/null
@@ -1,520 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Query;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
-import org.apache.hadoop.yarn.webapp.BadRequestException;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for application entities that are stored in the
- * application table.
- */
-class ApplicationEntityReader extends GenericEntityReader {
-  private static final ApplicationTable APPLICATION_TABLE =
-  new ApplicationTable();
-
-  public ApplicationEntityReader(TimelineReaderContext ctxt,
-  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-super(ctxt, entityFilters, toRetrieve);
-  }
-
-  public 

[29/59] [abbrv] hadoop git commit: HDFS-13167. DatanodeAdminManager Improvements. Contributed by BELUGA BEHR.

2018-02-26 Thread xyao
HDFS-13167. DatanodeAdminManager Improvements. Contributed by BELUGA BEHR.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f81cc0b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f81cc0b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f81cc0b

Branch: refs/heads/HDFS-7240
Commit: 6f81cc0beea00843b44424417f09d8ee12cd7bae
Parents: 17c592e
Author: Inigo Goiri 
Authored: Tue Feb 20 15:18:27 2018 -0800
Committer: Inigo Goiri 
Committed: Tue Feb 20 15:18:27 2018 -0800

--
 .../blockmanagement/DatanodeAdminManager.java   | 27 ++--
 1 file changed, 14 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f81cc0b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
index e338591..a1dff08 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
@@ -21,8 +21,9 @@ import static 
com.google.common.base.Preconditions.checkArgument;
 import static org.apache.hadoop.util.Time.monotonicNow;
 
 import java.util.AbstractList;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Queue;
@@ -139,7 +140,7 @@ public class DatanodeAdminManager {
 new ThreadFactoryBuilder().setNameFormat("DatanodeAdminMonitor-%d")
 .setDaemon(true).build());
 outOfServiceNodeBlocks = new TreeMap<>();
-pendingNodes = new LinkedList<>();
+pendingNodes = new ArrayDeque<>();
   }
 
   /**
@@ -219,7 +220,7 @@ public class DatanodeAdminManager {
 pendingNodes.add(node);
   }
 } else {
-  LOG.trace("startDecommission: Node {} in {}, nothing to do." +
+  LOG.trace("startDecommission: Node {} in {}, nothing to do.",
   node, node.getAdminState());
 }
   }
@@ -242,7 +243,7 @@ public class DatanodeAdminManager {
   pendingNodes.remove(node);
   outOfServiceNodeBlocks.remove(node);
 } else {
-  LOG.trace("stopDecommission: Node {} in {}, nothing to do." +
+  LOG.trace("stopDecommission: Node {} in {}, nothing to do.",
   node, node.getAdminState());
 }
   }
@@ -272,7 +273,7 @@ public class DatanodeAdminManager {
   // IN_MAINTENANCE to support maintenance expiration.
   pendingNodes.add(node);
 } else {
-  LOG.trace("startMaintenance: Node {} in {}, nothing to do." +
+  LOG.trace("startMaintenance: Node {} in {}, nothing to do.",
   node, node.getAdminState());
 }
   }
@@ -321,7 +322,7 @@ public class DatanodeAdminManager {
   pendingNodes.remove(node);
   outOfServiceNodeBlocks.remove(node);
 } else {
-  LOG.trace("stopMaintenance: Node {} in {}, nothing to do." +
+  LOG.trace("stopMaintenance: Node {} in {}, nothing to do.",
   node, node.getAdminState());
 }
   }
@@ -395,7 +396,7 @@ public class DatanodeAdminManager {
 for (DatanodeStorageInfo storage : storages) {
   final DatanodeDescriptor node = storage.getDatanodeDescriptor();
   nodeList.append(node);
-  nodeList.append(" ");
+  nodeList.append(' ');
 }
 NameNode.blockStateChangeLog.info(
 "Block: " + block + ", Expected Replicas: "
@@ -517,7 +518,7 @@ public class DatanodeAdminManager {
   final Iterator>
   it = new CyclicIteration<>(outOfServiceNodeBlocks,
   iterkey).iterator();
-  final LinkedList toRemove = new LinkedList<>();
+  final List toRemove = new ArrayList<>();
 
   while (it.hasNext() && !exceededNumBlocksPerCheck() && namesystem
   .isRunning()) {
@@ -583,12 +584,12 @@ public class DatanodeAdminManager {
   "A node is in an invalid state!");
 }
 LOG.debug("Node {} is sufficiently replicated and healthy, "
-+ "marked as {}.", dn.getAdminState());
++ "marked as {}.", dn, dn.getAdminState());
   } else {
 LOG.debug("Node {} {} healthy."
 + " It needs to replicate {} more blocks."
 + " {} is still in progress.", dn,
-isHealthy? "is": "isn't", 

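The log-statement changes above replace string concatenation with a second format argument. A hedged, standalone illustration of the difference (class and method names invented):

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Hedged illustration of the parameterized-logging fix above; names are invented. */
public class TraceLoggingExample {
  private static final Logger LOG = LoggerFactory.getLogger(TraceLoggingExample.class);

  void demo(Object node, Object adminState) {
    // Buggy form: '+' concatenates the node into the format string, so only one
    // argument is supplied, the second "{}" stays unfilled, and the node/state
    // pairing in the output is wrong.
    LOG.trace("startDecommission: Node {} in {}, nothing to do." + node, adminState);

    // Fixed form: both placeholders get an argument, and no message string is
    // built unless TRACE is actually enabled.
    LOG.trace("startDecommission: Node {} in {}, nothing to do.", node, adminState);
  }
}
```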
[26/59] [abbrv] hadoop git commit: YARN-7940. Fixed a bug in ServiceAM ZooKeeper initialization. (Contributed by Billie Rinaldi)

2018-02-26 Thread xyao
YARN-7940. Fixed a bug in ServiceAM ZooKeeper initialization.
   (Contributed by Billie Rinaldi)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7280c5af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7280c5af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7280c5af

Branch: refs/heads/HDFS-7240
Commit: 7280c5af82d36a9be15448293210d871f680f55e
Parents: 8896d20
Author: Eric Yang 
Authored: Tue Feb 20 14:12:58 2018 -0500
Committer: Eric Yang 
Committed: Tue Feb 20 14:12:58 2018 -0500

--
 .../org/apache/hadoop/registry/client/impl/zk/CuratorService.java | 3 +++
 .../apache/hadoop/registry/client/impl/zk/RegistrySecurity.java   | 2 ++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7280c5af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
index c81a0ee..2eb7aa5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
@@ -288,6 +288,9 @@ public class CuratorService extends CompositeService
   registrySecurity.applySecurityEnvironment(builder);
   //log them
   securityConnectionDiagnostics = buildSecurityDiagnostics();
+  if (LOG.isDebugEnabled()) {
+LOG.debug(securityConnectionDiagnostics);
+  }
   framework = builder.build();
   framework.start();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7280c5af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
index 521d8a9..bb829d8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
@@ -762,6 +762,8 @@ public class RegistrySecurity extends AbstractService {
   LOG.info(
   "Enabling ZK sasl client: jaasClientEntry = " + jaasClientEntry
   + ", principal = " + principal + ", keytab = " + keytab);
+  break;
+
 default:
   clearZKSaslClientProperties();
   break;

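The added `break` above prevents the SASL case from falling through into the `default` branch, which clears the very properties that were just set. A generic, hedged sketch of that pitfall (all identifiers invented):

```java
/** Hedged sketch of the switch fall-through fixed above; names are invented. */
public class FallThroughExample {
  enum AccessPolicy { SASL, ANONYMOUS }

  void configure(AccessPolicy policy) {
    switch (policy) {
      case SASL:
        enableZkSaslClient();
        break;          // without this break, execution falls into 'default'
                        // and immediately undoes the SASL configuration
      default:
        clearZkSaslClientProperties();
        break;
    }
  }

  private void enableZkSaslClient() { /* set client system properties, log, etc. */ }
  private void clearZkSaslClientProperties() { /* remove the same properties */ }
}
```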

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[52/59] [abbrv] hadoop git commit: MAPREDUCE-7027: HadoopArchiveLogs shouldn't delete the original logs if the HAR creation fails. Contributed by Gergely Novák

2018-02-26 Thread xyao
MAPREDUCE-7027: HadoopArchiveLogs shouldn't delete the original logs if the HAR 
creation fails. Contributed by Gergely Novák


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68ce193e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68ce193e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68ce193e

Branch: refs/heads/HDFS-7240
Commit: 68ce193efcb595f75d7addf751559c806a5aa399
Parents: 51088d3
Author: Xuan Gong 
Authored: Fri Feb 23 14:37:26 2018 -0800
Committer: Xuan Gong 
Committed: Fri Feb 23 14:37:26 2018 -0800

--
 .../hadoop/tools/HadoopArchiveLogsRunner.java   |  26 ++-
 .../tools/TestHadoopArchiveLogsRunner.java  | 204 +++
 2 files changed, 141 insertions(+), 89 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ce193e/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
--
diff --git 
a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
 
b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
index b3c2de6..b736694 100644
--- 
a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
+++ 
b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.tools;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
@@ -65,6 +66,9 @@ public class HadoopArchiveLogsRunner implements Tool {
 
   private JobConf conf;
 
+  @VisibleForTesting
+  HadoopArchives hadoopArchives;
+
   private static final FsPermission HAR_DIR_PERM =
   new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE);
   private static final FsPermission HAR_INNER_FILES_PERM =
@@ -72,6 +76,7 @@ public class HadoopArchiveLogsRunner implements Tool {
 
   public HadoopArchiveLogsRunner(Configuration conf) {
 setConf(conf);
+hadoopArchives = new HadoopArchives(conf);
   }
 
   public static void main(String[] args) {
@@ -132,10 +137,10 @@ public class HadoopArchiveLogsRunner implements Tool {
 conf.set("mapreduce.framework.name", "local");
 // Set the umask so we get 640 files and 750 dirs
 conf.set("fs.permissions.umask-mode", "027");
-HadoopArchives ha = new HadoopArchives(conf);
+String harName = appId + ".har";
 String[] haArgs = {
 "-archiveName",
-appId + ".har",
+harName,
 "-p",
 remoteAppLogDir,
 "*",
@@ -146,15 +151,26 @@ public class HadoopArchiveLogsRunner implements Tool {
   sb.append("\n\t").append(haArg);
 }
 LOG.info(sb.toString());
-ha.run(haArgs);
+int exitCode = hadoopArchives.run(haArgs);
+if (exitCode != 0) {
+  LOG.warn("Failed to create archives for " + appId);
+  return -1;
+}
 
 FileSystem fs = null;
 // Move har file to correct location and delete original logs
 try {
   fs = FileSystem.get(conf);
-  Path harDest = new Path(remoteAppLogDir, appId + ".har");
+  Path harPath = new Path(workingDir, harName);
+  if (!fs.exists(harPath) ||
+  fs.listStatus(harPath).length == 0) {
+LOG.warn("The created archive \"" + harName +
+"\" is missing or empty.");
+return -1;
+  }
+  Path harDest = new Path(remoteAppLogDir, harName);
   LOG.info("Moving har to original location");
-  fs.rename(new Path(workingDir, appId + ".har"), harDest);
+  fs.rename(harPath, harDest);
   LOG.info("Deleting original logs");
   for (FileStatus original : fs.listStatus(new Path(remoteAppLogDir),
   new PathFilter() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ce193e/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java
--
diff --git 
a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java
 
b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java
index fad9b97..5369338 100644
--- 
a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java
+++ 
b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java
@@ -32,112 +32,148 @@ import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import 

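The essence of the change above is to verify the archiving step before anything destructive happens: check the `HadoopArchives` exit code, confirm the HAR exists and is non-empty, and only then rename it and delete the source logs. A condensed, hedged sketch of that guard (the method wrapper is invented; field names mirror the patch):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.HadoopArchives;

/** Hedged sketch of "verify the archive before deleting the originals". */
class ArchiveThenCleanupSketch {
  int archiveAndCleanup(Configuration conf, HadoopArchives hadoopArchives,
      String[] haArgs, Path workingDir, Path remoteAppLogDir, String harName)
      throws Exception {
    if (hadoopArchives.run(haArgs) != 0) {
      return -1;                       // archiver failed: keep the original logs
    }
    FileSystem fs = FileSystem.get(conf);
    Path harPath = new Path(workingDir, harName);
    if (!fs.exists(harPath) || fs.listStatus(harPath).length == 0) {
      return -1;                       // archive missing or empty: keep the original logs
    }
    fs.rename(harPath, new Path(remoteAppLogDir, harName));
    // ...only now is it safe to delete the original aggregated log files...
    return 0;
  }
}
```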
[40/59] [abbrv] hadoop git commit: YARN-7916. Remove call to docker logs on failure in container-executor. Contributed by Shane Kumpf

2018-02-26 Thread xyao
YARN-7916. Remove call to docker logs on failure in container-executor. 
Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3132709b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3132709b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3132709b

Branch: refs/heads/HDFS-7240
Commit: 3132709b46a35f70cf5278f3ace677e6e18a1d03
Parents: 2bc3351
Author: Jason Lowe 
Authored: Wed Feb 21 16:54:02 2018 -0600
Committer: Jason Lowe 
Committed: Wed Feb 21 16:54:02 2018 -0600

--
 .../impl/container-executor.c   | 35 
 1 file changed, 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3132709b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 035c694..751949e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1435,20 +1435,16 @@ int launch_docker_container_as_user(const char * user, 
const char *app_id,
   char *exit_code_file = NULL;
   char *docker_command_with_binary = NULL;
   char *docker_wait_command = NULL;
-  char *docker_logs_command = NULL;
   char *docker_inspect_command = NULL;
   char *docker_rm_command = NULL;
   char *docker_inspect_exitcode_command = NULL;
   int container_file_source =-1;
   int cred_file_source = -1;
-  int BUFFER_SIZE = 4096;
-  char buffer[BUFFER_SIZE];
 
   size_t command_size = MIN(sysconf(_SC_ARG_MAX), 128*1024);
 
   docker_command_with_binary = (char *) alloc_and_clear_memory(command_size, 
sizeof(char));
   docker_wait_command = (char *) alloc_and_clear_memory(command_size, 
sizeof(char));
-  docker_logs_command = (char *) alloc_and_clear_memory(command_size, 
sizeof(char));
   docker_inspect_command = (char *) alloc_and_clear_memory(command_size, 
sizeof(char));
   docker_rm_command = (char *) alloc_and_clear_memory(command_size, 
sizeof(char));
   docker_inspect_exitcode_command = (char *) 
alloc_and_clear_memory(command_size, sizeof(char));
@@ -1600,36 +1596,6 @@ int launch_docker_container_as_user(const char * user, 
const char *app_id,
 goto cleanup;
   }
   fprintf(LOGFILE, "Exit code from docker inspect: %d\n", exit_code);
-  if(exit_code != 0) {
-fprintf(ERRORFILE, "Docker container exit code was not zero: %d\n",
-exit_code);
-snprintf(docker_logs_command, command_size, "%s logs --tail=250 %s",
-  docker_binary, container_id);
-FILE* logs = popen(docker_logs_command, "r");
-if(logs != NULL) {
-  clearerr(logs);
-  res = fread(buffer, BUFFER_SIZE, 1, logs);
-  if(res < 1) {
-fprintf(ERRORFILE, "%s %d %d\n",
-  "Unable to read from docker logs(ferror, feof):", ferror(logs), 
feof(logs));
-fflush(ERRORFILE);
-  }
-  else {
-fprintf(ERRORFILE, "%s\n", buffer);
-fflush(ERRORFILE);
-  }
-}
-else {
-  fprintf(ERRORFILE, "%s\n", "Failed to get output of docker logs");
-  fprintf(ERRORFILE, "Command was '%s'\n", docker_logs_command);
-  fprintf(ERRORFILE, "%s\n", strerror(errno));
-  fflush(ERRORFILE);
-}
-if(pclose(logs) != 0) {
-  fprintf(ERRORFILE, "%s\n", "Failed to fetch docker logs");
-  fflush(ERRORFILE);
-}
-  }
 
 cleanup:
 
@@ -1662,7 +1628,6 @@ cleanup:
   free(cred_file_dest);
   free(docker_command_with_binary);
   free(docker_wait_command);
-  free(docker_logs_command);
   free(docker_inspect_command);
   free(docker_rm_command);
   return exit_code;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[32/59] [abbrv] hadoop git commit: HADOOP-15235. Authentication Tokens should use HMAC instead of MAC (rkanter)

2018-02-26 Thread xyao
HADOOP-15235. Authentication Tokens should use HMAC instead of MAC (rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/324e5a7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/324e5a7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/324e5a7c

Branch: refs/heads/HDFS-7240
Commit: 324e5a7cf2bdb6f93e7c6fd9023817528f243dcf
Parents: 84cea00
Author: Robert Kanter 
Authored: Tue Feb 20 17:24:37 2018 -0800
Committer: Robert Kanter 
Committed: Tue Feb 20 17:24:37 2018 -0800

--
 .../security/authentication/util/Signer.java| 22 +---
 1 file changed, 14 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/324e5a7c/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
index aa63e40..e7b19a4 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
@@ -14,8 +14,11 @@
 package org.apache.hadoop.security.authentication.util;
 
 import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.codec.binary.StringUtils;
 
-import java.nio.charset.Charset;
+import javax.crypto.Mac;
+import javax.crypto.spec.SecretKeySpec;
+import java.security.InvalidKeyException;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 
@@ -24,6 +27,7 @@ import java.security.NoSuchAlgorithmException;
  */
 public class Signer {
   private static final String SIGNATURE = "=";
+  private static final String SIGNING_ALGORITHM = "HmacSHA256";
 
   private SignerSecretProvider secretProvider;
 
@@ -86,25 +90,27 @@ public class Signer {
*/
   protected String computeSignature(byte[] secret, String str) {
 try {
-  MessageDigest md = MessageDigest.getInstance("SHA");
-  md.update(str.getBytes(Charset.forName("UTF-8")));
-  md.update(secret);
-  byte[] digest = md.digest();
-  return new Base64(0).encodeToString(digest);
-} catch (NoSuchAlgorithmException ex) {
+  SecretKeySpec key = new SecretKeySpec((secret), SIGNING_ALGORITHM);
+  Mac mac = Mac.getInstance(SIGNING_ALGORITHM);
+  mac.init(key);
+  byte[] sig = mac.doFinal(StringUtils.getBytesUtf8(str));
+  return new Base64(0).encodeToString(sig);
+} catch (NoSuchAlgorithmException | InvalidKeyException ex) {
   throw new RuntimeException("It should not happen, " + ex.getMessage(), 
ex);
 }
   }
 
   protected void checkSignatures(String rawValue, String originalSignature)
   throws SignerException {
+byte[] orginalSignatureBytes = StringUtils.getBytesUtf8(originalSignature);
 boolean isValid = false;
 byte[][] secrets = secretProvider.getAllSecrets();
 for (int i = 0; i < secrets.length; i++) {
   byte[] secret = secrets[i];
   if (secret != null) {
 String currentSignature = computeSignature(secret, rawValue);
-if (originalSignature.equals(currentSignature)) {
+if (MessageDigest.isEqual(orginalSignatureBytes,
+StringUtils.getBytesUtf8(currentSignature))) {
   isValid = true;
   break;
 }

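The patch above replaces a plain SHA digest over value-plus-secret with an HMAC keyed by the secret, and compares signatures with `MessageDigest.isEqual` to avoid timing leaks. A standalone, hedged sketch of that pattern (not the `Signer` class itself):

```java
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.security.MessageDigest;

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

import org.apache.commons.codec.binary.Base64;

/** Hedged sketch of HMAC signing plus constant-time verification. */
public class HmacSketch {
  static String sign(byte[] secret, String value) throws GeneralSecurityException {
    Mac mac = Mac.getInstance("HmacSHA256");
    mac.init(new SecretKeySpec(secret, "HmacSHA256"));
    byte[] sig = mac.doFinal(value.getBytes(StandardCharsets.UTF_8));
    return new Base64(0).encodeToString(sig);
  }

  static boolean verify(byte[] secret, String value, String signature)
      throws GeneralSecurityException {
    // MessageDigest.isEqual compares in constant time, avoiding the timing side
    // channel that String.equals would introduce.
    return MessageDigest.isEqual(
        signature.getBytes(StandardCharsets.UTF_8),
        sign(secret, value).getBytes(StandardCharsets.UTF_8));
  }

  public static void main(String[] args) throws GeneralSecurityException {
    byte[] secret = "a-shared-secret".getBytes(StandardCharsets.UTF_8);
    String sig = sign(secret, "cookie-payload");
    System.out.println(verify(secret, "cookie-payload", sig)); // true
  }
}
```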

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[46/59] [abbrv] hadoop git commit: YARN-7675. [UI2] Support loading pre-2.8 version /scheduler REST response for queue page. Contributed by Gergely Novák.

2018-02-26 Thread xyao
YARN-7675. [UI2] Support loading pre-2.8 version /scheduler REST response for 
queue page. Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc683952
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc683952
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc683952

Branch: refs/heads/HDFS-7240
Commit: cc683952d2c1730109497aa78dd53629e914d294
Parents: c36b4aa
Author: Sunil G 
Authored: Fri Feb 23 16:10:29 2018 +0530
Committer: Sunil G 
Committed: Fri Feb 23 16:10:29 2018 +0530

--
 .../serializers/yarn-queue/capacity-queue.js| 29 
 1 file changed, 24 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc683952/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
index b171c6e..e838255 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
@@ -54,6 +54,28 @@ export default DS.JSONAPISerializer.extend({
 });
   }
 
+  var partitions = [];
+  var partitionMap = {};
+  if ("capacities" in payload) {
+partitions = payload.capacities.queueCapacitiesByPartition.map(
+  cap => cap.partitionName || PARTITION_LABEL);
+partitionMap = 
payload.capacities.queueCapacitiesByPartition.reduce((init, cap) => {
+  init[cap.partitionName || PARTITION_LABEL] = cap;
+  return init;
+}, {});
+  } else {
+partitions = [PARTITION_LABEL];
+partitionMap[PARTITION_LABEL] = {
+  partitionName: "",
+  capacity: payload.capacity,
+  maxCapacity: payload.maxCapacity,
+  usedCapacity: payload.usedCapacity,
+  absoluteCapacity: 'absoluteCapacity' in payload ? 
payload.absoluteCapacity : payload.capacity,
+  absoluteMaxCapacity: 'absoluteMaxCapacity' in payload ? 
payload.absoluteMaxCapacity : payload.maxCapacity,
+  absoluteUsedCapacity: 'absoluteUsedCapacity' in payload ? 
payload.absoluteUsedCapacity : payload.usedCapacity,
+};
+  }
+
   var fixedPayload = {
 id: id,
 type: primaryModelClass.modelName, // yarn-queue
@@ -74,11 +96,8 @@ export default DS.JSONAPISerializer.extend({
   numPendingApplications: payload.numPendingApplications,
   numActiveApplications: payload.numActiveApplications,
   resources: payload.resources,
-  partitions: payload.capacities.queueCapacitiesByPartition.map(cap => 
cap.partitionName || PARTITION_LABEL),
-  partitionMap: 
payload.capacities.queueCapacitiesByPartition.reduce((init, cap) => {
-init[cap.partitionName || PARTITION_LABEL] = cap;
-return init;
-  }, {}),
+  partitions: partitions,
+  partitionMap: partitionMap,
   type: "capacity",
 },
 // Relationships


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[23/59] [abbrv] hadoop git commit: YARN-7813. Capacity Scheduler Intra-queue Preemption should be configurable for each queue. Contributed by Eric Payne

2018-02-26 Thread xyao
YARN-7813. Capacity Scheduler Intra-queue Preemption should be configurable for 
each queue. Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94972150
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94972150
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94972150

Branch: refs/heads/HDFS-7240
Commit: 949721508467968d5f46170353716ad04349cd6f
Parents: b9a429b
Author: Jason Lowe 
Authored: Mon Feb 19 14:06:28 2018 -0600
Committer: Jason Lowe 
Committed: Mon Feb 19 14:06:28 2018 -0600

--
 .../hadoop/yarn/api/records/QueueInfo.java  | 35 +++
 .../src/main/proto/yarn_protos.proto|  1 +
 .../apache/hadoop/yarn/client/cli/QueueCLI.java |  6 ++
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  3 +-
 .../hadoop/yarn/client/cli/TestYarnCLI.java | 96 ++--
 .../api/records/impl/pb/QueueInfoPBImpl.java| 13 +++
 .../hadoop/yarn/api/TestPBImplRecords.java  |  2 +-
 .../capacity/IntraQueueCandidatesSelector.java  |  4 +-
 .../scheduler/capacity/AbstractCSQueue.java | 70 --
 .../scheduler/capacity/CSQueue.java | 16 +++-
 .../CapacitySchedulerConfiguration.java | 15 +++
 .../webapp/CapacitySchedulerPage.java   |  5 +-
 .../dao/CapacitySchedulerLeafQueueInfo.java |  6 ++
 .../TestConfigurationMutationACLPolicies.java   |  2 +-
 .../TestSchedulerApplicationAttempt.java|  2 +-
 .../scheduler/capacity/TestLeafQueue.java   |  2 +-
 .../webapp/TestRMWebServicesCapacitySched.java  |  2 +-
 .../src/site/markdown/CapacityScheduler.md  |  3 +-
 18 files changed, 257 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
index 897b442..57ea9bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
@@ -94,6 +94,26 @@ public abstract class QueueInfo {
 return queueInfo;
   }
 
+  @Private
+  @Unstable
+  public static QueueInfo newInstance(String queueName, float capacity,
+  float maximumCapacity, float currentCapacity,
+  List childQueues, List applications,
+  QueueState queueState, Set accessibleNodeLabels,
+  String defaultNodeLabelExpression, QueueStatistics queueStatistics,
+  boolean preemptionDisabled,
+  Map queueConfigurations,
+  boolean intraQueuePreemptionDisabled) {
+QueueInfo queueInfo = QueueInfo.newInstance(queueName, capacity,
+maximumCapacity, currentCapacity,
+childQueues, applications,
+queueState, accessibleNodeLabels,
+defaultNodeLabelExpression, queueStatistics,
+preemptionDisabled, queueConfigurations);
+queueInfo.setIntraQueuePreemptionDisabled(intraQueuePreemptionDisabled);
+return queueInfo;
+  }
+
   /**
* Get the name of the queue.
* @return name of the queue
@@ -261,4 +281,19 @@ public abstract class QueueInfo {
   @Unstable
   public abstract void setQueueConfigurations(
   Map queueConfigurations);
+
+
+  /**
+   * Get the intra-queue preemption status of the queue.
+   * @return if property is not in proto, return null;
+   *otherwise, return intra-queue preemption status of the queue
+   */
+  @Public
+  @Stable
+  public abstract Boolean getIntraQueuePreemptionDisabled();
+
+  @Private
+  @Unstable
+  public abstract void setIntraQueuePreemptionDisabled(
+  boolean intraQueuePreemptionDisabled);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index d573638..6ca800a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -563,6 +563,7 @@ message QueueInfoProto {
   optional QueueStatisticsProto queueStatistics = 10;
   optional 

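For client code, the new flag is exposed through the `QueueInfo` record extended above. A hedged usage sketch (the queue name and an already-started `YarnClient` are assumptions, not part of the patch):

```java
import java.io.IOException;

import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.exceptions.YarnException;

/** Hedged sketch of reading the new per-queue flag; the queue name is assumed. */
public class IntraQueuePreemptionCheck {
  static void check(YarnClient yarnClient) throws IOException, YarnException {
    QueueInfo info = yarnClient.getQueueInfo("root.default");
    Boolean disabled = info.getIntraQueuePreemptionDisabled();
    if (disabled == null) {
      System.out.println("Older RM: the field was not present in the proto response");
    } else {
      System.out.println("Intra-queue preemption disabled: " + disabled);
    }
  }
}
```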
[19/59] [abbrv] hadoop git commit: YARN-7937. Fix http method name in Cluster Application Timeout Update API example request. Contributed by Charan Hebri.

2018-02-26 Thread xyao
YARN-7937. Fix http method name in Cluster Application Timeout Update API 
example request. Contributed by Charan Hebri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87bdde69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87bdde69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87bdde69

Branch: refs/heads/HDFS-7240
Commit: 87bdde69431c19a22d79a767071f6ea47e1ceb3d
Parents: 9af30d4
Author: Rohith Sharma K S 
Authored: Sun Feb 18 14:01:23 2018 +0530
Committer: Rohith Sharma K S 
Committed: Sun Feb 18 14:01:23 2018 +0530

--
 .../hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87bdde69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
index 09e4727..c43fe14 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
@@ -4366,7 +4366,7 @@ HTTP Request:
 
 ```json
   Accept: application/json
-  GET http://rm-http-address:port/ws/v1/cluster/apps/{appid}/timeout
+  PUT http://rm-http-address:port/ws/v1/cluster/apps/{appid}/timeout
   Content-Type: application/json
 {
 "timeout":
@@ -4404,7 +4404,7 @@ HTTP Request:
 
 ```xml
   Accept: application/xml
-  GET http://rm-http-address:port/ws/v1/cluster/apps/{appid}/timeout
+  PUT http://rm-http-address:port/ws/v1/cluster/apps/{appid}/timeout
   Content-Type: application/xml
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

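The corrected examples use PUT, which is what the RM expects for application timeout updates. A hedged client-side sketch using `HttpURLConnection`; the RM address, application id, and timeout values below are placeholders, not values from the patch:

```java
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

/** Hedged sketch of the PUT request documented above; host, port and appid are placeholders. */
public class UpdateAppTimeout {
  public static void main(String[] args) throws Exception {
    URL url = new URL(
        "http://rm-http-address:8088/ws/v1/cluster/apps/application_1_0001/timeout");
    String body =
        "{\"timeout\":{\"type\":\"LIFETIME\",\"expiryTime\":\"2018-03-01T10:00:00.000+0530\"}}";

    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");                       // the doc previously said GET by mistake
    conn.setRequestProperty("Content-Type", "application/json");
    conn.setRequestProperty("Accept", "application/json");
    conn.setDoOutput(true);
    try (OutputStream out = conn.getOutputStream()) {
      out.write(body.getBytes(StandardCharsets.UTF_8));
    }
    System.out.println("HTTP " + conn.getResponseCode());
  }
}
```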


[20/59] [abbrv] hadoop git commit: HADOOP-15223. Replace Collections.EMPTY* with empty* when available

2018-02-26 Thread xyao
HADOOP-15223. Replace Collections.EMPTY* with empty* when available

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d4dde51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d4dde51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d4dde51

Branch: refs/heads/HDFS-7240
Commit: 4d4dde5112e9ee6b37cbdea17104c5a4c6870bd5
Parents: 87bdde6
Author: fang zhenyi 
Authored: Sun Feb 18 22:19:23 2018 +0900
Committer: Akira Ajisaka 
Committed: Sun Feb 18 22:19:39 2018 +0900

--
 .../org/apache/hadoop/crypto/key/KeyProvider.java |  6 ++
 .../hdfs/protocol/TestHdfsFileStatusMethods.java  |  2 +-
 .../lib/service/security/DummyGroupMapping.java   |  3 +--
 .../main/java/org/apache/hadoop/fs/s3a/Listing.java   |  2 +-
 .../java/org/apache/hadoop/fs/s3a/S3AFileSystem.java  |  2 +-
 .../apache/hadoop/tools/mapred/TestCopyCommitter.java |  2 +-
 .../hadoop/yarn/sls/scheduler/RMNodeWrapper.java  |  6 ++
 .../yarn/api/protocolrecords/AllocateRequest.java |  2 +-
 .../yarn/api/protocolrecords/AllocateResponse.java|  2 +-
 .../org/apache/hadoop/yarn/api/records/Container.java |  2 +-
 .../yarn/security/ContainerTokenIdentifier.java   |  2 +-
 .../server/api/protocolrecords/NMContainerStatus.java |  2 +-
 .../resourceplugin/ResourcePluginManager.java |  3 ++-
 .../server/resourcemanager/DefaultAMSProcessor.java   |  4 ++--
 .../ProportionalCapacityPreemptionPolicy.java | 14 ++
 .../rmapp/attempt/RMAppAttemptImpl.java   |  2 +-
 .../server/resourcemanager/rmnode/RMNodeImpl.java |  2 +-
 .../resourcemanager/rmnode/RMNodeStatusEvent.java |  3 +--
 .../constraint/processor/BatchedRequests.java |  2 +-
 .../constraint/processor/PlacementDispatcher.java |  4 ++--
 .../SingleConstraintAppPlacementAllocator.java|  3 +--
 .../resourcemanager/TestResourceTrackerService.java   |  5 ++---
 ...ortionalCapacityPreemptionPolicyMockFramework.java |  2 +-
 23 files changed, 34 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
index c1c371b..62cc381 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
@@ -199,9 +199,8 @@ public abstract class KeyProvider {
   return cipher;
 }
 
-@SuppressWarnings("unchecked")
 public Map getAttributes() {
-  return (attributes == null) ? Collections.EMPTY_MAP : attributes;
+  return (attributes == null) ? Collections.emptyMap() : attributes;
 }
 
 /**
@@ -370,9 +369,8 @@ public abstract class KeyProvider {
   return description;
 }
 
-@SuppressWarnings("unchecked")
 public Map getAttributes() {
-  return (attributes == null) ? Collections.EMPTY_MAP : attributes;
+  return (attributes == null) ? Collections.emptyMap() : attributes;
 }
 
 @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatusMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatusMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatusMethods.java
index 3cc4190..683a1ba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatusMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatusMethods.java
@@ -54,7 +54,7 @@ public class TestHdfsFileStatusMethods {
 assertEquals(fsM.stream()
 .map(MethodSignature::toString)
 .collect(joining("\n")),
-Collections.EMPTY_SET, fsM);
+Collections.emptySet(), fsM);
   }
 
   /** Map non-static, declared methods for this class to signatures. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java

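The motivation for preferring `Collections.emptyMap()` and friends over the raw `EMPTY_*` constants is type safety: the factory methods are generic, so no unchecked cast or warning suppression is needed. A small, hedged illustration:

```java
import java.util.Collections;
import java.util.Map;

/** Hedged illustration of why Collections.emptyMap() beats Collections.EMPTY_MAP. */
public class EmptyCollectionsExample {
  @SuppressWarnings("unchecked")
  static Map<String, String> rawConstant() {
    // EMPTY_MAP is a raw Map, so this needs an unchecked cast and a warning suppression.
    return (Map<String, String>) Collections.EMPTY_MAP;
  }

  static Map<String, String> typedFactory() {
    // emptyMap() is generic: the compiler infers <String, String>, no cast, no warning.
    return Collections.emptyMap();
  }

  public static void main(String[] args) {
    System.out.println(rawConstant().isEmpty() && typedFactory().isEmpty()); // true
  }
}
```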
[56/59] [abbrv] hadoop git commit: HDFS-12865. RequestHedgingProxyProvider should handle case when none of the proxies are available. Contributed by Mukul Kumar Singh.

2018-02-26 Thread xyao
HDFS-12865. RequestHedgingProxyProvider should handle case when none of the 
proxies are available. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c30a26ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c30a26ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c30a26ab

Branch: refs/heads/HDFS-7240
Commit: c30a26abc54df669a77e0219fd9b48a47c179a99
Parents: 1e84e46
Author: Arpit Agarwal 
Authored: Sat Feb 24 14:25:56 2018 -0800
Committer: Arpit Agarwal 
Committed: Sat Feb 24 14:25:56 2018 -0800

--
 .../ha/RequestHedgingProxyProvider.java |  6 +++
 .../ha/TestRequestHedgingProxyProvider.java | 45 
 2 files changed, 51 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c30a26ab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 010e9e5..7b9cd64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
+import java.io.IOException;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
@@ -87,6 +88,11 @@ public class RequestHedgingProxyProvider extends
 // Optimization : if only 2 proxies are configured and one had failed
 // over, then we dont need to create a threadpool etc.
 targetProxies.remove(toIgnore);
+if (targetProxies.size() == 0) {
+  LOG.trace("No valid proxies left");
+  throw new RemoteException(IOException.class.getName(),
+  "No valid proxies left. All NameNode proxies have failed over.");
+}
 if (targetProxies.size() == 1) {
   ProxyInfo proxyInfo = targetProxies.values().iterator().next();
   try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c30a26ab/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 65fbbf8..8d6b02d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -28,6 +28,7 @@ import java.util.Iterator;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -290,6 +291,50 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
+  public void testSingleProxyFailover() throws Exception {
+String singleNS = "mycluster-" + Time.monotonicNow();
+URI singleNNUri = new URI("hdfs://" + singleNS);
+Configuration singleConf = new Configuration();
+singleConf.set(HdfsClientConfigKeys.DFS_NAMESERVICES, singleNS);
+singleConf.set(HdfsClientConfigKeys.
+DFS_HA_NAMENODES_KEY_PREFIX + "." + singleNS, "nn1");
+
+singleConf.set(HdfsClientConfigKeys.
+DFS_NAMENODE_RPC_ADDRESS_KEY + "." + singleNS + ".nn1",
+RandomStringUtils.randomAlphabetic(8) + ".foo.bar:9820");
+ClientProtocol active = Mockito.mock(ClientProtocol.class);
+Mockito
+.when(active.getBlockLocations(Matchers.anyString(),
+Matchers.anyLong(), Matchers.anyLong()))
+.thenThrow(new RemoteException("java.io.FileNotFoundException",
+"File does not exist!"));
+
+RequestHedgingProxyProvider provider =
+new RequestHedgingProxyProvider<>(singleConf, singleNNUri,
+

[18/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed 
by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9af30d46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9af30d46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9af30d46

Branch: refs/heads/HDFS-7240
Commit: 9af30d46c6e82332a8eda20cb3eb5f987e25e7a2
Parents: a1e56a6
Author: Rohith Sharma K S 
Authored: Sat Feb 17 20:30:28 2018 +0530
Committer: Rohith Sharma K S 
Committed: Sat Feb 17 20:30:28 2018 +0530

--
 .../resources/assemblies/hadoop-yarn-dist.xml   |  22 +-
 hadoop-project/pom.xml  |  14 +-
 .../pom.xml |  26 +-
 ...stTimelineReaderWebServicesHBaseStorage.java |   8 +-
 .../storage/TestHBaseTimelineStorageApps.java   |  50 +-
 .../TestHBaseTimelineStorageEntities.java   |  89 +--
 .../storage/TestHBaseTimelineStorageSchema.java |  61 +-
 .../flow/TestHBaseStorageFlowActivity.java  |  24 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  42 +-
 .../flow/TestHBaseStorageFlowRunCompaction.java |  58 +-
 .../pom.xml | 219 ++
 .../reader/filter/TimelineFilterUtils.java  | 313 
 .../reader/filter/package-info.java |  28 +
 .../storage/HBaseTimelineReaderImpl.java|  96 +++
 .../storage/HBaseTimelineWriterImpl.java| 611 
 .../storage/TimelineSchemaCreator.java  | 368 ++
 .../storage/application/ApplicationTableRW.java | 137 
 .../storage/application/package-info.java   |  28 +
 .../storage/apptoflow/AppToFlowTableRW.java |  92 +++
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/BaseTableRW.java | 167 +
 .../storage/common/ColumnRWHelper.java  | 487 +
 .../common/HBaseTimelineStorageUtils.java   | 121 +++
 .../common/TimelineHBaseSchemaConstants.java|  71 ++
 .../storage/common/TypedBufferedMutator.java|  73 ++
 .../storage/common/package-info.java|  28 +
 .../storage/entity/EntityTableRW.java   | 136 
 .../storage/entity/package-info.java|  28 +
 .../storage/flow/FlowActivityTableRW.java   |  91 +++
 .../storage/flow/FlowRunTableRW.java| 102 +++
 .../storage/flow/package-info.java  |  29 +
 .../timelineservice/storage/package-info.java   |  28 +
 .../reader/AbstractTimelineStorageReader.java   | 159 
 .../storage/reader/ApplicationEntityReader.java | 523 +
 .../storage/reader/EntityTypeReader.java| 175 +
 .../reader/FlowActivityEntityReader.java| 186 +
 .../storage/reader/FlowRunEntityReader.java | 298 
 .../storage/reader/GenericEntityReader.java | 655 +
 .../reader/SubApplicationEntityReader.java  | 489 +
 .../storage/reader/TimelineEntityReader.java| 464 
 .../reader/TimelineEntityReaderFactory.java | 105 +++
 .../storage/reader/package-info.java|  28 +
 .../subapplication/SubApplicationTableRW.java   | 137 
 .../storage/subapplication/package-info.java|  28 +
 .../common/TestHBaseTimelineStorageUtils.java   |  33 +
 .../pom.xml | 132 
 .../storage/application/ApplicationColumn.java  | 101 +++
 .../application/ApplicationColumnFamily.java|  65 ++
 .../application/ApplicationColumnPrefix.java| 150 
 .../storage/application/ApplicationRowKey.java  | 251 +++
 .../application/ApplicationRowKeyPrefix.java|  69 ++
 .../storage/application/ApplicationTable.java   |  60 ++
 .../storage/application/package-info.java   |  28 +
 .../storage/apptoflow/AppToFlowColumn.java  |  95 +++
 .../apptoflow/AppToFlowColumnFamily.java|  51 ++
 .../apptoflow/AppToFlowColumnPrefix.java| 105 +++
 .../storage/apptoflow/AppToFlowRowKey.java  |  58 ++
 .../storage/apptoflow/AppToFlowTable.java   |  60 ++
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/AppIdKeyConverter.java   |  97 +++
 .../storage/common/BaseTable.java   |  27 +
 .../timelineservice/storage/common/Column.java  |  56 ++
 .../storage/common/ColumnFamily.java|  34 +
 .../storage/common/ColumnHelper.java| 101 +++
 .../storage/common/ColumnPrefix.java|  71 ++
 .../storage/common/EventColumnName.java |  63 ++
 .../common/EventColumnNameConverter.java|  99 +++
 .../storage/common/GenericConverter.java|  48 ++
 .../common/HBaseTimelineSchemaUtils.java| 156 
 .../storage/common/KeyConverter.java|  41 ++
 .../storage/common/KeyConverterToString.java|  38 +
 .../storage/common/LongConverter.java 

[05/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java
deleted file mode 100644
index d385108..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java
+++ /dev/null
@@ -1,249 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Identifies partially qualified columns for the entity table.
- */
-public enum EntityColumnPrefix implements ColumnPrefix<EntityTable> {
-
-  /**
-   * To store TimelineEntity getIsRelatedToEntities values.
-   */
-  IS_RELATED_TO(EntityColumnFamily.INFO, "s"),
-
-  /**
-   * To store TimelineEntity getRelatesToEntities values.
-   */
-  RELATES_TO(EntityColumnFamily.INFO, "r"),
-
-  /**
-   * To store TimelineEntity info values.
-   */
-  INFO(EntityColumnFamily.INFO, "i"),
-
-  /**
-   * Lifecycle events for an entity.
-   */
-  EVENT(EntityColumnFamily.INFO, "e", true),
-
-  /**
-   * Config column stores configuration with config key as the column name.
-   */
-  CONFIG(EntityColumnFamily.CONFIGS, null),
-
-  /**
-   * Metrics are stored with the metric name as the column name.
-   */
-  METRIC(EntityColumnFamily.METRICS, null, new LongConverter());
-
-  private final ColumnHelper<EntityTable> column;
-  private final ColumnFamily<EntityTable> columnFamily;
-
-  /**
-   * Can be null for those cases where the provided column qualifier is the
-   * entire column name.
-   */
-  private final String columnPrefix;
-  private final byte[] columnPrefixBytes;
-
-  /**
-   * Private constructor, meant to be used by the enum definition.
-   *
-   * @param columnFamily that this column is stored in.
-   * @param columnPrefix for this column.
-   */
-  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
-  String columnPrefix) {
-this(columnFamily, columnPrefix, false, GenericConverter.getInstance());
-  }
-
-  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
-  String columnPrefix, boolean compondColQual) {
-this(columnFamily, columnPrefix, compondColQual,
-GenericConverter.getInstance());
-  }
-
-  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
-  String columnPrefix, ValueConverter converter) {
-this(columnFamily, columnPrefix, false, converter);
-  }
-
-  /**
-   * Private constructor, meant to be used by the enum definition.
-   *
-   * @param columnFamily that this column is stored in.
-   * @param columnPrefix for this column.
-   * @param 

[49/59] [abbrv] hadoop git commit: HADOOP-9747. Reduce unnecessary UGI synchronization. Contributed by Daryn Sharp.

2018-02-26 Thread xyao
HADOOP-9747. Reduce unnecessary UGI synchronization. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59cf7588
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59cf7588
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59cf7588

Branch: refs/heads/HDFS-7240
Commit: 59cf7588779145ad5850ad63426743dfe03d8347
Parents: 3688e49
Author: Kihwal Lee 
Authored: Fri Feb 23 13:10:56 2018 -0600
Committer: Kihwal Lee 
Committed: Fri Feb 23 13:10:56 2018 -0600

--
 .../hadoop/fs/CommonConfigurationKeys.java  |  11 -
 .../hadoop/security/UserGroupInformation.java   | 898 +--
 .../src/main/resources/core-default.xml |  13 -
 .../hadoop/security/TestUGILoginFromKeytab.java | 404 -
 .../hadoop/security/TestUGIWithMiniKdc.java |  54 +-
 .../security/TestUserGroupInformation.java  | 113 ++-
 6 files changed, 942 insertions(+), 551 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59cf7588/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index ba6e4e2..043e52a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -355,17 +355,6 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   public static final String HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS =
 "hadoop.user.group.metrics.percentiles.intervals";
 
-  /* When creating UGI with UserGroupInformation(Subject), treat the passed
-   * subject external if set to true, and assume the owner of the subject
-   * should do the credential renewal.
-   *
-   * This is a temporary config to solve the compatibility issue with
-   * HADOOP-13558 and HADOOP-13805 fix, see the jiras for discussions.
-   */
-  public static final String HADOOP_TREAT_SUBJECT_EXTERNAL_KEY =
-  "hadoop.treat.subject.external";
-  public static final boolean HADOOP_TREAT_SUBJECT_EXTERNAL_DEFAULT = false;
-
   public static final String RPC_METRICS_QUANTILE_ENABLE =
   "rpc.metrics.quantile.enable";
   public static final boolean RPC_METRICS_QUANTILE_ENABLE_DEFAULT = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59cf7588/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 726e811..003a51c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.security;
 
 import static 
org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
-import static 
org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_TREAT_SUBJECT_EXTERNAL_KEY;
-import static 
org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_TREAT_SUBJECT_EXTERNAL_DEFAULT;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_TOKEN_FILES;
@@ -42,12 +40,14 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
 
 import javax.security.auth.DestroyFailedException;
 import javax.security.auth.Subject;
@@ -56,6 +56,7 @@ import javax.security.auth.kerberos.KerberosPrincipal;
 import javax.security.auth.kerberos.KerberosTicket;
 import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;
+import javax.security.auth.login.Configuration.Parameters;
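
For context while reading the UserGroupInformation rewrite: the change is internal (less synchronization), while callers keep using the same public login/doAs entry points. A minimal, hedged sketch of those calls; the principal, keytab path, and class name below are hypothetical, not taken from the patch:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

/** Illustrative only: the public UGI calls whose callers are unaffected by this refactor. */
public class UgiDoAsSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical principal and keytab; any secured cluster would supply its own.
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
        "hdfs/host.example.com@EXAMPLE.COM", "/etc/security/keytabs/hdfs.keytab");

    // Run an action as the logged-in user and report who it ran as.
    String who = ugi.doAs((PrivilegedExceptionAction<String>) () ->
        UserGroupInformation.getCurrentUser().getUserName());
    System.out.println(who);
  }
}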
 

[54/59] [abbrv] hadoop git commit: HADOOP-13374. Add the L&N verification script. Contributed by Allen Wittenauer

2018-02-26 Thread xyao
HADOOP-13374. Add the L&N verification script. Contributed by Allen Wittenauer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/329a4fdd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/329a4fdd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/329a4fdd

Branch: refs/heads/HDFS-7240
Commit: 329a4fdd07ab007615f34c8e0e651360f988064d
Parents: 033f9c6
Author: Chris Douglas 
Authored: Fri Feb 23 17:07:22 2018 -0800
Committer: Chris Douglas 
Committed: Fri Feb 23 17:07:22 2018 -0800

--
 dev-support/bin/verify-license-files | 145 ++
 1 file changed, 145 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/329a4fdd/dev-support/bin/verify-license-files
--
diff --git a/dev-support/bin/verify-license-files 
b/dev-support/bin/verify-license-files
new file mode 100755
index 000..1fd70a6
--- /dev/null
+++ b/dev-support/bin/verify-license-files
@@ -0,0 +1,145 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+## @description  check a file
+## @audience private
+## @stabilityevolving
+## @replaceable  no
+## @paramfilename
+## @paramjarfile
+## @return   0 = destroy verify dir
+## @return   1 = keep verify dir
+function process_file
+{
+  declare check=$1
+  declare fqfn=$2
+  declare fn
+  declare keepdir
+  declare tf
+  declare count
+  declare valid
+
+  fn=$(basename "${fqfn}")
+  keepdir=false
+  valid=0
+  count=0
+
+  unzip -o -d "${WORK_DIR}/${fn}" "${fqfn}" '*'"${check}"'*' >/dev/null 2>&1
+
+  while read -r tf; do
+((count = count + 1))
+if diff -q "${DIST_DIR}/${check}.txt" "${tf}" >/dev/null 2>&1; then
+  ((valid = valid + 1))
+fi
+  done < <(find "${WORK_DIR}/${fn}" -name "${check}"'*')
+
+  if [[ "${count}" -eq 0 ]]; then
+hadoop_error "ERROR: ${fn}: Missing a ${check} file"
+  elif [[ "${count}" -gt 1 ]]; then
+hadoop_error "WARNING: ${fn}: Found ${count} ${check} files (${valid} were 
valid)"
+keepdir=true
+  fi
+
+  if [[ "${valid}" -eq 0 ]] && [[ "${count}" -gt 0 ]]; then
+  hadoop_error "ERROR: ${fn}: No valid ${check} found"
+  keepdir=true
+  fi
+
+  if [[ "${keepdir}" = "false" ]]; then
+return 0
+  else
+return 1
+  fi
+}
+
+
+## @description  check a jar
+## @audience private
+## @stabilityevolving
+## @replaceable  no
+## @paramjarfile
+## @return   0 - success
+## @return   1 - errors
+function process_jar
+{
+  declare fqfn=$1
+  declare fn
+  declare keepwork
+
+  fn=$(basename "${fqfn}")
+  keepwork=false
+
+  if [[ ! ${fn} =~ hadoop-.*-${PROJ_VERSION} ]]; then
+return
+  fi
+
+  mkdir -p "${WORK_DIR}/${fn}"
+
+  if ! process_file LICENSE "${fqfn}"; then
+keepwork=true
+  fi
+
+  if ! process_file NOTICE "${fqfn}"; then
+keepwork=true
+  fi
+
+  if [[ "${keepwork}" = "false" ]]; then
+rm -rf "${WORK_DIR:?}/${fn}"
+return 0
+  else
+hadoop_error ""
+return 1
+  fi
+}
+
+
+MYNAME="${BASH_SOURCE-$0}"
+#shellcheck disable=SC2034
+HADOOP_SHELL_EXECNAME="${MYNAME##*/}"
+BINDIR=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
+#shellcheck disable=SC2034
+HADOOP_LIBEXEC_DIR="${BINDIR}/../../hadoop-common-project/hadoop-common/src/main/bin"
+
+#shellcheck disable=SC1090
+. "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh"
+
+HADOOP_LIBEXEC_DIR=$(hadoop_abs "${HADOOP_LIBEXEC_DIR}")
+BINDIR=$(hadoop_abs "${BINDIR}")
+BASEDIR=$(hadoop_abs "${BINDIR}/../..")
+
+pushd "${BASEDIR}" >/dev/null
+#shellcheck disable=SC2016
+PROJ_VERSION=$(mvn -q -Dexec.executable="echo" 
-Dexec.args='${project.version}' --non-recursive exec:exec)
+popd >/dev/null
+
+DIST_DIR="${BASEDIR}/hadoop-dist/target/hadoop-${PROJ_VERSION}"
+WORK_DIR="${BASEDIR}/patchprocess/verify"
+
+rm -rf "${WORK_DIR:?}"
+mkdir -p "${WORK_DIR}"
+
+while read -r filename; do
+  process_jar "${filename}"
+  ((ret = ret + $? ))
+done < <(find "${DIST_DIR}" \
+  -name 

[10/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
new file mode 100644
index 000..fb1f774
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the sub app table.
+ */
+public class SubApplicationRowKey {
+  private final String subAppUserId;
+  private final String clusterId;
+  private final String entityType;
+  private final Long entityIdPrefix;
+  private final String entityId;
+  private final String userId;
+  private final SubApplicationRowKeyConverter subAppRowKeyConverter =
+  new SubApplicationRowKeyConverter();
+
+  public SubApplicationRowKey(String subAppUserId, String clusterId,
+  String entityType, Long entityIdPrefix, String entityId, String userId) {
+this.subAppUserId = subAppUserId;
+this.clusterId = clusterId;
+this.entityType = entityType;
+this.entityIdPrefix = entityIdPrefix;
+this.entityId = entityId;
+this.userId = userId;
+  }
+
+  public String getClusterId() {
+return clusterId;
+  }
+
+  public String getSubAppUserId() {
+return subAppUserId;
+  }
+
+  public String getEntityType() {
+return entityType;
+  }
+
+  public String getEntityId() {
+return entityId;
+  }
+
+  public Long getEntityIdPrefix() {
+return entityIdPrefix;
+  }
+
+  public String getUserId() {
+return userId;
+  }
+
+  /**
+   * Constructs a row key for the sub app table as follows:
+   * {@code subAppUserId!clusterId!entityType
+   * !entityPrefix!entityId!userId}.
+   * Typically used while querying a specific sub app.
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   *
+   * @return byte array with the row key.
+   */
+  public byte[] getRowKey() {
+return subAppRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey byte representation of row key.
+   * @return An SubApplicationRowKey object.
+   */
+  public static SubApplicationRowKey parseRowKey(byte[] rowKey) {
+return new SubApplicationRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Constructs a row key for the sub app table as follows:
+   * 
+   * {@code subAppUserId!clusterId!
+   * entityType!entityIdPrefix!entityId!userId}.
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   *
+   * 
+   *
+   * @return String representation of row key.
+   */
+  public String getRowKeyAsString() {
+return subAppRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the encoded row key as 
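
For readers skimming the patch, a minimal usage sketch of the row-key API added above. Only the constructor, getRowKey()/parseRowKey() round trip, and package come from the diff; the argument values are made up:

import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;

/** Illustrative only: exercises the sub-application row key shown in the diff above. */
public class SubAppRowKeySketch {
  public static void main(String[] args) {
    // Field order follows the constructor in the patch:
    // (subAppUserId, clusterId, entityType, entityIdPrefix, entityId, userId)
    SubApplicationRowKey key = new SubApplicationRowKey(
        "doAsUser", "cluster1", "YARN_CONTAINER", 0L,
        "container_1_0001_01_000001", "yarnuser");

    byte[] encoded = key.getRowKey();            // subAppUserId!clusterId!...!userId as bytes
    SubApplicationRowKey decoded = SubApplicationRowKey.parseRowKey(encoded);
    System.out.println(decoded.getEntityId());   // container_1_0001_01_000001
  }
}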

[22/59] [abbrv] hadoop git commit: Revert "YARN-7677. Docker image cannot set HADOOP_CONF_DIR. Contributed by Jim Brennan"

2018-02-26 Thread xyao
Revert "YARN-7677. Docker image cannot set HADOOP_CONF_DIR. Contributed by Jim 
Brennan"

This reverts commit 8013475d447a8377b5aed858208bf8b91dd32366.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9a429bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9a429bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9a429bb

Branch: refs/heads/HDFS-7240
Commit: b9a429bb2854910add8d4cf787e6ee65ebdfc9cf
Parents: 83e2bb9
Author: Jason Lowe 
Authored: Mon Feb 19 08:16:25 2018 -0600
Committer: Jason Lowe 
Committed: Mon Feb 19 08:16:25 2018 -0600

--
 .../java/org/apache/hadoop/yarn/util/Apps.java  |  22 +--
 .../yarn/util/AuxiliaryServiceHelper.java   |   2 +-
 .../server/nodemanager/ContainerExecutor.java   |  62 +++--
 .../nodemanager/LinuxContainerExecutor.java |   8 ++
 .../launcher/ContainerLaunch.java   |  88 
 .../runtime/DefaultLinuxContainerRuntime.java   |   6 +
 .../DelegatingLinuxContainerRuntime.java|  11 ++
 .../runtime/DockerLinuxContainerRuntime.java|   7 +
 .../runtime/ContainerRuntime.java   |  11 ++
 .../launcher/TestContainerLaunch.java   | 133 ++-
 10 files changed, 110 insertions(+), 240 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a429bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
index 1c90d55..685c6d3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
@@ -23,7 +23,6 @@ import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.util.StringHelper.sjoin;
 
 import java.io.File;
-import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.regex.Matcher;
@@ -106,26 +105,7 @@ public class Apps {
   }
 }
   }
-
-  /**
-   *
-   * @param envString String containing env variable definitions
-   * @param classPathSeparator String that separates the definitions
-   * @return ArrayList of environment variable names
-   */
-  public static ArrayList<String> getEnvVarsFromInputString(String envString,
-  String classPathSeparator) {
-ArrayList<String> envList = new ArrayList<>();
-if (envString != null && envString.length() > 0) {
-  Matcher varValMatcher = VARVAL_SPLITTER.matcher(envString);
-  while (varValMatcher.find()) {
-String envVar = varValMatcher.group(1);
-envList.add(envVar);
-  }
-}
-return envList;
-  }
-
+  
   /**
* This older version of this method is kept around for compatibility
* because downstream frameworks like Spark and Tez have been using it.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a429bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
index 1374d96..cb118f5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
@@ -45,7 +45,7 @@ public class AuxiliaryServiceHelper {
 Base64.encodeBase64String(byteData));
   }
 
-  public static String getPrefixServiceName(String serviceName) {
+  private static String getPrefixServiceName(String serviceName) {
 return NM_AUX_SERVICE + serviceName;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a429bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 

[28/59] [abbrv] hadoop git commit: HDFS-13168. XmlImageVisitor - Prefer Array over LinkedList. Contributed by BELUGA BEHR.

2018-02-26 Thread xyao
HDFS-13168. XmlImageVisitor - Prefer Array over LinkedList. Contributed by 
BELUGA BEHR.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17c592e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17c592e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17c592e6

Branch: refs/heads/HDFS-7240
Commit: 17c592e6cfd1ea3dbe9671c4703caabd095d87cf
Parents: 9028cca
Author: Inigo Goiri 
Authored: Tue Feb 20 15:16:01 2018 -0800
Committer: Inigo Goiri 
Committed: Tue Feb 20 15:16:01 2018 -0800

--
 .../tools/offlineImageViewer/XmlImageVisitor.java | 18 ++
 1 file changed, 10 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17c592e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java
index 44593a3..a326049 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java
@@ -18,16 +18,17 @@
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
 import java.io.IOException;
-import java.util.LinkedList;
+import java.util.ArrayDeque;
+import java.util.Deque;
 
 import org.apache.hadoop.hdfs.util.XMLUtils;
+
 /**
  * An XmlImageVisitor walks over an fsimage structure and writes out
  * an equivalent XML document that contains the fsimage's components.
  */
 public class XmlImageVisitor extends TextWriterImageVisitor {
-  final private LinkedList<ImageElement> tagQ =
-  new LinkedList<ImageElement>();
+  final private Deque<ImageElement> tagQ = new ArrayDeque<>();
 
   public XmlImageVisitor(String filename) throws IOException {
 super(filename, false);
@@ -51,9 +52,10 @@ public class XmlImageVisitor extends TextWriterImageVisitor {
 
   @Override
   void leaveEnclosingElement() throws IOException {
-if(tagQ.size() == 0)
+if (tagQ.isEmpty()) {
   throw new IOException("Tried to exit non-existent enclosing element " +
-"in FSImage file");
+  "in FSImage file");
+}
 
 ImageElement element = tagQ.pop();
 write("\n");
@@ -71,7 +73,7 @@ public class XmlImageVisitor extends TextWriterImageVisitor {
 
   @Override
   void visitEnclosingElement(ImageElement element) throws IOException {
-write("<" + element.toString() + ">\n");
+write('<' + element.toString() + ">\n");
 tagQ.push(element);
   }
 
@@ -79,12 +81,12 @@ public class XmlImageVisitor extends TextWriterImageVisitor 
{
   void visitEnclosingElement(ImageElement element,
   ImageElement key, String value)
throws IOException {
-write("<" + element.toString() + " " + key + "=\"" + value +"\">\n");
+write('<' + element.toString() + ' ' + key + "=\"" + value +"\">\n");
 tagQ.push(element);
   }
 
   private void writeTag(String tag, String value) throws IOException {
-write("<" + tag + ">" +
+write('<' + tag + '>' +
 XMLUtils.mangleXmlString(value, true) + "\n");
   }
 }
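
The change above relies only on stack-style push/pop plus an isEmpty() guard, which ArrayDeque provides without LinkedList's per-node overhead. A self-contained sketch of the same tag-stack discipline; the class and method names here are illustrative, not the actual offline image viewer API:

import java.util.ArrayDeque;
import java.util.Deque;

/** Illustrative only: mirrors the tag-stack discipline used by XmlImageVisitor. */
public class TagStackSketch {
  private final Deque<String> tagQ = new ArrayDeque<>();

  void enter(String tag) {
    tagQ.push(tag);                 // open an enclosing element
  }

  String leave() {
    if (tagQ.isEmpty()) {           // same guard the patch adds before popping
      throw new IllegalStateException("no enclosing element to close");
    }
    return tagQ.pop();              // close the most recently opened element
  }

  public static void main(String[] args) {
    TagStackSketch s = new TagStackSketch();
    s.enter("fsimage");
    s.enter("inode");
    System.out.println(s.leave());  // inode
    System.out.println(s.leave());  // fsimage
  }
}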





[47/59] [abbrv] hadoop git commit: YARN-7949. [UI2] ArtifactsId should not be a compulsory field for new service. Contributed by Yesha Vora.

2018-02-26 Thread xyao
YARN-7949. [UI2] ArtifactsId should not be a compulsory field for new service. 
Contributed by Yesha Vora.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1cd5736
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1cd5736
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1cd5736

Branch: refs/heads/HDFS-7240
Commit: d1cd573687fa3466a5ceb9a525141a8c3a8f686f
Parents: cc68395
Author: Sunil G 
Authored: Fri Feb 23 16:50:02 2018 +0530
Committer: Sunil G 
Committed: Fri Feb 23 16:50:02 2018 +0530

--
 .../main/webapp/app/components/service-component-table.js |  2 +-
 .../src/main/webapp/app/models/yarn-servicedef.js | 10 ++
 .../app/templates/components/service-component-table.hbs  |  2 +-
 3 files changed, 8 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1cd5736/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js
index 5a9ae30..23c2cfb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js
@@ -52,5 +52,5 @@ export default Ember.Component.extend({
 return !Ember.isNone(item);
   },
 
-  isValidCurrentComponent: Ember.computed.and('currentComponent', 
'currentComponent.name', 'currentComponent.cpus', 'currentComponent.memory', 
'currentComponent.numOfContainers', 'currentComponent.artifactId', 
'currentComponent.launchCommand')
+  isValidCurrentComponent: Ember.computed.and('currentComponent', 
'currentComponent.name', 'currentComponent.cpus', 'currentComponent.memory', 
'currentComponent.numOfContainers', 'currentComponent.launchCommand')
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1cd5736/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
index 0439fb4..19c74e1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
@@ -189,10 +189,12 @@ export default DS.Model.extend({
 json['number_of_containers'] = record.get('numOfContainers');
 json['launch_command'] = record.get('launchCommand');
 json['dependencies'] = [];
-json['artifact'] = {
-  id: record.get('artifactId'),
-  type: record.get('artifactType')
-};
+if (!Ember.isEmpty(record.get('artifactId'))) {
+  json['artifact'] = {
+id: record.get('artifactId'),
+type: record.get('artifactType')
+  };
+}
 json['resource'] = {
   cpus: record.get('cpus'),
   memory: record.get('memory')

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1cd5736/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
index 8f3904d..9d519ae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
@@ -90,7 +90,7 @@
   {{input type="number" min="0" class="form-control" 
value=currentComponent.numOfContainers}}
 
 
-  Artifact Id
+  Artifact Id
   {{input type="text" class="form-control" 
value=currentComponent.artifactId}}
 
 





[25/59] [abbrv] hadoop git commit: HDFS-13119. RBF: Manage unavailable clusters. Contributed by Yiqun Lin.

2018-02-26 Thread xyao
HDFS-13119. RBF: Manage unavailable clusters. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8896d20b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8896d20b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8896d20b

Branch: refs/heads/HDFS-7240
Commit: 8896d20b91520053a6bbfb680adb345cd24f4142
Parents: 1d37cf6
Author: Yiqun Lin 
Authored: Tue Feb 20 09:37:08 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Feb 20 09:37:08 2018 +0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +
 .../federation/metrics/FederationRPCMBean.java  |   2 +
 .../metrics/FederationRPCMetrics.java   |  11 ++
 .../FederationRPCPerformanceMonitor.java|  10 ++
 .../resolver/NamenodeStatusReport.java  |   8 +
 .../federation/router/RouterRpcClient.java  |  71 +++--
 .../federation/router/RouterRpcMonitor.java |  13 ++
 .../federation/router/RouterRpcServer.java  |   9 ++
 .../src/main/resources/hdfs-default.xml |  17 +++
 .../router/TestRouterRPCClientRetries.java  | 151 +++
 10 files changed, 289 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8896d20b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0828957..bea38d2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1246,6 +1246,14 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final long DFS_ROUTER_NAMENODE_CONNECTION_CLEAN_MS_DEFAULT =
   TimeUnit.SECONDS.toMillis(10);
 
+  // HDFS Router RPC client
+  public static final String DFS_ROUTER_CLIENT_THREADS_SIZE =
+  FEDERATION_ROUTER_PREFIX + "client.thread-size";
+  public static final int DFS_ROUTER_CLIENT_THREADS_SIZE_DEFAULT = 32;
+  public static final String DFS_ROUTER_CLIENT_MAX_ATTEMPTS =
+  FEDERATION_ROUTER_PREFIX + "client.retry.max.attempts";
+  public static final int DFS_ROUTER_CLIENT_MAX_ATTEMPTS_DEFAULT = 3;
+
   // HDFS Router State Store connection
   public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS =
   FEDERATION_ROUTER_PREFIX + "file.resolver.client.class";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8896d20b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
index 00209e9..3e031fe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
@@ -42,6 +42,8 @@ public interface FederationRPCMBean {
 
   long getProxyOpNotImplemented();
 
+  long getProxyOpRetries();
+
   long getRouterFailureStateStoreOps();
 
   long getRouterFailureReadOnlyOps();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8896d20b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
index 8995689..94d3383 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
@@ -56,6 +56,8 @@ public class FederationRPCMetrics implements 
FederationRPCMBean {
   private MutableCounterLong proxyOpFailureCommunicate;
   @Metric("Number of operations not implemented")
   private MutableCounterLong proxyOpNotImplemented;
+  @Metric("Number of operation retries")
+  private MutableCounterLong proxyOpRetries;
 
   @Metric("Failed requests due to State 
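
The new keys above are ordinary Configuration entries; a minimal sketch of how a caller could override and read them, using only the constants and defaults added by the patch (the override value is arbitrary):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

/** Illustrative only: reads the router client retry settings introduced above. */
public class RouterRetrySettingsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Override the retry count; the client thread pool keeps its default of 32.
    conf.setInt(DFSConfigKeys.DFS_ROUTER_CLIENT_MAX_ATTEMPTS, 5);

    int maxAttempts = conf.getInt(
        DFSConfigKeys.DFS_ROUTER_CLIENT_MAX_ATTEMPTS,
        DFSConfigKeys.DFS_ROUTER_CLIENT_MAX_ATTEMPTS_DEFAULT);
    int clientThreads = conf.getInt(
        DFSConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE,
        DFSConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE_DEFAULT);

    System.out.println("max attempts = " + maxAttempts
        + ", client threads = " + clientThreads);
  }
}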

[02/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
deleted file mode 100644
index e780dcc..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
+++ /dev/null
@@ -1,488 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Query;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
-import org.apache.hadoop.yarn.webapp.BadRequestException;
-
-import com.google.common.base.Preconditions;
-
-class SubApplicationEntityReader extends GenericEntityReader {
-  private static final SubApplicationTable SUB_APPLICATION_TABLE =
-  new SubApplicationTable();
-
-  SubApplicationEntityReader(TimelineReaderContext ctxt,
-  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-super(ctxt, entityFilters, toRetrieve);
-  }
-
-  SubApplicationEntityReader(TimelineReaderContext ctxt,
-  TimelineDataToRetrieve toRetrieve) {
-super(ctxt, toRetrieve);
-  }
-
-  /**
-   * Uses the {@link SubApplicationTable}.
- 

[59/59] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2018-02-26 Thread xyao
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e634d49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e634d49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e634d49

Branch: refs/heads/HDFS-7240
Commit: 1e634d49da09b22af845cede4951f867590db6a8
Parents: 7448475 451265a
Author: Xiaoyu Yao 
Authored: Mon Feb 26 11:32:26 2018 -0800
Committer: Xiaoyu Yao 
Committed: Mon Feb 26 11:32:26 2018 -0800

--
 dev-support/bin/verify-license-files| 145 +++
 .../resources/assemblies/hadoop-yarn-dist.xml   |  22 +-
 .../hadoop-client-minicluster/pom.xml   |   1 +
 .../client/KerberosAuthenticator.java   |  80 +-
 .../security/authentication/util/Signer.java|  22 +-
 .../client/TestKerberosAuthenticator.java   |  29 +
 .../org/apache/hadoop/conf/ConfServlet.java |  11 +-
 .../org/apache/hadoop/conf/Configuration.java   | 146 +--
 .../org/apache/hadoop/conf/CorePropertyTag.java |  37 -
 .../org/apache/hadoop/conf/HDFSPropertyTag.java |  41 -
 .../apache/hadoop/conf/OzonePropertyTag.java|  49 -
 .../org/apache/hadoop/conf/PropertyTag.java |  30 -
 .../org/apache/hadoop/conf/YarnPropertyTag.java |  39 -
 .../apache/hadoop/crypto/key/KeyProvider.java   |   6 +-
 .../hadoop/fs/CommonConfigurationKeys.java  |  11 -
 .../fs/CommonConfigurationKeysPublic.java   |   2 +
 .../apache/hadoop/io/compress/BZip2Codec.java   |  33 +-
 .../java/org/apache/hadoop/log/Log4Json.java|   2 +-
 .../security/RuleBasedLdapGroupsMapping.java|  91 ++
 .../hadoop/security/UserGroupInformation.java   | 898 +--
 .../src/main/resources/core-default.xml |  26 +-
 .../site/markdown/AdminCompatibilityGuide.md| 278 ++
 .../src/site/markdown/Compatibility.md  |  33 +-
 .../conf/TestCommonConfigurationFields.java |   4 +-
 .../org/apache/hadoop/conf/TestConfServlet.java |  58 --
 .../apache/hadoop/conf/TestConfiguration.java   | 121 ++-
 .../apache/hadoop/fs/TestFileSystemCaching.java | 233 +++--
 .../hadoop/http/TestHttpServerWithSpengo.java   |   5 +-
 .../org/apache/hadoop/log/TestLogLevel.java |  18 +-
 .../TestRuleBasedLdapGroupsMapping.java |  99 ++
 .../hadoop/security/TestUGILoginFromKeytab.java | 404 -
 .../hadoop/security/TestUGIWithMiniKdc.java |  54 +-
 .../security/TestUserGroupInformation.java  | 113 ++-
 .../delegation/web/TestWebDelegationToken.java  |   4 +-
 .../hadoop/crypto/key/kms/server/KMSWebApp.java |   2 +-
 .../hadoop/hdfs/DFSOpsCountStatistics.java  |   1 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  63 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  87 ++
 .../hadoop/hdfs/client/impl/LeaseRenewer.java   |   2 +-
 .../hdfs/protocol/SnapshotDiffReport.java   |   4 +
 .../ha/RequestHedgingProxyProvider.java |   8 +-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  49 +
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  15 +
 .../hadoop/hdfs/web/resources/GetOpParam.java   |   3 +-
 .../protocol/TestHdfsFileStatusMethods.java |   2 +-
 .../ha/TestRequestHedgingProxyProvider.java |  45 +
 .../lib/service/security/DummyGroupMapping.java |   3 +-
 .../hadoop-hdfs/src/main/bin/start-dfs.sh   |   2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  10 +-
 .../blockmanagement/DatanodeAdminManager.java   |  27 +-
 .../server/datanode/BlockRecoveryWorker.java|   6 +-
 .../hdfs/server/datanode/ReplicaInfo.java   |   1 -
 .../diskbalancer/command/PlanCommand.java   |  16 +-
 .../connectors/DBNameNodeConnector.java |   2 -
 .../datamodel/DiskBalancerVolume.java   |   4 +-
 .../federation/metrics/FederationRPCMBean.java  |   2 +
 .../metrics/FederationRPCMetrics.java   |  11 +
 .../FederationRPCPerformanceMonitor.java|  10 +
 .../resolver/NamenodeStatusReport.java  |   8 +
 .../federation/router/RouterRpcClient.java  |  71 +-
 .../federation/router/RouterRpcMonitor.java |  13 +
 .../federation/router/RouterRpcServer.java  |   9 +
 .../store/driver/StateStoreDriver.java  |   2 +-
 .../server/namenode/EncryptionZoneManager.java  |   3 +-
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |  28 +-
 .../hdfs/server/namenode/FSDirAttrOp.java   |  54 +-
 .../hdfs/server/namenode/FSDirConcatOp.java |   5 +-
 .../hdfs/server/namenode/FSDirDeleteOp.java |   8 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  |  12 +-
 .../hdfs/server/namenode/FSDirMkdirOp.java  |   3 +-
 .../hdfs/server/namenode/FSDirRenameOp.java |  11 +-
 .../hdfs/server/namenode/FSDirSnapshotOp.java   |  38 +-
 .../server/namenode/FSDirStatAndListingOp.java  |  35 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  35 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 190 

[39/59] [abbrv] hadoop git commit: HADOOP-6852. apparent bug in concatenated-bzip2 support (decoding). Contributed by Zsolt Venczel.

2018-02-26 Thread xyao
HADOOP-6852. apparent bug in concatenated-bzip2 support (decoding). Contributed 
by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bc3351e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bc3351e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bc3351e

Branch: refs/heads/HDFS-7240
Commit: 2bc3351eaf240ea685bcf5042d79f1554bf89e00
Parents: 92cbbfe
Author: Sean Mackrory 
Authored: Wed Feb 21 12:53:18 2018 -0700
Committer: Sean Mackrory 
Committed: Wed Feb 21 12:57:14 2018 -0700

--
 .../hadoop-client-minicluster/pom.xml   |   1 +
 .../apache/hadoop/io/compress/BZip2Codec.java   |   3 +-
 .../mapred/TestConcatenatedCompressedInput.java |  84 +--
 .../src/test/resources/testdata/concat.bz2  | Bin 0 -> 208 bytes
 .../src/test/resources/testdata/concat.gz   | Bin 0 -> 148 bytes
 .../testdata/testCompressThenConcat.txt.bz2 | Bin 0 -> 3056 bytes
 .../testdata/testCompressThenConcat.txt.gz  | Bin 0 -> 3413 bytes
 .../testdata/testConcatThenCompress.txt.bz2 | Bin 0 -> 2567 bytes
 .../testdata/testConcatThenCompress.txt.gz  | Bin 0 -> 2734 bytes
 9 files changed, 42 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc3351e/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 905d53a..a443648 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -615,6 +615,7 @@
   
 testjar/*
 testshell/*
+testdata/*
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc3351e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
index 3c78cfc..99590ed 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
@@ -180,7 +180,8 @@ public class BZip2Codec implements Configurable, 
SplittableCompressionCodec {
   new DecompressorStream(in, decompressor,
  conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
  IO_FILE_BUFFER_SIZE_DEFAULT)) :
-  new BZip2CompressionInputStream(in);
+  new BZip2CompressionInputStream(
+  in, 0L, Long.MAX_VALUE, READ_MODE.BYBLOCK);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc3351e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
index 977d083..af6b952 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
@@ -18,18 +18,6 @@
 
 package org.apache.hadoop.mapred;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayInputStream;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.zip.Inflater;
-
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.LongWritable;
@@ -42,16 +30,26 @@ import org.apache.hadoop.io.compress.zlib.ZlibFactory;
 import org.apache.hadoop.util.LineReader;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.junit.After;
-import org.junit.Ignore;
 import 
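
With the fix above, the plain createInputStream(InputStream) path is meant to decode every member of a concatenated bzip2 stream rather than only the first. A hedged sketch of reading such a file through the codec; the file name is hypothetical and error handling is omitted:

import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.CompressionInputStream;

/** Illustrative only: decodes a (possibly concatenated) .bz2 file via BZip2Codec. */
public class ConcatBzip2ReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    BZip2Codec codec = new BZip2Codec();
    codec.setConf(conf);
    try (InputStream raw = fs.open(new Path("concat.bz2"));        // hypothetical test file
         CompressionInputStream in = codec.createInputStream(raw)) {
      IOUtils.copyBytes(in, System.out, 4096, false);              // copy all decoded members
    }
  }
}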

[43/59] [abbrv] hadoop git commit: YARN-7942. Add check for JAAS configuration for Yarn Service. Contributed by Billie Rinaldi

2018-02-26 Thread xyao
YARN-7942. Add check for JAAS configuration for Yarn Service.
   Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95904f6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95904f6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95904f6b

Branch: refs/heads/HDFS-7240
Commit: 95904f6b3ccd1d167088086472eabdd85b2d148d
Parents: 1909690
Author: Eric Yang 
Authored: Thu Feb 22 16:12:40 2018 -0500
Committer: Eric Yang 
Committed: Thu Feb 22 16:12:40 2018 -0500

--
 .../client/impl/zk/RegistrySecurity.java| 44 +++-
 1 file changed, 33 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95904f6b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
index bb829d8..5c6c983 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
@@ -736,8 +736,10 @@ public class RegistrySecurity extends AbstractService {
* Apply the security environment to this curator instance. This
* may include setting up the ZK system properties for SASL
* @param builder curator builder
+   * @throws IOException if jaas configuration can't be generated or found
*/
-  public void applySecurityEnvironment(CuratorFrameworkFactory.Builder 
builder) {
+  public void applySecurityEnvironment(CuratorFrameworkFactory.Builder
+  builder) throws IOException {
 
 if (isSecureRegistry()) {
   switch (access) {
@@ -752,16 +754,36 @@ public class RegistrySecurity extends AbstractService {
   break;
 
 case sasl:
-  JaasConfiguration jconf =
-  new JaasConfiguration(jaasClientEntry, principal, keytab);
-  javax.security.auth.login.Configuration.setConfiguration(jconf);
-  setSystemPropertyIfUnset(ZooKeeperSaslClient.ENABLE_CLIENT_SASL_KEY,
-  "true");
-  setSystemPropertyIfUnset(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY,
-  jaasClientEntry);
-  LOG.info(
-  "Enabling ZK sasl client: jaasClientEntry = " + jaasClientEntry
-  + ", principal = " + principal + ", keytab = " + keytab);
+  String existingJaasConf = System.getProperty(
+  "java.security.auth.login.config");
+  if (existingJaasConf == null || existingJaasConf.isEmpty()) {
+if (principal == null || keytab == null) {
+  throw new IOException("SASL is configured for registry, " +
+  "but neither keytab/principal nor java.security.auth.login" +
+  ".config system property are specified");
+}
+// in this case, keytab and principal are specified and no jaas
+// config is specified, so we will create one
+LOG.info(
+"Enabling ZK sasl client: jaasClientEntry = " + jaasClientEntry
++ ", principal = " + principal + ", keytab = " + keytab);
+JaasConfiguration jconf =
+new JaasConfiguration(jaasClientEntry, principal, keytab);
+javax.security.auth.login.Configuration.setConfiguration(jconf);
+
setSystemPropertyIfUnset(ZooKeeperSaslClient.ENABLE_CLIENT_SASL_KEY,
+"true");
+
setSystemPropertyIfUnset(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY,
+jaasClientEntry);
+  } else {
+// in this case, jaas config is specified so we will not change it
+LOG.info("Using existing ZK sasl configuration: " +
+"jaasClientEntry = " + System.getProperty(
+ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, "Client") +
+", sasl client = " + System.getProperty(
+ZooKeeperSaslClient.ENABLE_CLIENT_SASL_KEY,
+ZooKeeperSaslClient.ENABLE_CLIENT_SASL_DEFAULT) +
+", jaas = " + existingJaasConf);
+  }
   break;
 
 default:
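
The new branch above boils down to: honor an existing JAAS configuration if the JVM already has one, otherwise require a principal/keytab pair so one can be generated. A condensed, hypothetical restatement of that check; the real logic lives in RegistrySecurity#applySecurityEnvironment and the values below are made up:

import java.io.IOException;

/** Illustrative only: the precondition the patch enforces before enabling a SASL ZK client. */
public class ZkSaslPreconditionSketch {

  static void checkSaslPreconditions(String principal, String keytab) throws IOException {
    String existingJaasConf = System.getProperty("java.security.auth.login.config");
    if (existingJaasConf == null || existingJaasConf.isEmpty()) {
      // No JAAS file supplied to the JVM: keytab and principal become mandatory so a
      // JAAS configuration can be generated programmatically.
      if (principal == null || keytab == null) {
        throw new IOException("SASL is configured for registry, but neither "
            + "keytab/principal nor java.security.auth.login.config are specified");
      }
    }
    // Otherwise the existing JAAS configuration is honored and left untouched.
  }

  public static void main(String[] args) throws IOException {
    checkSaslPreconditions("rm/host.example.com@EXAMPLE.COM",      // hypothetical values
        "/etc/security/keytabs/rm.keytab");
    System.out.println("SASL preconditions satisfied");
  }
}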



[24/59] [abbrv] hadoop git commit: HADOOP-15070. add test to verify FileSystem and paths differentiate on user info. Contributed by Steve Loughran.

2018-02-26 Thread xyao
HADOOP-15070. add test to verify FileSystem and paths differentiate on user 
info.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d37cf67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d37cf67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d37cf67

Branch: refs/heads/HDFS-7240
Commit: 1d37cf675c42f59fab3c7d14d1bad384e4180cbd
Parents: 9497215
Author: Steve Loughran 
Authored: Mon Feb 19 20:43:40 2018 +
Committer: Steve Loughran 
Committed: Mon Feb 19 20:43:40 2018 +

--
 .../apache/hadoop/fs/TestFileSystemCaching.java | 233 +--
 1 file changed, 107 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d37cf67/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
index 69ef71e..b3c3847 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
@@ -18,22 +18,20 @@
 
 package org.apache.hadoop.fs;
 
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertNotSame;
-
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+
 import org.junit.Test;
 import java.security.PrivilegedExceptionAction;
 import java.util.concurrent.Semaphore;
 
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.*;
 
@@ -42,14 +40,13 @@ public class TestFileSystemCaching {
 
   @Test
   public void testCacheEnabled() throws Exception {
-Configuration conf = new Configuration();
-conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", 
null).getName());
+Configuration conf = newConf();
 FileSystem fs1 = FileSystem.get(new URI("cachedfile://a"), conf);
 FileSystem fs2 = FileSystem.get(new URI("cachedfile://a"), conf);
 assertSame(fs1, fs2);
   }
 
-  static class DefaultFs extends LocalFileSystem {
+  private static class DefaultFs extends LocalFileSystem {
 URI uri;
 @Override
 public void initialize(URI uri, Configuration conf) {
@@ -67,43 +64,30 @@ public class TestFileSystemCaching {
 conf.set("fs.defaultfs.impl", DefaultFs.class.getName());
 final URI defaultUri = URI.create("defaultfs://host");
 FileSystem.setDefaultUri(conf, defaultUri);
-FileSystem fs = null;
-
+
 // sanity check default fs
 final FileSystem defaultFs = FileSystem.get(conf);
 assertEquals(defaultUri, defaultFs.getUri());
 
 // has scheme, no auth
-fs = FileSystem.get(URI.create("defaultfs:/"), conf);
-assertSame(defaultFs, fs);
-fs = FileSystem.get(URI.create("defaultfs:///"), conf);
-assertSame(defaultFs, fs);
+assertSame(defaultFs, FileSystem.get(URI.create("defaultfs:/"), conf));
+assertSame(defaultFs, FileSystem.get(URI.create("defaultfs:///"), conf));
 
 // has scheme, same auth
-fs = FileSystem.get(URI.create("defaultfs://host"), conf);
-assertSame(defaultFs, fs);
+assertSame(defaultFs, FileSystem.get(URI.create("defaultfs://host"), conf));
 
 // has scheme, different auth
-fs = FileSystem.get(URI.create("defaultfs://host2"), conf);
-assertNotSame(defaultFs, fs);
+assertNotSame(defaultFs,
+FileSystem.get(URI.create("defaultfs://host2"), conf));
 
 // no scheme, no auth
-fs = FileSystem.get(URI.create("/"), conf);
-assertSame(defaultFs, fs);
+assertSame(defaultFs, FileSystem.get(URI.create("/"), conf));
 
 // no scheme, same auth
-try {
-  fs = FileSystem.get(URI.create("//host"), conf);
-  fail("got fs with auth but no scheme");
-} catch (UnsupportedFileSystemException e) {
-}
-
-// no scheme, different auth
-try {
-  fs = FileSystem.get(URI.create("//host2"), conf);
-  fail("got fs with auth but no scheme");
-} catch (UnsupportedFileSystemException e) {
-}
+intercept(UnsupportedFileSystemException.class,
+() -> FileSystem.get(URI.create("//host"), conf));
+

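For readers unfamiliar with the idiom, a minimal sketch of the LambdaTestUtils.intercept pattern the hunk above switches to; the class and test names are hypothetical, and the call mirrors the one in the diff.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.junit.Test;
import static org.apache.hadoop.test.LambdaTestUtils.intercept;

public class InterceptIdiomSketch {
  @Test
  public void testAuthorityWithoutSchemeRejected() throws Exception {
    Configuration conf = new Configuration();
    // intercept() runs the lambda and asserts it throws the given type,
    // replacing the older try { ...; fail(); } catch (Expected e) {} idiom.
    intercept(UnsupportedFileSystemException.class,
        () -> FileSystem.get(URI.create("//host"), conf));
  }
}
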
[36/59] [abbrv] hadoop git commit: YARN-7947. Capacity Scheduler intra-queue preemption can NPE for non-schedulable apps. Contributed by Eric Payne.

2018-02-26 Thread xyao
YARN-7947. Capacity Scheduler intra-queue preemption can NPE for 
non-schedulable apps. Contributed by Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bdd2a184
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bdd2a184
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bdd2a184

Branch: refs/heads/HDFS-7240
Commit: bdd2a184d78379d99c802a43ebec7d2cef0bbaf7
Parents: 86b227a
Author: Sunil G 
Authored: Wed Feb 21 14:35:57 2018 +0530
Committer: Sunil G 
Committed: Wed Feb 21 14:35:57 2018 +0530

--
 .../monitor/capacity/FifoIntraQueuePreemptionPlugin.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdd2a184/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
index 3332f2a..1776bd4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
@@ -412,7 +412,7 @@ public class FifoIntraQueuePreemptionPlugin
 TempUserPerPartition tmpUser = new TempUserPerPartition(
 tq.leafQueue.getUser(userName), tq.queueName,
 Resources.clone(userResourceUsage.getUsed(partition)),
-Resources.clone(userSpecificAmUsed),
+Resources.clone(amUsed),
 Resources.clone(userResourceUsage.getReserved(partition)),
 Resources.none());
 





[41/59] [abbrv] hadoop git commit: HDFS-13136. Avoid taking FSN lock while doing group member lookup for FSD permission check. Contributed by Xiaoyu Yao.

2018-02-26 Thread xyao
HDFS-13136. Avoid taking FSN lock while doing group member lookup for FSD 
permission check. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84a1321f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84a1321f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84a1321f

Branch: refs/heads/HDFS-7240
Commit: 84a1321f6aa0af6895564a7c47f8f264656f0294
Parents: 3132709
Author: Xiaoyu Yao 
Authored: Thu Feb 15 00:02:05 2018 -0800
Committer: Xiaoyu Yao 
Committed: Thu Feb 22 11:32:32 2018 -0800

--
 .../server/namenode/EncryptionZoneManager.java  |   3 +-
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |  28 ++-
 .../hdfs/server/namenode/FSDirAttrOp.java   |  54 +++---
 .../hdfs/server/namenode/FSDirConcatOp.java |   5 +-
 .../hdfs/server/namenode/FSDirDeleteOp.java |   8 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  |  12 +-
 .../hdfs/server/namenode/FSDirMkdirOp.java  |   3 +-
 .../hdfs/server/namenode/FSDirRenameOp.java |  11 +-
 .../hdfs/server/namenode/FSDirSnapshotOp.java   |  38 ++--
 .../server/namenode/FSDirStatAndListingOp.java  |  35 ++--
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  35 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 190 ---
 .../hdfs/server/namenode/NameNodeAdapter.java   |   5 +-
 .../hdfs/server/namenode/TestAuditLogger.java   |   3 +-
 .../namenode/TestAuditLoggerWithCommands.java   |   4 +-
 15 files changed, 243 insertions(+), 191 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 3fcf797..176ae1d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -154,9 +154,10 @@ public class EncryptionZoneManager {
   public void pauseForTestingAfterNthCheckpoint(final String zone,
   final int count) throws IOException {
 INodesInPath iip;
+final FSPermissionChecker pc = dir.getPermissionChecker();
 dir.readLock();
 try {
-  iip = dir.resolvePath(dir.getPermissionChecker(), zone, DirOp.READ);
+  iip = dir.resolvePath(pc, zone, DirOp.READ);
 } finally {
   dir.readUnlock();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index cc51430..7b3471d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -36,11 +36,10 @@ import java.util.List;
 
 class FSDirAclOp {
   static FileStatus modifyAclEntries(
-  FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
-  throws IOException {
+  FSDirectory fsd, FSPermissionChecker pc, final String srcArg,
+  List<AclEntry> aclSpec) throws IOException {
 String src = srcArg;
 checkAclsConfigFlag(fsd);
-FSPermissionChecker pc = fsd.getPermissionChecker();
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -61,11 +60,10 @@ class FSDirAclOp {
   }
 
   static FileStatus removeAclEntries(
-  FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
-  throws IOException {
+  FSDirectory fsd, FSPermissionChecker pc, final String srcArg,
+  List<AclEntry> aclSpec) throws IOException {
 String src = srcArg;
 checkAclsConfigFlag(fsd);
-FSPermissionChecker pc = fsd.getPermissionChecker();
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -85,11 +83,10 @@ class FSDirAclOp {
 return fsd.getAuditFileInfo(iip);
   }
 
-  static FileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
-  throws IOException {
+  static FileStatus removeDefaultAcl(FSDirectory fsd, FSPermissionChecker pc,
+  final String srcArg) throws IOException {
 String src = srcArg;
 checkAclsConfigFlag(fsd);
-FSPermissionChecker pc = 

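The essence of the change is visible in the EncryptionZoneManager hunk above: the group lookup hidden inside getPermissionChecker() is hoisted out of the FSDirectory/FSNamesystem lock. A condensed sketch of the pattern follows; the surrounding method is hypothetical, the calls are the ones shown in the diff.

  // Resolve the permission checker (which may trigger a group lookup against
  // an external service) before taking the lock, never inside it.
  INodesInPath resolveForRead(FSDirectory dir, String path) throws IOException {
    final FSPermissionChecker pc = dir.getPermissionChecker();
    dir.readLock();
    try {
      return dir.resolvePath(pc, path, DirOp.READ);
    } finally {
      dir.readUnlock();
    }
  }
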
[38/59] [abbrv] hadoop git commit: YARN-5028. RMStateStore should trim down app state for completed applications. Contributed by Gergo Repas.

2018-02-26 Thread xyao
YARN-5028. RMStateStore should trim down app state for completed applications. 
Contributed by Gergo Repas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92cbbfe7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92cbbfe7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92cbbfe7

Branch: refs/heads/HDFS-7240
Commit: 92cbbfe79ec009a19a71a7f44329a4b2f9fa9be6
Parents: 004b722
Author: Yufei Gu 
Authored: Wed Feb 21 11:42:26 2018 -0800
Committer: Yufei Gu 
Committed: Wed Feb 21 11:42:51 2018 -0800

--
 .../resourcemanager/recovery/RMStateStore.java  | 34 +-
 .../recovery/RMStateStoreTestBase.java  |  3 +
 .../recovery/TestZKRMStateStore.java| 66 
 3 files changed, 102 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92cbbfe7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index f0ab324..bbe208d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import 
org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerLaunchContextPBImpl;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
@@ -65,6 +66,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.records.Applicatio
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AggregateAppResourceUsage;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
@@ -257,6 +259,9 @@ public abstract class RMStateStore extends AbstractService {
   appState.getApplicationSubmissionContext().getApplicationId();
   LOG.info("Updating info for app: " + appId);
   try {
+if (isAppStateFinal(appState)) {
+  pruneAppState(appState);
+}
 store.updateApplicationStateInternal(appId, appState);
 if (((RMStateUpdateAppEvent) event).isNotifyApplication()) {
   store.notifyApplication(new RMAppEvent(appId,
@@ -276,7 +281,34 @@ public abstract class RMStateStore extends AbstractService 
{
 }
   }
   return finalState(isFenced);
-};
+}
+
+private boolean isAppStateFinal(ApplicationStateData appState) {
+  RMAppState state = appState.getState();
+  return state == RMAppState.FINISHED || state == RMAppState.FAILED ||
+  state == RMAppState.KILLED;
+}
+
+private void pruneAppState(ApplicationStateData appState) {
+  ApplicationSubmissionContext srcCtx =
+  appState.getApplicationSubmissionContext();
+  ApplicationSubmissionContextPBImpl context =
+  new ApplicationSubmissionContextPBImpl();
+  // most fields in the ApplicationSubmissionContext are not needed,
+  // but the following few need to be present for recovery to succeed
+  context.setApplicationId(srcCtx.getApplicationId());
+  context.setResource(srcCtx.getResource());
+  context.setQueue(srcCtx.getQueue());
+  context.setAMContainerResourceRequests(
+  srcCtx.getAMContainerResourceRequests());
+  context.setApplicationType(srcCtx.getApplicationType());
+  ContainerLaunchContextPBImpl amContainerSpec =
+  new ContainerLaunchContextPBImpl();
+ 

[07/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnFamily.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnFamily.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnFamily.java
deleted file mode 100644
index f3f045e..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnFamily.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents the app_flow table column families.
- */
-public enum AppToFlowColumnFamily implements ColumnFamily<AppToFlowTable> {
-  /**
-   * Mapping column family houses known columns such as flowName and flowRunId.
-   */
-  MAPPING("m");
-
-  /**
-   * Byte representation of this column family.
-   */
-  private final byte[] bytes;
-
-  /**
-   * @param value create a column family with this name. Must be lower case and
-   *  without spaces.
-   */
-  AppToFlowColumnFamily(String value) {
-// column families should be lower case and not contain any spaces.
-this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
-  }
-
-  public byte[] getBytes() {
-return Bytes.copy(bytes);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
deleted file mode 100644
index 752a380..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import 

[21/59] [abbrv] hadoop git commit: HDFS-12998. SnapshotDiff - Provide an iterator-based listing API for calculating snapshotDiff. Contributed by Shashikant Banerjee

2018-02-26 Thread xyao
HDFS-12998. SnapshotDiff - Provide an iterator-based listing API for 
calculating snapshotDiff. Contributed by Shashikant Banerjee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83e2bb98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83e2bb98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83e2bb98

Branch: refs/heads/HDFS-7240
Commit: 83e2bb98eea45ddcb598080f68a2f69de1f04485
Parents: 4d4dde5
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Feb 19 11:42:10 2018 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Feb 19 11:42:10 2018 +0800

--
 .../hadoop/hdfs/DistributedFileSystem.java  |  87 +
 .../snapshot/TestSnapshotDiffReport.java| 130 +++
 2 files changed, 217 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83e2bb98/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 3883f2f..35b6417 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1994,6 +1994,93 @@ public class DistributedFileSystem extends FileSystem
 }.resolve(this, absF);
   }
 
+  /**
+   * Returns a remote iterator so that followup calls are made on demand
+   * while consuming the SnapshotDiffReportListing entries.
+   * This reduces memory consumption overhead in case the snapshotDiffReport
+   * is huge.
+   *
+   * @param snapshotDir
+   *  full path of the directory where snapshots are taken
+   * @param fromSnapshot
+   *  snapshot name of the from point. Null indicates the current
+   *  tree
+   * @param toSnapshot
+   *  snapshot name of the to point. Null indicates the current
+   *  tree.
+   * @return Remote iterator
+   */
+  public RemoteIterator<SnapshotDiffReportListing>
+   snapshotDiffReportListingRemoteIterator(
+  final Path snapshotDir, final String fromSnapshot,
+  final String toSnapshot) throws IOException {
+Path absF = fixRelativePart(snapshotDir);
+return new FileSystemLinkResolver
+<RemoteIterator<SnapshotDiffReportListing>>() {
+  @Override
+  public RemoteIterator<SnapshotDiffReportListing> doCall(final Path p)
+  throws IOException {
+return new SnapshotDiffReportListingIterator(
+getPathName(p), fromSnapshot, toSnapshot);
+  }
+
+  @Override
+  public RemoteIterator<SnapshotDiffReportListing> next(final FileSystem fs,
+  final Path p) throws IOException {
+return ((DistributedFileSystem) fs)
+.snapshotDiffReportListingRemoteIterator(p, fromSnapshot,
+toSnapshot);
+  }
+}.resolve(this, absF);
+
+  }
+
+  /**
+   * This class defines an iterator that returns
+   * the SnapshotDiffReportListing for a snapshottable directory
+   * between two given snapshots.
+   */
+  private final class SnapshotDiffReportListingIterator implements
+  RemoteIterator<SnapshotDiffReportListing> {
+private final String snapshotDir;
+private final String fromSnapshot;
+private final String toSnapshot;
+
+private byte[] startPath;
+private int index;
+private boolean hasNext = true;
+
+private SnapshotDiffReportListingIterator(String snapshotDir,
+String fromSnapshot, String toSnapshot) {
+  this.snapshotDir = snapshotDir;
+  this.fromSnapshot = fromSnapshot;
+  this.toSnapshot = toSnapshot;
+  this.startPath = DFSUtilClient.EMPTY_BYTES;
+  this.index = -1;
+}
+
+@Override
+public boolean hasNext() {
+  return hasNext;
+}
+
+@Override
+public SnapshotDiffReportListing next() throws IOException {
+  if (!hasNext) {
+throw new java.util.NoSuchElementException(
+"No more entry in SnapshotDiffReport for " + snapshotDir);
+  }
+  final SnapshotDiffReportListing part =
+  dfs.getSnapshotDiffReportListing(snapshotDir, fromSnapshot,
+  toSnapshot, startPath, index);
+  startPath = part.getLastPath();
+  index = part.getLastIndex();
+  hasNext =
+  !(Arrays.equals(startPath, DFSUtilClient.EMPTY_BYTES) && index == -1);
+  return part;
+}
+  }
+
   private SnapshotDiffReport getSnapshotDiffReportInternal(
   final String snapshotDir, final String fromSnapshot,
   final String toSnapshot) throws IOException {


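A short consumption sketch for the new API; the snapshottable path and snapshot names are made up, and the iterator methods are the ones added in the hunk above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;

public class SnapshotDiffListingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // Each next() fetches one batch of diff entries on demand instead of
    // materialising the whole snapshot diff report in memory.
    RemoteIterator<SnapshotDiffReportListing> it =
        dfs.snapshotDiffReportListingRemoteIterator(
            new Path("/snapshottable/dir"), "s1", "s2");
    while (it.hasNext()) {
      SnapshotDiffReportListing batch = it.next();
      System.out.println("Received one partial diff listing: " + batch);
    }
  }
}
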
[45/59] [abbrv] hadoop git commit: HADOOP-15236. Fix typo in RequestHedgingProxyProvider and RequestHedgingRMFailoverProxyProvider

2018-02-26 Thread xyao
HADOOP-15236. Fix typo in RequestHedgingProxyProvider and 
RequestHedgingRMFailoverProxyProvider

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c36b4aa3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c36b4aa3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c36b4aa3

Branch: refs/heads/HDFS-7240
Commit: c36b4aa31ce25fbe5fa173bce36da2950d74a475
Parents: 514794e
Author: Gabor Bota 
Authored: Fri Feb 23 13:55:18 2018 +0900
Committer: Akira Ajisaka 
Committed: Fri Feb 23 13:55:18 2018 +0900

--
 .../hdfs/server/namenode/ha/RequestHedgingProxyProvider.java   | 2 +-
 .../hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36b4aa3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 08edfe2..010e9e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -45,7 +45,7 @@ import org.slf4j.LoggerFactory;
  * per-se. It constructs a wrapper proxy that sends the request to ALL
  * underlying proxies simultaneously. It assumes the in an HA setup, there will
  * be only one Active, and the active should respond faster than any configured
- * standbys. Once it receive a response from any one of the configred proxies,
+ * standbys. Once it receive a response from any one of the configured proxies,
  * outstanding requests to other proxies are immediately cancelled.
  */
 public class RequestHedgingProxyProvider extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36b4aa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java
index 4c16225..c1e9da1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
  * underlying proxies simultaneously. Each proxy inside the wrapper proxy will
  * retry the corresponding target. It assumes the in an HA setup, there will be
  * only one Active, and the active should respond faster than any configured
- * standbys. Once it receives a response from any one of the configred proxies,
+ * standbys. Once it receives a response from any one of the configured 
proxies,
  * outstanding requests to other proxies are immediately cancelled.
  */
 public class RequestHedgingRMFailoverProxyProvider





[17/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
new file mode 100644
index 000..2b98eec
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
@@ -0,0 +1,313 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader.filter;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Set of utility methods used by timeline filter classes.
+ */
+public final class TimelineFilterUtils {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TimelineFilterUtils.class);
+
+  private TimelineFilterUtils() {
+  }
+
+  /**
+   * Returns the equivalent HBase filter list's {@link Operator}.
+   *
+   * @param op timeline filter list operator.
+   * @return HBase filter list's Operator.
+   */
+  private static Operator getHBaseOperator(TimelineFilterList.Operator op) {
+switch (op) {
+case AND:
+  return Operator.MUST_PASS_ALL;
+case OR:
+  return Operator.MUST_PASS_ONE;
+default:
+  throw new IllegalArgumentException("Invalid operator");
+}
+  }
+
+  /**
+   * Returns the equivalent HBase compare filter's {@link CompareOp}.
+   *
+   * @param op timeline compare op.
+   * @return HBase compare filter's CompareOp.
+   */
+  private static CompareOp getHBaseCompareOp(
+  TimelineCompareOp op) {
+switch (op) {
+case LESS_THAN:
+  return CompareOp.LESS;
+case LESS_OR_EQUAL:
+  return CompareOp.LESS_OR_EQUAL;
+case EQUAL:
+  return CompareOp.EQUAL;
+case NOT_EQUAL:
+  return CompareOp.NOT_EQUAL;
+case GREATER_OR_EQUAL:
+  return CompareOp.GREATER_OR_EQUAL;
+case GREATER_THAN:
+  return CompareOp.GREATER;
+default:
+  throw new IllegalArgumentException("Invalid compare operator");
+}
+  }
+
+  /**
+   * Converts a {@link TimelinePrefixFilter} to an equivalent HBase
+   * {@link QualifierFilter}.
+   * @param colPrefix
+   * @param filter
+   * @return a {@link QualifierFilter} object
+   */
+  private static <T extends BaseTable<T>> Filter createHBaseColQualPrefixFilter(
+  ColumnPrefix<T> colPrefix, TimelinePrefixFilter filter) {
+return new QualifierFilter(getHBaseCompareOp(filter.getCompareOp()),
+new BinaryPrefixComparator(
+

[06/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
deleted file mode 100644
index c115b18..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ /dev/null
@@ -1,354 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.client.Query;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.text.NumberFormat;
-
-/**
- * A bunch of utility functions used in HBase TimelineService backend.
- */
-public final class HBaseTimelineStorageUtils {
-  /** milliseconds in one day. */
-  public static final long MILLIS_ONE_DAY = 86400000L;
-  private static final Logger LOG =
-  LoggerFactory.getLogger(HBaseTimelineStorageUtils.class);
-
-  private HBaseTimelineStorageUtils() {
-  }
-
-
-  /**
-   * Combines the input array of attributes and the input aggregation operation
-   * into a new array of attributes.
-   *
-   * @param attributes Attributes to be combined.
-   * @param aggOp Aggregation operation.
-   * @return array of combined attributes.
-   */
-  public static Attribute[] combineAttributes(Attribute[] attributes,
-  AggregationOperation aggOp) {
-int newLength = getNewLengthCombinedAttributes(attributes, aggOp);
-Attribute[] combinedAttributes = new Attribute[newLength];
-
-if (attributes != null) {
-  System.arraycopy(attributes, 0, combinedAttributes, 0, 
attributes.length);
-}
-
-if (aggOp != null) {
-  Attribute a2 = aggOp.getAttribute();
-  combinedAttributes[newLength - 1] = a2;
-}
-return combinedAttributes;
-  }
-
-  /**
-   * Returns a number for the new array size. The new array is the combination
-   * of input array of attributes and the input aggregation operation.
-   *
-   * @param attributes Attributes.
-   * @param aggOp Aggregation operation.
-   * @return the size for the new array
-   */
-  private static int getNewLengthCombinedAttributes(Attribute[] attributes,
-  AggregationOperation aggOp) {
-int oldLength = getAttributesLength(attributes);
-int aggLength = getAppOpLength(aggOp);
-return oldLength + aggLength;
-  }
-
-  private static int getAppOpLength(AggregationOperation aggOp) {
-if (aggOp != null) {
-  return 1;
-}
-return 0;
-  }
-
-  private static int getAttributesLength(Attribute[] attributes) {
-if (attributes != null) {
-  return attributes.length;
-}
-  

[31/59] [abbrv] hadoop git commit: YARN-7732. Support Generic AM Simulator from SynthGenerator. (Contributed by Young Chen via curino)

2018-02-26 Thread xyao
YARN-7732. Support Generic AM Simulator from SynthGenerator. (Contributed by 
Young Chen via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84cea001
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84cea001
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84cea001

Branch: refs/heads/HDFS-7240
Commit: 84cea0011ffe510d24cf9f2952944f7a6fe622cf
Parents: 6f81cc0
Author: Carlo Curino 
Authored: Tue Feb 20 17:00:34 2018 -0800
Committer: Carlo Curino 
Committed: Tue Feb 20 17:00:34 2018 -0800

--
 hadoop-tools/hadoop-sls/pom.xml |   2 +
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   | 137 +++---
 .../hadoop/yarn/sls/appmaster/AMSimulator.java  |   2 +-
 .../yarn/sls/appmaster/MRAMSimulator.java   |   7 +-
 .../yarn/sls/appmaster/StreamAMSimulator.java   | 273 +++
 .../hadoop/yarn/sls/appmaster/package-info.java |  21 +
 .../hadoop/yarn/sls/synthetic/SynthJob.java | 367 --
 .../yarn/sls/synthetic/SynthJobClass.java   | 180 ---
 .../sls/synthetic/SynthTraceJobProducer.java| 487 ---
 .../yarn/sls/synthetic/SynthWorkload.java   | 121 -
 .../hadoop/yarn/sls/BaseSLSRunnerTest.java  |   2 +-
 .../hadoop/yarn/sls/TestSLSGenericSynth.java|  76 +++
 .../hadoop/yarn/sls/TestSLSStreamAMSynth.java   |  76 +++
 .../hadoop/yarn/sls/TestSynthJobGeneration.java | 213 +++-
 .../yarn/sls/appmaster/TestAMSimulator.java |   2 +-
 .../src/test/resources/sls-runner.xml   |   4 +
 .../hadoop-sls/src/test/resources/syn.json  |   2 +-
 .../src/test/resources/syn_generic.json |  54 ++
 .../src/test/resources/syn_stream.json  |  46 ++
 19 files changed, 1430 insertions(+), 642 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/pom.xml
--
diff --git a/hadoop-tools/hadoop-sls/pom.xml b/hadoop-tools/hadoop-sls/pom.xml
index a7cb9b2..ef5ac54 100644
--- a/hadoop-tools/hadoop-sls/pom.xml
+++ b/hadoop-tools/hadoop-sls/pom.xml
@@ -133,6 +133,8 @@
 src/test/resources/simulate.info.html.template
 src/test/resources/track.html.template
 src/test/resources/syn.json
+src/test/resources/syn_generic.json
+src/test/resources/syn_stream.json
 src/test/resources/inputsls.json
 src/test/resources/nodes.json
 src/test/resources/exit-invariants.txt

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 456602f..951c09d 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -47,13 +47,11 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.tools.rumen.JobTraceReader;
 import org.apache.hadoop.tools.rumen.LoggedJob;
 import org.apache.hadoop.tools.rumen.LoggedTask;
 import org.apache.hadoop.tools.rumen.LoggedTaskAttempt;
-import org.apache.hadoop.tools.rumen.TaskAttemptInfo;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -627,89 +625,66 @@ public class SLSRunner extends Configured implements Tool 
{
 localConf.set("fs.defaultFS", "file:///");
 long baselineTimeMS = 0;
 
-try {
+// if we use the nodeFile this could have been not initialized yet.
+if (stjp == null) {
+  stjp = new SynthTraceJobProducer(getConf(), new Path(inputTraces[0]));
+}
 
-  // if we use the nodeFile this could have been not initialized yet.
-  if (stjp == null) {
-stjp = new SynthTraceJobProducer(getConf(), new Path(inputTraces[0]));
-  }
+SynthJob job = null;
+// we use stjp, a reference to the job producer instantiated during node
+// creation
+while ((job = (SynthJob) stjp.getNextJob()) != null) {
+  // only support MapReduce currently
+  String user = job.getUser();
+  String jobQueue = job.getQueueName();
+  String oldJobId = job.getJobID().toString();
+  long jobStartTimeMS = 

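A reduced sketch of the producer loop the refactored SLSRunner now uses; the trace file name and the handling inside the loop are placeholders, while the constructor and getters are the ones visible in the hunk above. It is a fragment from inside a Configured tool, not a complete class.

    Configuration conf = new Configuration();
    // One generic trace producer now drives all AM simulators; it is created
    // here only if the node file did not already initialise it.
    SynthTraceJobProducer stjp =
        new SynthTraceJobProducer(conf, new Path("syn_generic.json"));
    SynthJob job;
    while ((job = (SynthJob) stjp.getNextJob()) != null) {
      String user = job.getUser();
      String queue = job.getQueueName();
      String oldJobId = job.getJobID().toString();
      // hand the job off to the matching AM simulator (MR, stream, ...)
    }
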
[11/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java
new file mode 100644
index 000..bb0e331
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.entity
+ * contains classes related to implementation for entity table.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java
new file mode 100644
index 000..4e2cf2d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Identifies the compaction dimensions for the data in the {@link 
FlowRunTable}
+ * .
+ */
+public enum AggregationCompactionDimension {
+
+  /**
+   * the application id.
+   */
+  APPLICATION_ID((byte) 101);
+
+  private byte tagType;
+  private byte[] inBytes;
+
+  private AggregationCompactionDimension(byte tagType) {
+this.tagType = tagType;
+this.inBytes = Bytes.toBytes(this.name());
+  }
+
+  public Attribute 

[34/59] [abbrv] hadoop git commit: HDFS-13175. Add more information for checking argument in DiskBalancerVolume. Contributed by Lei (Eddy) Xu.

2018-02-26 Thread xyao
HDFS-13175. Add more information for checking argument in DiskBalancerVolume.
Contributed by  Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/121e1e12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/121e1e12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/121e1e12

Branch: refs/heads/HDFS-7240
Commit: 121e1e1280c7b019f6d2cc3ba9eae1ead0dd8408
Parents: b0d3c87
Author: Anu Engineer 
Authored: Tue Feb 20 19:16:30 2018 -0800
Committer: Anu Engineer 
Committed: Tue Feb 20 19:16:30 2018 -0800

--
 .../server/diskbalancer/command/PlanCommand.java| 16 
 .../connectors/DBNameNodeConnector.java |  2 --
 .../diskbalancer/datamodel/DiskBalancerVolume.java  |  4 +++-
 3 files changed, 11 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/121e1e12/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index 6e45b96..b765885 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
@@ -124,6 +124,14 @@ public class PlanCommand extends Command {
throw new IllegalArgumentException("Unable to find the specified node. " +
   cmd.getOptionValue(DiskBalancerCLI.PLAN));
 }
+
+try (FSDataOutputStream beforeStream = create(String.format(
+DiskBalancerCLI.BEFORE_TEMPLATE,
+cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
+  beforeStream.write(getCluster().toJson()
+  .getBytes(StandardCharsets.UTF_8));
+}
+
 this.thresholdPercentage = getThresholdPercentage(cmd);
 
 LOG.debug("threshold Percentage is {}", this.thresholdPercentage);
@@ -138,14 +146,6 @@ public class PlanCommand extends Command {
   plan = plans.get(0);
 }
 
-
-try (FSDataOutputStream beforeStream = create(String.format(
-DiskBalancerCLI.BEFORE_TEMPLATE,
-cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
-  beforeStream.write(getCluster().toJson()
-  .getBytes(StandardCharsets.UTF_8));
-}
-
 try {
   if (plan != null && plan.getVolumeSetPlans().size() > 0) {
 outputLine = String.format("Writing plan to:");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/121e1e12/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
index b044baf..2d8ba8a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
@@ -144,8 +144,6 @@ class DBNameNodeConnector implements ClusterConnector {
   // Does it make sense ? Balancer does do that. Right now
   // we only deal with volumes and not blockPools
 
-  volume.setUsed(report.getDfsUsed());
-
   volume.setUuid(storage.getStorageID());
 
   // we will skip this volume for disk balancer if

http://git-wip-us.apache.org/repos/asf/hadoop/blob/121e1e12/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
index 47a925c..a9fd7f0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
@@ -269,7 +269,9 @@ public class DiskBalancerVolume {
* @param dfsUsedSpace - dfsUsedSpace for this 

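The setUsed hunk is cut off above; the summary only says the argument check now reports more information. A sketch of the kind of descriptive precondition the title describes, using Guava's Preconditions; the exact condition and message text are assumptions, since the rest of the hunk is not shown.

  public void setUsed(long dfsUsedSpace) {
    // Fail with the offending values instead of a bare IllegalArgumentException.
    Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(),
        "DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)",
        dfsUsedSpace, this.getCapacity());
    this.used = dfsUsedSpace;
  }
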
[16/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnRWHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnRWHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnRWHelper.java
new file mode 100644
index 000..a8e5149
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnRWHelper.java
@@ -0,0 +1,487 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+/**
+ * A set of utility functions that read or read to a column.
+ * This class is meant to be used only by explicit Columns,
+ * and not directly to write by clients.
+ */
+public final class ColumnRWHelper {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ColumnHelper.class);
+
+  private ColumnRWHelper() {
+  }
+
+  /**
+   * Figures out the cell timestamp used in the Put For storing.
+   * Will supplement the timestamp if required. Typically done for flow run
+   * table.If we supplement the timestamp, we left shift the timestamp and
+   * supplement it with the AppId id so that there are no collisions in the 
flow
+   * run table's cells.
+   */
+  private static long getPutTimestamp(
+  Long timestamp, boolean supplementTs, Attribute[] attributes) {
+if (timestamp == null) {
+  timestamp = System.currentTimeMillis();
+}
+if (!supplementTs) {
+  return timestamp;
+} else {
+  String appId = getAppIdFromAttributes(attributes);
+  long supplementedTS = TimestampGenerator.getSupplementedTimestamp(
+  timestamp, appId);
+  return supplementedTS;
+}
+  }
+
+  private static String getAppIdFromAttributes(Attribute[] attributes) {
+if (attributes == null) {
+  return null;
+}
+String appId = null;
+for (Attribute attribute : attributes) {
+  if (AggregationCompactionDimension.APPLICATION_ID.toString().equals(
+  attribute.getName())) {
+appId = Bytes.toString(attribute.getValue());
+  }
+}
+return appId;
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent
+   * over the wire as part of a batch.
+   *
+   * @param rowKey
+   *  identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *  used to modify the underlying HBase table
+   * @param column the column that is to be modified
+   * @param timestamp
+   *  version timestamp. When null the current timestamp multiplied with
+   *  TimestampGenerator.TS_MULTIPLIER and added with last 3 digits of
+   *  app id will be used
+   * @param inputValue
+   *  the value to write to the rowKey and column qualifier. Nothing
+   *  gets written when null.
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during store 

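As a quick aid to the timestamp-supplement behaviour described in the getPutTimestamp() javadoc above, here is a minimal, self-contained sketch of the idea. The multiplier value and the way the app-id suffix is derived are assumptions made for illustration only; the authoritative logic lives in TimestampGenerator.getSupplementedTimestamp().

// Illustrative sketch only: mirrors the javadoc above (timestamp multiplied by a
// constant, low-order digits derived from the app id). Not the real TimestampGenerator.
public final class SupplementedTimestampSketch {

  // Assumed multiplier for illustration; the actual TimestampGenerator.TS_MULTIPLIER may differ.
  private static final long TS_MULTIPLIER = 1_000_000L;

  static long supplement(long timestampMillis, String appId) {
    // Derive a small, stable suffix from the app id so two cells written in the same
    // millisecond for different applications do not collide in the flow run table.
    long suffix = (appId == null) ? 0L : (appId.hashCode() & 0x7fffffff) % TS_MULTIPLIER;
    return timestampMillis * TS_MULTIPLIER + suffix;
  }

  public static void main(String[] args) {
    System.out.println(supplement(1392993084018L, "application_1234567890123_0001"));
  }
}
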
[14/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
new file mode 100644
index 000..faed348
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
@@ -0,0 +1,489 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Query;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+class SubApplicationEntityReader extends GenericEntityReader {
+  private static final SubApplicationTableRW SUB_APPLICATION_TABLE =
+  new SubApplicationTableRW();
+
+  SubApplicationEntityReader(TimelineReaderContext ctxt,
+  TimelineEntityFilters 

[27/59] [abbrv] hadoop git commit: HDFS-13159. TestTruncateQuotaUpdate fails in trunk. Contributed by Nanda kumar.

2018-02-26 Thread xyao
HDFS-13159. TestTruncateQuotaUpdate fails in trunk. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9028ccaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9028ccaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9028ccaf

Branch: refs/heads/HDFS-7240
Commit: 9028ccaf838621808e5e26a9fa933d28799538dd
Parents: 7280c5a
Author: Arpit Agarwal 
Authored: Tue Feb 20 10:57:35 2018 -0800
Committer: Arpit Agarwal 
Committed: Tue Feb 20 11:40:20 2018 -0800

--
 .../hdfs/server/namenode/TestTruncateQuotaUpdate.java | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9028ccaf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
index fcdd650..f200d5e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
@@ -22,6 +22,8 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffList;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffListByArrayList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
@@ -156,11 +158,11 @@ public class TestTruncateQuotaUpdate {
 FileDiff diff = mock(FileDiff.class);
 when(diff.getBlocks()).thenReturn(blocks);
 FileDiffList diffList = new FileDiffList();
-Whitebox.setInternalState(diffList, "diffs", new ArrayList());
+Whitebox.setInternalState(diffList, "diffs", new DiffListByArrayList<>(0));
 @SuppressWarnings("unchecked")
-ArrayList diffs = ((ArrayList)Whitebox.getInternalState
-(diffList, "diffs"));
-diffs.add(diff);
+DiffList diffs = (DiffList)Whitebox.getInternalState(
+diffList, "diffs");
+diffs.addFirst(diff);
 FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffList);
 file.addFeature(sf);
   }





[13/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
new file mode 100644
index 000..e89a6a7
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
@@ -0,0 +1,251 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the application table.
+ */
+public class ApplicationRowKey {
+  private final String clusterId;
+  private final String userId;
+  private final String flowName;
+  private final Long flowRunId;
+  private final String appId;
+  private final ApplicationRowKeyConverter appRowKeyConverter =
+  new ApplicationRowKeyConverter();
+
+  public ApplicationRowKey(String clusterId, String userId, String flowName,
+  Long flowRunId, String appId) {
+this.clusterId = clusterId;
+this.userId = userId;
+this.flowName = flowName;
+this.flowRunId = flowRunId;
+this.appId = appId;
+  }
+
+  public String getClusterId() {
+return clusterId;
+  }
+
+  public String getUserId() {
+return userId;
+  }
+
+  public String getFlowName() {
+return flowName;
+  }
+
+  public Long getFlowRunId() {
+return flowRunId;
+  }
+
+  public String getAppId() {
+return appId;
+  }
+
+  /**
+   * Constructs a row key for the application table as follows:
+   * {@code clusterId!userName!flowName!flowRunId!AppId}.
+   *
+   * @return byte array with the row key
+   */
+  public byte[] getRowKey() {
+return appRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey Byte representation of row key.
+   * @return An ApplicationRowKey object.
+   */
+  public static ApplicationRowKey parseRowKey(byte[] rowKey) {
+return new ApplicationRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Constructs a row key for the application table as follows:
+   * {@code clusterId!userName!flowName!flowRunId!AppId}.
+   * @return String representation of row key.
+   */
+  public String getRowKeyAsString() {
+return appRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the encoded row key as string, returns the row key as an object.
+   * @param encodedRowKey String representation of row key.
+   * @return A ApplicationRowKey object.
+   */
+  public static ApplicationRowKey parseRowKeyFromString(String encodedRowKey) {
+return new ApplicationRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
+   * Encodes and decodes row key for application table. The row key is of the
+   * form: 

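For orientation on the row-key classes introduced by this refactor, the following is a minimal usage sketch of the ApplicationRowKey API shown in the diff above. The cluster, user, flow and application ids are hypothetical sample values, not ones taken from the patch.

// Usage sketch of the ApplicationRowKey API from the diff above; the sample ids are made up.
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;

public class ApplicationRowKeyExample {
  public static void main(String[] args) {
    ApplicationRowKey key = new ApplicationRowKey(
        "cluster1", "alice", "daily-aggregation-flow", 1392993084018L,
        "application_1234567890123_0001");

    // Binary form used as the HBase row key:
    // clusterId!userName!flowName!flowRunId!AppId
    byte[] rowKey = key.getRowKey();
    ApplicationRowKey parsed = ApplicationRowKey.parseRowKey(rowKey);

    // String form, useful for example as an opaque pagination token.
    String encoded = key.getRowKeyAsString();
    ApplicationRowKey fromString = ApplicationRowKey.parseRowKeyFromString(encoded);

    System.out.println(parsed.getAppId());        // application_1234567890123_0001
    System.out.println(fromString.getFlowName()); // daily-aggregation-flow
  }
}
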
[04/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
deleted file mode 100644
index f521cd7..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-
-/**
- * Identifies partially qualified columns for the {@link FlowRunTable}.
- */
-public enum FlowRunColumnPrefix implements ColumnPrefix {
-
-  /**
-   * To store flow run info values.
-   */
-  METRIC(FlowRunColumnFamily.INFO, "m", null, new LongConverter());
-
-  private final ColumnHelper column;
-  private final ColumnFamily columnFamily;
-
-  /**
-   * Can be null for those cases where the provided column qualifier is the
-   * entire column name.
-   */
-  private final String columnPrefix;
-  private final byte[] columnPrefixBytes;
-
-  private final AggregationOperation aggOp;
-
-  /**
-   * Private constructor, meant to be used by the enum definition.
-   *
-   * @param columnFamily that this column is stored in.
-   * @param columnPrefix for this column.
-   */
-  private FlowRunColumnPrefix(ColumnFamily columnFamily,
-  String columnPrefix, AggregationOperation fra, ValueConverter converter) 
{
-this(columnFamily, columnPrefix, fra, converter, false);
-  }
-
-  private FlowRunColumnPrefix(ColumnFamily columnFamily,
-  String columnPrefix, AggregationOperation fra, ValueConverter converter,
-  boolean compoundColQual) {
-column = new ColumnHelper(columnFamily, converter, true);
-this.columnFamily = columnFamily;
-this.columnPrefix = columnPrefix;
-if (columnPrefix == null) {
-  this.columnPrefixBytes = null;
-} else {
-  // Future-proof by ensuring the right column prefix hygiene.
-  this.columnPrefixBytes =
-  Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
-}
-this.aggOp = fra;
-  }
-
-  /**
-   * @return the column name value
-   */
-  public String getColumnPrefix() {
-return columnPrefix;
-  }
-
-  public byte[] getColumnPrefixBytes() {
-return columnPrefixBytes.clone();
-  }
-
-  @Override
-  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
-return ColumnHelper.getColumnQualifier(this.columnPrefixBytes,
-qualifierPrefix);
-  }
-
-  @Override
-  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
-return ColumnHelper.getColumnQualifier(this.columnPrefixBytes,
-qualifierPrefix);

[37/59] [abbrv] hadoop git commit: HADOOP-15247. Move commons-net up to 3.6. Contributed by Steve Loughran.

2018-02-26 Thread xyao
HADOOP-15247. Move commons-net up to 3.6.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/004b7223
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/004b7223
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/004b7223

Branch: refs/heads/HDFS-7240
Commit: 004b722372de67635a24e71b264b3b604df4b693
Parents: bdd2a18
Author: Steve Loughran 
Authored: Wed Feb 21 10:40:42 2018 +
Committer: Steve Loughran 
Committed: Wed Feb 21 10:40:42 2018 +

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/004b7223/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index ce51c99..f4ac239 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -612,7 +612,7 @@
   
 commons-net
 commons-net
-3.1
+3.6
   
   
 javax.servlet





[01/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 74484754a -> 1e634d49d


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
deleted file mode 100644
index 785a243..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The sub application table has column families:
- * info, config and metrics.
- * Info stores information about a timeline entity object
- * config stores configuration data of a timeline entity object
- * metrics stores the metrics of a timeline entity object
- *
- * Example sub application table record:
- *
- * 
- * |-|
- * |  Row  | Column Family | Column Family| Column Family|
- * |  key  | info  | metrics  | config   |
- * |-|
- * | subAppUserId! | id:entityId   | metricId1:   | configKey1:  |
- * | clusterId!| type:entityType   | metricValue1 | configValue1 |
- * | entityType!   |   | @timestamp1  |  |
- * | idPrefix!||   |  | configKey2:  |
- * | entityId! | created_time: | metricId1:   | configValue2 |
- * | userId| 1392993084018 | metricValue2 |  |
- * |   |   | @timestamp2  |  |
- * |   | i!infoKey:|  |  |
- * |   | infoValue | metricId1:   |  |
- * |   |   | metricValue1 |  |
- * |   |   | @timestamp2  |  |
- * |   | e!eventId=timestamp=  |  |  |
- * |   | infoKey:  |  |  |
- * |   | eventInfoValue|  |  |
- * |   |   |  |  |
- * |   | r!relatesToKey:   |  |  |
- * |   | id3=id4=id5   |  |  |
- * |   |   |  |  |
- * |   | s!isRelatedToKey  |  |  |
- * |   | id7=id9=id6   |  |  |
- * |   |   |  |  |
- * |   | 

[09/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
new file mode 100644
index 000..41a371b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
@@ -0,0 +1,277 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScanType;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineServerUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Coprocessor for flow run table.
+ */
+public class FlowRunCoprocessor extends BaseRegionObserver {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(FlowRunCoprocessor.class);
+
+  private Region region;
+  /**
+   * Generates a timestamp that is unique per row in a region; the generator
+   * is maintained per region.
+   */
+  private final TimestampGenerator timestampGenerator =
+  new TimestampGenerator();
+
+  @Override
+  public void start(CoprocessorEnvironment e) throws IOException {
+if (e instanceof RegionCoprocessorEnvironment) {
+  RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
+  this.region = env.getRegion();
+}
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * This method adds the tags onto the cells in the Put. It is presumed that
+   * all the cells in one Put have the same set of Tags. The existing cell
+   * timestamp is overwritten for non-metric cells and each such cell gets a new
+   * unique timestamp generated by {@link TimestampGenerator}
+   *
+   * @see
+   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#prePut(org.apache
+   * .hadoop.hbase.coprocessor.ObserverContext,
+   * org.apache.hadoop.hbase.client.Put,
+   * org.apache.hadoop.hbase.regionserver.wal.WALEdit,
+   * org.apache.hadoop.hbase.client.Durability)
+   */
+  @Override
+  public void 

[15/59] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

2018-02-26 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
new file mode 100644
index 000..7440316
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
@@ -0,0 +1,523 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Query;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTableRW;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for application entities that are stored in the
+ * application table.
+ */
+class ApplicationEntityReader extends GenericEntityReader {
+  private static final 

hadoop git commit: HADOOP-15265. Exclude json-smart explicitly in hadoop-auth avoid being pulled in transitively. Contributed by Nishant Bangarwa.

2018-02-26 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 eb8765bbe -> 0ccd7138a


HADOOP-15265. Exclude json-smart explicitly in hadoop-auth avoid being pulled 
in transitively. Contributed by Nishant Bangarwa.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ccd7138
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ccd7138
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ccd7138

Branch: refs/heads/branch-3.1
Commit: 0ccd7138a777edb606de4512d0cd6ef9bb1fee6d
Parents: eb8765b
Author: Arpit Agarwal 
Authored: Mon Feb 26 13:56:53 2018 -0800
Committer: Arpit Agarwal 
Committed: Mon Feb 26 13:58:54 2018 -0800

--
 hadoop-common-project/hadoop-auth/pom.xml | 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ccd7138/hadoop-common-project/hadoop-auth/pom.xml
--
diff --git a/hadoop-common-project/hadoop-auth/pom.xml 
b/hadoop-common-project/hadoop-auth/pom.xml
index 9308d0c..9a9c116 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -110,9 +110,21 @@
   org.bouncycastle
   bcprov-jdk15on
 
+
+
+  net.minidev
+  json-smart
+
   
 
 
+  net.minidev
+  json-smart
+
+
   org.apache.zookeeper
   zookeeper
 





[1/2] hadoop git commit: HDFS-12781. After Datanode down, In Namenode UI Datanode tab is throwing warning message.. Contributed by Brahma Reddy Battula.

2018-02-26 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk e85188101 -> 78a10029e


HDFS-12781. After Datanode down, In Namenode UI Datanode tab is throwing 
warning message.. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46c93453
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46c93453
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46c93453

Branch: refs/heads/trunk
Commit: 46c93453e9eb66f91b67e011abc96113f9b0
Parents: e851881
Author: Arpit Agarwal 
Authored: Sat Feb 24 14:52:35 2018 -0800
Committer: Arpit Agarwal 
Committed: Mon Feb 26 13:56:34 2018 -0800

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js  | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46c93453/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
index 4bc8e86..f672f1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
@@ -331,14 +331,14 @@
   $('#table-datanodes').dataTable( {
 'lengthMenu': [ [25, 50, 100, -1], [25, 50, 100, "All"] ],
 'columns': [
-  { 'orderDataType': 'ng-value', 'searchable': true },
-  { 'orderDataType': 'ng-value', 'searchable': true },
-  { 'orderDataType': 'ng-value', 'type': 'num' },
-  { 'orderDataType': 'ng-value', 'type': 'num' },
-  { 'orderDataType': 'ng-value', 'type': 'num' },
-  { 'type': 'num' },
-  { 'orderDataType': 'ng-value', 'type': 'num'},
-  { 'type': 'string' }
+  { 'orderDataType': 'ng-value', 'searchable': true , 
"defaultContent": "" },
+  { 'orderDataType': 'ng-value', 'searchable': true , 
"defaultContent": ""},
+  { 'orderDataType': 'ng-value', 'type': 'num' , "defaultContent": 
0},
+  { 'orderDataType': 'ng-value', 'type': 'num' , "defaultContent": 
0},
+  { 'orderDataType': 'ng-value', 'type': 'num' , "defaultContent": 
0},
+  { 'type': 'num' , "defaultContent": 0},
+  { 'orderDataType': 'ng-value', 'type': 'num' , "defaultContent": 
0},
+  { 'type': 'string' , "defaultContent": ""}
 ]});
   renderHistogram(data);
   $('#ui-tabs a[href="#tab-datanode"]').tab('show');





[2/2] hadoop git commit: HADOOP-15265. Exclude json-smart explicitly in hadoop-auth avoid being pulled in transitively. Contributed by Nishant Bangarwa.

2018-02-26 Thread arp
HADOOP-15265. Exclude json-smart explicitly in hadoop-auth avoid being pulled 
in transitively. Contributed by Nishant Bangarwa.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78a10029
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78a10029
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78a10029

Branch: refs/heads/trunk
Commit: 78a10029ec5b2ecc7b9448be6dc6a1875196a68f
Parents: 46c93453
Author: Arpit Agarwal 
Authored: Mon Feb 26 13:56:53 2018 -0800
Committer: Arpit Agarwal 
Committed: Mon Feb 26 13:56:53 2018 -0800

--
 hadoop-common-project/hadoop-auth/pom.xml | 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78a10029/hadoop-common-project/hadoop-auth/pom.xml
--
diff --git a/hadoop-common-project/hadoop-auth/pom.xml 
b/hadoop-common-project/hadoop-auth/pom.xml
index 12fe971..8aae2c7 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -110,9 +110,21 @@
   org.bouncycastle
   bcprov-jdk15on
 
+
+
+  net.minidev
+  json-smart
+
   
 
 
+  net.minidev
+  json-smart
+
+
   org.apache.zookeeper
   zookeeper
 





hadoop git commit: HADOOP-15266. [branch-2] Upper/Lower case conversion support for group names in LdapGroupsMapping. Contributed by Nanda kumar.

2018-02-26 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4b43f2aa5 -> 0945f0eb2


HADOOP-15266.  [branch-2] Upper/Lower case conversion support for group names 
in LdapGroupsMapping. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0945f0eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0945f0eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0945f0eb

Branch: refs/heads/branch-2
Commit: 0945f0eb233dd77d2b4de4560773ba17a7ce2e7b
Parents: 4b43f2a
Author: Arpit Agarwal 
Authored: Mon Feb 26 13:45:51 2018 -0800
Committer: Arpit Agarwal 
Committed: Mon Feb 26 13:45:51 2018 -0800

--
 .../security/RuleBasedLdapGroupsMapping.java| 95 +++
 .../src/main/resources/core-default.xml | 13 +++
 .../conf/TestCommonConfigurationFields.java |  4 +-
 .../TestRuleBasedLdapGroupsMapping.java | 99 
 4 files changed, 210 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0945f0eb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java
new file mode 100644
index 000..be7a5a4
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * This class uses {@link LdapGroupsMapping} for group lookup and applies the
+ * rule configured on the group names.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+@InterfaceStability.Evolving
+public class RuleBasedLdapGroupsMapping extends LdapGroupsMapping {
+
+  public static final String CONVERSION_RULE_KEY = LDAP_CONFIG_PREFIX +
+  ".conversion.rule";
+
+  private static final String CONVERSION_RULE_DEFAULT = "none";
+  private static final Logger LOG =
+  LoggerFactory.getLogger(RuleBasedLdapGroupsMapping.class);
+
+  private Rule rule;
+
+  /**
+   * Supported rules applicable for group name modification.
+   */
+  private enum Rule {
+TO_UPPER, TO_LOWER, NONE
+  }
+
+  @Override
+  public synchronized void setConf(Configuration conf) {
+super.setConf(conf);
+String value = conf.get(CONVERSION_RULE_KEY, CONVERSION_RULE_DEFAULT);
+try {
+  rule = Rule.valueOf(value.toUpperCase());
+} catch (IllegalArgumentException iae) {
+  LOG.warn("Invalid {} configured: '{}'. Using default value: '{}'",
+  CONVERSION_RULE_KEY, value, CONVERSION_RULE_DEFAULT);
+}
+  }
+
+/**
+ * Returns list of groups for a user.
+ * This calls {@link LdapGroupsMapping}'s getGroups and applies the
+ * configured rules on group names before returning.
+ *
+ * @param user get groups for this user
+ * @return list of groups for a given user
+ */
+  @Override
+  public synchronized List getGroups(String user) {
+List groups = super.getGroups(user);
+List result = new ArrayList<>(groups.size());
+switch (rule) {
+case TO_UPPER:
+  for (String group : groups) {
+result.add(StringUtils.toUpperCase(group));
+  }
+  return result;
+case TO_LOWER:
+  for (String group : groups) {
+result.add(StringUtils.toLowerCase(group));
+  }
+  return result;
+case NONE:
+default:
+  return 

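To make the new mapping concrete, here is a minimal configuration sketch. It assumes that LDAP_CONFIG_PREFIX resolves to "hadoop.security.group.mapping.ldap" (the core-default.xml change in this commit is the authoritative source for the key name), and it does not attempt a real LDAP lookup.

// Sketch only: shows how the conversion rule is configured. A working LDAP setup
// (server URL, bind user, search bases, ...) is still required for real group lookups.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.RuleBasedLdapGroupsMapping;

public class RuleBasedMappingExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Assumed full key: LDAP_CONFIG_PREFIX + ".conversion.rule".
    // Supported values per the Rule enum above: to_upper, to_lower, none.
    conf.set("hadoop.security.group.mapping.ldap.conversion.rule", "to_lower");

    RuleBasedLdapGroupsMapping mapping = new RuleBasedLdapGroupsMapping();
    mapping.setConf(conf);
    // mapping.getGroups("someuser") would now return lower-cased group names.
  }
}
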
[2/2] hadoop git commit: YARN-7921. Transform a PlacementConstraint to a string expression. Contributed by Weiwei Yang.

2018-02-26 Thread kkaranasos
YARN-7921. Transform a PlacementConstraint to a string expression. Contributed 
by Weiwei Yang.

(cherry picked from commit e85188101c6c74b348a2fb6aa0f4e85c81b4a28c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb8765bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb8765bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb8765bb

Branch: refs/heads/branch-3.1
Commit: eb8765bbe9ddbf435247cc171113646c88c228c0
Parents: 33f8232
Author: Konstantinos Karanasos 
Authored: Mon Feb 26 12:15:16 2018 -0800
Committer: Konstantinos Karanasos 
Committed: Mon Feb 26 12:17:08 2018 -0800

--
 .../yarn/api/resource/PlacementConstraint.java  | 141 ++-
 .../resource/TestPlacementConstraintParser.java | 102 --
 .../TestPlacementConstraintTransformations.java |   7 +
 3 files changed, 235 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb8765bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
index c054cbc..9bb17f4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -23,6 +23,8 @@ import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.Iterator;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
@@ -45,6 +47,11 @@ public class PlacementConstraint {
 this.constraintExpr = constraintExpr;
   }
 
+  @Override
+  public String toString() {
+return this.constraintExpr.toString();
+  }
+
   /**
* Get the constraint expression of the placement constraint.
*
@@ -226,6 +233,42 @@ public class PlacementConstraint {
 }
 
 @Override
+public String toString() {
+  int max = getMaxCardinality();
+  int min = getMinCardinality();
+  List targetExprList = getTargetExpressions().stream()
+  .map(TargetExpression::toString).collect(Collectors.toList());
+  List targetConstraints = new ArrayList<>();
+  for (String targetExpr : targetExprList) {
+if (min == 0 && max == 0) {
+  // anti-affinity
+  targetConstraints.add(new StringBuilder()
+  .append("notin").append(",")
+  .append(getScope()).append(",")
+  .append(targetExpr)
+  .toString());
+} else if (min == 1 && max == Integer.MAX_VALUE) {
+  // affinity
+  targetConstraints.add(new StringBuilder()
+  .append("in").append(",")
+  .append(getScope()).append(",")
+  .append(targetExpr)
+  .toString());
+} else {
+  // cardinality
+  targetConstraints.add(new StringBuilder()
+  .append("cardinality").append(",")
+  .append(getScope()).append(",")
+  .append(targetExpr).append(",")
+  .append(min).append(",")
+  .append(max)
+  .toString());
+}
+  }
+  return String.join(":", targetConstraints);
+}
+
+@Override
 public  T accept(Visitor visitor) {
   return visitor.visit(this);
 }
@@ -326,6 +369,23 @@ public class PlacementConstraint {
 }
 
 @Override
+public String toString() {
+  StringBuffer sb = new StringBuffer();
+  if (TargetType.ALLOCATION_TAG == this.targetType) {
+// followed by comma-separated tags
+sb.append(String.join(",", getTargetValues()));
+  } else if (TargetType.NODE_ATTRIBUTE == this.targetType) {
+// followed by comma-separated key-value pairs
+if (this.getTargetValues() != null) {
+  String attributeName = this.getTargetKey();
+  String attributeValues = String.join(":", this.getTargetValues());
+  sb.append(attributeName + "=[" + attributeValues + "]");
+}
+  }
+  return sb.toString();
+}
+
+@Override
 public  T accept(Visitor visitor) {
   return visitor.visit(this);
 }
@@ -345,7 +405,16 @@ public class PlacementConstraint {
  * TargetOperator enum helps to specify type.
  */
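
In short, the toString() implementations added above serialize a placement constraint into expressions such as in,node,<tags>, notin,node,<tags> or cardinality,node,<tags>,min,max, joined with ':' when a constraint has several targets. The standalone sketch below mirrors that branch logic; the scope and allocation-tag values are hypothetical, and the real entry point is PlacementConstraint.toString().

// Standalone sketch of the expression forms produced by the toString() logic above;
// the scope and tag values are hypothetical.
public final class ConstraintExpressionSketch {

  static String expression(String scope, String targetExpr, int min, int max) {
    if (min == 0 && max == 0) {
      return "notin," + scope + "," + targetExpr;            // anti-affinity
    } else if (min == 1 && max == Integer.MAX_VALUE) {
      return "in," + scope + "," + targetExpr;               // affinity
    } else {
      return "cardinality," + scope + "," + targetExpr + "," + min + "," + max;
    }
  }

  public static void main(String[] args) {
    System.out.println(expression("node", "hbase-m", 0, 0));               // notin,node,hbase-m
    System.out.println(expression("node", "spark", 1, Integer.MAX_VALUE)); // in,node,spark
    System.out.println(expression("rack", "zk", 2, 10));                   // cardinality,rack,zk,2,10
  }
}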
 

[1/2] hadoop git commit: YARN-7921. Transform a PlacementConstraint to a string expression. Contributed by Weiwei Yang.

2018-02-26 Thread kkaranasos
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 33f82323b -> eb8765bbe
  refs/heads/trunk 451265a83 -> e85188101


YARN-7921. Transform a PlacementConstraint to a string expression. Contributed 
by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8518810
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8518810
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8518810

Branch: refs/heads/trunk
Commit: e85188101c6c74b348a2fb6aa0f4e85c81b4a28c
Parents: 451265a
Author: Konstantinos Karanasos 
Authored: Mon Feb 26 12:15:16 2018 -0800
Committer: Konstantinos Karanasos 
Committed: Mon Feb 26 12:15:16 2018 -0800

--
 .../yarn/api/resource/PlacementConstraint.java  | 141 ++-
 .../resource/TestPlacementConstraintParser.java | 102 --
 .../TestPlacementConstraintTransformations.java |   7 +
 3 files changed, 235 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8518810/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
index c054cbc..9bb17f4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -23,6 +23,8 @@ import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.Iterator;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
@@ -45,6 +47,11 @@ public class PlacementConstraint {
 this.constraintExpr = constraintExpr;
   }
 
+  @Override
+  public String toString() {
+return this.constraintExpr.toString();
+  }
+
   /**
* Get the constraint expression of the placement constraint.
*
@@ -226,6 +233,42 @@ public class PlacementConstraint {
 }
 
 @Override
+public String toString() {
+  int max = getMaxCardinality();
+  int min = getMinCardinality();
+  List targetExprList = getTargetExpressions().stream()
+  .map(TargetExpression::toString).collect(Collectors.toList());
+  List targetConstraints = new ArrayList<>();
+  for (String targetExpr : targetExprList) {
+if (min == 0 && max == 0) {
+  // anti-affinity
+  targetConstraints.add(new StringBuilder()
+  .append("notin").append(",")
+  .append(getScope()).append(",")
+  .append(targetExpr)
+  .toString());
+} else if (min == 1 && max == Integer.MAX_VALUE) {
+  // affinity
+  targetConstraints.add(new StringBuilder()
+  .append("in").append(",")
+  .append(getScope()).append(",")
+  .append(targetExpr)
+  .toString());
+} else {
+  // cardinality
+  targetConstraints.add(new StringBuilder()
+  .append("cardinality").append(",")
+  .append(getScope()).append(",")
+  .append(targetExpr).append(",")
+  .append(min).append(",")
+  .append(max)
+  .toString());
+}
+  }
+  return String.join(":", targetConstraints);
+}
+
+@Override
 public  T accept(Visitor visitor) {
   return visitor.visit(this);
 }
@@ -326,6 +369,23 @@ public class PlacementConstraint {
 }
 
 @Override
+public String toString() {
+  StringBuffer sb = new StringBuffer();
+  if (TargetType.ALLOCATION_TAG == this.targetType) {
+// followed by comma-separated tags
+sb.append(String.join(",", getTargetValues()));
+  } else if (TargetType.NODE_ATTRIBUTE == this.targetType) {
+// followed by comma-separated key-value pairs
+if (this.getTargetValues() != null) {
+  String attributeName = this.getTargetKey();
+  String attributeValues = String.join(":", this.getTargetValues());
+  sb.append(attributeName + "=[" + attributeValues + "]");
+}
+  }
+  return sb.toString();
+}
+
+@Override
 public  T accept(Visitor visitor) {
   return visitor.visit(this);
 }
@@ -345,7 +405,16 @@ public class PlacementConstraint {
  * 

hadoop git commit: HDFS-12070. Failed block recovery leaves files open indefinitely and at risk for data loss. Contributed by Kihwal Lee.

2018-02-26 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 23a658c4e -> 4722cd9f3


HDFS-12070. Failed block recovery leaves files open indefinitely and at risk 
for data loss. Contributed by Kihwal Lee.

(cherry picked from commit 4b43f2aa566322317a7f3163027bf5fd0a247207)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4722cd9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4722cd9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4722cd9f

Branch: refs/heads/branch-2.8
Commit: 4722cd9f35a8ff3efb106fe297d48b73c849f776
Parents: 23a658c
Author: Kihwal Lee 
Authored: Mon Feb 26 11:15:06 2018 -0600
Committer: Kihwal Lee 
Committed: Mon Feb 26 11:16:44 2018 -0600

--
 .../server/datanode/BlockRecoveryWorker.java|  6 +--
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   | 44 
 2 files changed, 46 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4722cd9f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index 86fead2..b19e51d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -294,10 +294,8 @@ public class BlockRecoveryWorker {
 }
   }
 
-  // If any of the data-nodes failed, the recovery fails, because
-  // we never know the actual state of the replica on failed data-nodes.
-  // The recovery should be started over.
-  if (!failedList.isEmpty()) {
+  // Abort if all failed.
+  if (successList.isEmpty()) {
 StringBuilder b = new StringBuilder();
 for(DatanodeID id : failedList) {
   b.append("\n  " + id);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4722cd9f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index d62194c..c82b47c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -228,6 +228,50 @@ public class TestLeaseRecovery {
   }
 
   /**
+   * Block/lease recovery should be retried with failed nodes from the second
+   * stage removed to avoid perpetual recovery failures.
+   */
+  @Test
+  public void testBlockRecoveryRetryAfterFailedRecovery() throws Exception {
+Configuration conf = new Configuration();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+Path file = new Path("/testBlockRecoveryRetryAfterFailedRecovery");
+DistributedFileSystem dfs = cluster.getFileSystem();
+
+// Create a file.
+FSDataOutputStream out = dfs.create(file);
+final int FILE_SIZE = 128 * 1024;
+int count = 0;
+while (count < FILE_SIZE) {
+  out.writeBytes("DE K9SUL");
+  count += 8;
+}
+out.hsync();
+
+// Abort the original stream.
+((DFSOutputStream) out.getWrappedStream()).abort();
+
+LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
+file.toString(), 0, count);
+ExtendedBlock block = locations.get(0).getBlock();
+
+// Finalize one replica to simulate a partial close failure.
+cluster.getDataNodes().get(0).getFSDataset().finalizeBlock(block, false);
+// Delete the meta file to simulate a rename/move failure.
+cluster.deleteMeta(0, block);
+
+// Try to recover the lease.
+DistributedFileSystem newDfs = (DistributedFileSystem) FileSystem
+.newInstance(cluster.getConfiguration(0));
+count = 0;
+while (count++ < 15 && !newDfs.recoverLease(file)) {
+  Thread.sleep(1000);
+}
+// The lease should have been recovered.
+assertTrue("File should be closed", newDfs.recoverLease(file));
+  }
+
+  /**
* Recover the lease on a file and append file from another client.
*/
   @Test


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
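
The hunk above flips the abort condition in BlockRecoveryWorker: before HDFS-12070, a single failed datanode in the second recovery stage aborted the whole recovery, so a file whose pipeline contained one bad node could stay open indefinitely; now recovery gives up only when every node failed. A minimal standalone sketch of that policy follows, assuming illustrative class, method, and parameter names (only the successList.isEmpty() condition comes from the diff; the exception and its message are illustrative):

  import java.io.IOException;
  import java.util.List;

  class RecoveryAbortPolicySketch {
    // Abort only when no datanode completed the second stage of recovery.
    // With at least one success, the failed nodes are merely reported and a
    // later retry can exclude them instead of restarting forever.
    static void checkOutcome(List<String> successList, List<String> failedList,
        String block) throws IOException {
      if (successList.isEmpty()) {
        throw new IOException("Cannot recover " + block
            + ", the following datanodes failed: " + failedList);
      }
    }
  }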

hadoop git commit: HDFS-12070. Failed block recovery leaves files open indefinitely and at risk for data loss. Contributed by Kihwal Lee.

2018-02-26 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 627a32375 -> a6343ff80


HDFS-12070. Failed block recovery leaves files open indefinitely and at risk 
for data loss. Contributed by Kihwal Lee.

(cherry picked from commit 4b43f2aa566322317a7f3163027bf5fd0a247207)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a6343ff8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a6343ff8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a6343ff8

Branch: refs/heads/branch-2.9
Commit: a6343ff808dcdabfa11b0f713a445cdb30474fa7
Parents: 627a323
Author: Kihwal Lee 
Authored: Mon Feb 26 10:59:09 2018 -0600
Committer: Kihwal Lee 
Committed: Mon Feb 26 10:59:47 2018 -0600

--
 .../server/datanode/BlockRecoveryWorker.java|  6 +--
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   | 44 
 2 files changed, 46 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6343ff8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index aa36247..8d218ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -293,10 +293,8 @@ public class BlockRecoveryWorker {
 }
   }
 
-  // If any of the data-nodes failed, the recovery fails, because
-  // we never know the actual state of the replica on failed data-nodes.
-  // The recovery should be started over.
-  if (!failedList.isEmpty()) {
+  // Abort if all failed.
+  if (successList.isEmpty()) {
 StringBuilder b = new StringBuilder();
 for(DatanodeID id : failedList) {
   b.append("\n  " + id);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6343ff8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index d62194c..c82b47c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -228,6 +228,50 @@ public class TestLeaseRecovery {
   }
 
   /**
+   * Block/lease recovery should be retried with failed nodes from the second
+   * stage removed to avoid perpetual recovery failures.
+   */
+  @Test
+  public void testBlockRecoveryRetryAfterFailedRecovery() throws Exception {
+Configuration conf = new Configuration();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+Path file = new Path("/testBlockRecoveryRetryAfterFailedRecovery");
+DistributedFileSystem dfs = cluster.getFileSystem();
+
+// Create a file.
+FSDataOutputStream out = dfs.create(file);
+final int FILE_SIZE = 128 * 1024;
+int count = 0;
+while (count < FILE_SIZE) {
+  out.writeBytes("DE K9SUL");
+  count += 8;
+}
+out.hsync();
+
+// Abort the original stream.
+((DFSOutputStream) out.getWrappedStream()).abort();
+
+LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
+file.toString(), 0, count);
+ExtendedBlock block = locations.get(0).getBlock();
+
+// Finalize one replica to simulate a partial close failure.
+cluster.getDataNodes().get(0).getFSDataset().finalizeBlock(block, false);
+// Delete the meta file to simulate a rename/move failure.
+cluster.deleteMeta(0, block);
+
+// Try to recover the lease.
+DistributedFileSystem newDfs = (DistributedFileSystem) FileSystem
+.newInstance(cluster.getConfiguration(0));
+count = 0;
+while (count++ < 15 && !newDfs.recoverLease(file)) {
+  Thread.sleep(1000);
+}
+// The lease should have been recovered.
+assertTrue("File should be closed", newDfs.recoverLease(file));
+  }
+
+  /**
* Recover the lease on a file and append file from another client.
*/
   @Test


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

hadoop git commit: HDFS-12070. Failed block recovery leaves files open indefinitely and at risk for data loss. Contributed by Kihwal Lee.

2018-02-26 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 79af42f09 -> 4b43f2aa5


HDFS-12070. Failed block recovery leaves files open indefinitely and at risk 
for data loss. Contributed by Kihwal Lee.

(cherry picked from commit 451265a83d8798624ae2a144bc58fa41db826704)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b43f2aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b43f2aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b43f2aa

Branch: refs/heads/branch-2
Commit: 4b43f2aa566322317a7f3163027bf5fd0a247207
Parents: 79af42f
Author: Kihwal Lee 
Authored: Mon Feb 26 10:58:07 2018 -0600
Committer: Kihwal Lee 
Committed: Mon Feb 26 10:58:07 2018 -0600

--
 .../server/datanode/BlockRecoveryWorker.java|  6 +--
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   | 44 
 2 files changed, 46 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b43f2aa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index aa36247..8d218ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -293,10 +293,8 @@ public class BlockRecoveryWorker {
 }
   }
 
-  // If any of the data-nodes failed, the recovery fails, because
-  // we never know the actual state of the replica on failed data-nodes.
-  // The recovery should be started over.
-  if (!failedList.isEmpty()) {
+  // Abort if all failed.
+  if (successList.isEmpty()) {
 StringBuilder b = new StringBuilder();
 for(DatanodeID id : failedList) {
   b.append("\n  " + id);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b43f2aa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index d62194c..c82b47c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -228,6 +228,50 @@ public class TestLeaseRecovery {
   }
 
   /**
+   * Block/lease recovery should be retried with failed nodes from the second
+   * stage removed to avoid perpetual recovery failures.
+   */
+  @Test
+  public void testBlockRecoveryRetryAfterFailedRecovery() throws Exception {
+Configuration conf = new Configuration();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+Path file = new Path("/testBlockRecoveryRetryAfterFailedRecovery");
+DistributedFileSystem dfs = cluster.getFileSystem();
+
+// Create a file.
+FSDataOutputStream out = dfs.create(file);
+final int FILE_SIZE = 128 * 1024;
+int count = 0;
+while (count < FILE_SIZE) {
+  out.writeBytes("DE K9SUL");
+  count += 8;
+}
+out.hsync();
+
+// Abort the original stream.
+((DFSOutputStream) out.getWrappedStream()).abort();
+
+LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
+file.toString(), 0, count);
+ExtendedBlock block = locations.get(0).getBlock();
+
+// Finalize one replica to simulate a partial close failure.
+cluster.getDataNodes().get(0).getFSDataset().finalizeBlock(block, false);
+// Delete the meta file to simulate a rename/move failure.
+cluster.deleteMeta(0, block);
+
+// Try to recover the lease.
+DistributedFileSystem newDfs = (DistributedFileSystem) FileSystem
+.newInstance(cluster.getConfiguration(0));
+count = 0;
+while (count++ < 15 && !newDfs.recoverLease(file)) {
+  Thread.sleep(1000);
+}
+// The lease should have been recovered.
+assertTrue("File should be closed", newDfs.recoverLease(file));
+  }
+
+  /**
* Recover the lease on a file and append file from another client.
*/
   @Test



hadoop git commit: HDFS-12070. Failed block recovery leaves files open indefinitely and at risk for data loss. Contributed by Kihwal Lee.

2018-02-26 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 21d4b5fd2 -> 1087b9af8


HDFS-12070. Failed block recovery leaves files open indefinitely and at risk 
for data loss. Contributed by Kihwal Lee.

(cherry picked from commit 451265a83d8798624ae2a144bc58fa41db826704)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1087b9af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1087b9af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1087b9af

Branch: refs/heads/branch-3.0
Commit: 1087b9af8c34742bdcf90f2e5b809bddb9f79315
Parents: 21d4b5f
Author: Kihwal Lee 
Authored: Mon Feb 26 10:30:50 2018 -0600
Committer: Kihwal Lee 
Committed: Mon Feb 26 10:30:50 2018 -0600

--
 .../server/datanode/BlockRecoveryWorker.java|  6 +--
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   | 44 
 2 files changed, 46 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1087b9af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index 2ecd986..94835e2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -307,10 +307,8 @@ public class BlockRecoveryWorker {
 }
   }
 
-  // If any of the data-nodes failed, the recovery fails, because
-  // we never know the actual state of the replica on failed data-nodes.
-  // The recovery should be started over.
-  if (!failedList.isEmpty()) {
+  // Abort if all failed.
+  if (successList.isEmpty()) {
 throw new IOException("Cannot recover " + block
 + ", the following datanodes failed: " + failedList);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1087b9af/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index d62194c..c82b47c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -228,6 +228,50 @@ public class TestLeaseRecovery {
   }
 
   /**
+   * Block/lease recovery should be retried with failed nodes from the second
+   * stage removed to avoid perpetual recovery failures.
+   */
+  @Test
+  public void testBlockRecoveryRetryAfterFailedRecovery() throws Exception {
+Configuration conf = new Configuration();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+Path file = new Path("/testBlockRecoveryRetryAfterFailedRecovery");
+DistributedFileSystem dfs = cluster.getFileSystem();
+
+// Create a file.
+FSDataOutputStream out = dfs.create(file);
+final int FILE_SIZE = 128 * 1024;
+int count = 0;
+while (count < FILE_SIZE) {
+  out.writeBytes("DE K9SUL");
+  count += 8;
+}
+out.hsync();
+
+// Abort the original stream.
+((DFSOutputStream) out.getWrappedStream()).abort();
+
+LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
+file.toString(), 0, count);
+ExtendedBlock block = locations.get(0).getBlock();
+
+// Finalize one replica to simulate a partial close failure.
+cluster.getDataNodes().get(0).getFSDataset().finalizeBlock(block, false);
+// Delete the meta file to simulate a rename/move failure.
+cluster.deleteMeta(0, block);
+
+// Try to recover the lease.
+DistributedFileSystem newDfs = (DistributedFileSystem) FileSystem
+.newInstance(cluster.getConfiguration(0));
+count = 0;
+while (count++ < 15 && !newDfs.recoverLease(file)) {
+  Thread.sleep(1000);
+}
+// The lease should have been recovered.
+assertTrue("File should be closed", newDfs.recoverLease(file));
+  }
+
+  /**
* Recover the lease on a file and append file from another client.
*/
   @Test


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

hadoop git commit: HDFS-12070. Failed block recovery leaves files open indefinitely and at risk for data loss. Contributed by Kihwal Lee.

2018-02-26 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 cb260a2d3 -> 33f82323b


HDFS-12070. Failed block recovery leaves files open indefinitely and at risk 
for data loss. Contributed by Kihwal Lee.

(cherry picked from commit 451265a83d8798624ae2a144bc58fa41db826704)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33f82323
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33f82323
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33f82323

Branch: refs/heads/branch-3.1
Commit: 33f82323b0db22f1dc884ba59bbc367311c0
Parents: cb260a2
Author: Kihwal Lee 
Authored: Mon Feb 26 10:29:28 2018 -0600
Committer: Kihwal Lee 
Committed: Mon Feb 26 10:29:28 2018 -0600

--
 .../server/datanode/BlockRecoveryWorker.java|  6 +--
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   | 44 
 2 files changed, 46 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33f82323/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index 2ecd986..94835e2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -307,10 +307,8 @@ public class BlockRecoveryWorker {
 }
   }
 
-  // If any of the data-nodes failed, the recovery fails, because
-  // we never know the actual state of the replica on failed data-nodes.
-  // The recovery should be started over.
-  if (!failedList.isEmpty()) {
+  // Abort if all failed.
+  if (successList.isEmpty()) {
 throw new IOException("Cannot recover " + block
 + ", the following datanodes failed: " + failedList);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33f82323/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index d62194c..c82b47c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -228,6 +228,50 @@ public class TestLeaseRecovery {
   }
 
   /**
+   * Block/lease recovery should be retried with failed nodes from the second
+   * stage removed to avoid perpetual recovery failures.
+   */
+  @Test
+  public void testBlockRecoveryRetryAfterFailedRecovery() throws Exception {
+Configuration conf = new Configuration();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+Path file = new Path("/testBlockRecoveryRetryAfterFailedRecovery");
+DistributedFileSystem dfs = cluster.getFileSystem();
+
+// Create a file.
+FSDataOutputStream out = dfs.create(file);
+final int FILE_SIZE = 128 * 1024;
+int count = 0;
+while (count < FILE_SIZE) {
+  out.writeBytes("DE K9SUL");
+  count += 8;
+}
+out.hsync();
+
+// Abort the original stream.
+((DFSOutputStream) out.getWrappedStream()).abort();
+
+LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
+file.toString(), 0, count);
+ExtendedBlock block = locations.get(0).getBlock();
+
+// Finalize one replica to simulate a partial close failure.
+cluster.getDataNodes().get(0).getFSDataset().finalizeBlock(block, false);
+// Delete the meta file to simulate a rename/move failure.
+cluster.deleteMeta(0, block);
+
+// Try to recover the lease.
+DistributedFileSystem newDfs = (DistributedFileSystem) FileSystem
+.newInstance(cluster.getConfiguration(0));
+count = 0;
+while (count++ < 15 && !newDfs.recoverLease(file)) {
+  Thread.sleep(1000);
+}
+// The lease should have been recovered.
+assertTrue("File should be closed", newDfs.recoverLease(file));
+  }
+
+  /**
* Recover the lease on a file and append file from another client.
*/
   @Test


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

hadoop git commit: HDFS-12070. Failed block recovery leaves files open indefinitely and at risk for data loss. Contributed by Kihwal Lee.

2018-02-26 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2fa7963c3 -> 451265a83


HDFS-12070. Failed block recovery leaves files open indefinitely and at risk 
for data loss. Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/451265a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/451265a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/451265a8

Branch: refs/heads/trunk
Commit: 451265a83d8798624ae2a144bc58fa41db826704
Parents: 2fa7963
Author: Kihwal Lee 
Authored: Mon Feb 26 10:28:04 2018 -0600
Committer: Kihwal Lee 
Committed: Mon Feb 26 10:28:04 2018 -0600

--
 .../server/datanode/BlockRecoveryWorker.java|  6 +--
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   | 44 
 2 files changed, 46 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/451265a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index 2ecd986..94835e2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -307,10 +307,8 @@ public class BlockRecoveryWorker {
 }
   }
 
-  // If any of the data-nodes failed, the recovery fails, because
-  // we never know the actual state of the replica on failed data-nodes.
-  // The recovery should be started over.
-  if (!failedList.isEmpty()) {
+  // Abort if all failed.
+  if (successList.isEmpty()) {
 throw new IOException("Cannot recover " + block
 + ", the following datanodes failed: " + failedList);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/451265a8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index d62194c..c82b47c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -228,6 +228,50 @@ public class TestLeaseRecovery {
   }
 
   /**
+   * Block/lease recovery should be retried with failed nodes from the second
+   * stage removed to avoid perpetual recovery failures.
+   */
+  @Test
+  public void testBlockRecoveryRetryAfterFailedRecovery() throws Exception {
+Configuration conf = new Configuration();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+Path file = new Path("/testBlockRecoveryRetryAfterFailedRecovery");
+DistributedFileSystem dfs = cluster.getFileSystem();
+
+// Create a file.
+FSDataOutputStream out = dfs.create(file);
+final int FILE_SIZE = 128 * 1024;
+int count = 0;
+while (count < FILE_SIZE) {
+  out.writeBytes("DE K9SUL");
+  count += 8;
+}
+out.hsync();
+
+// Abort the original stream.
+((DFSOutputStream) out.getWrappedStream()).abort();
+
+LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
+file.toString(), 0, count);
+ExtendedBlock block = locations.get(0).getBlock();
+
+// Finalize one replica to simulate a partial close failure.
+cluster.getDataNodes().get(0).getFSDataset().finalizeBlock(block, false);
+// Delete the meta file to simulate a rename/move failure.
+cluster.deleteMeta(0, block);
+
+// Try to recover the lease.
+DistributedFileSystem newDfs = (DistributedFileSystem) FileSystem
+.newInstance(cluster.getConfiguration(0));
+count = 0;
+while (count++ < 15 && !newDfs.recoverLease(file)) {
+  Thread.sleep(1000);
+}
+// The lease should have been recovered.
+assertTrue("File should be closed", newDfs.recoverLease(file));
+  }
+
+  /**
* Recover the lease on a file and append file from another client.
*/
   @Test


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
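
The test added above recovers the lease from a fresh client by polling DistributedFileSystem#recoverLease until the NameNode reports the file closed. A minimal sketch of that polling pattern, assuming an already-configured Configuration pointing at the target cluster and an illustrative helper name; the one-second sleep and bounded retry count mirror the test, everything else is an assumption:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  class LeaseRecoveryPollSketch {
    // recoverLease() returns true once the file is closed; keep retrying
    // while the NameNode is still reassembling the last block from the
    // replicas that survived recovery.
    static boolean waitForLeaseRecovery(Configuration conf, Path file,
        int maxAttempts) throws Exception {
      DistributedFileSystem dfs =
          (DistributedFileSystem) FileSystem.newInstance(conf);
      try {
        for (int i = 0; i < maxAttempts; i++) {
          if (dfs.recoverLease(file)) {
            return true;
          }
          Thread.sleep(1000L);
        }
        return dfs.recoverLease(file);
      } finally {
        dfs.close();
      }
    }
  }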



hadoop git commit: Revert "HDFS-13164. File not closed if streamer fail with DSQuotaExceededException."

2018-02-26 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8763d07f9 -> 23a658c4e


Revert "HDFS-13164. File not closed if streamer fail with 
DSQuotaExceededException."

This reverts commit 8763d07f97c4667566badabc2ec2e2cd9ae92c0e.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23a658c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23a658c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23a658c4

Branch: refs/heads/branch-2.8
Commit: 23a658c4e7e1cb486bf5a83ddd1fb4272e0450c8
Parents: 8763d07
Author: Jason Lowe 
Authored: Mon Feb 26 08:59:11 2018 -0600
Committer: Jason Lowe 
Committed: Mon Feb 26 08:59:11 2018 -0600

--
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  63 ++-
 .../hadoop/hdfs/client/impl/LeaseRenewer.java   |   2 +-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  | 109 +--
 3 files changed, 10 insertions(+), 164 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23a658c4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index b3245a5..09d3143 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -815,19 +815,7 @@ public class DFSOutputStream extends FSOutputSummer
 
   protected synchronized void closeImpl() throws IOException {
 if (isClosed()) {
-  LOG.debug("Closing an already closed stream. [Stream:{}, streamer:{}]",
-  closed, getStreamer().streamerClosed());
-  try {
-getStreamer().getLastException().check(true);
-  } catch (IOException ioe) {
-cleanupAndRethrowIOException(ioe);
-  } finally {
-if (!closed) {
-  // If stream is not closed but streamer closed, clean up the stream.
-  // Most importantly, end the file lease.
-  closeThreads(true);
-}
-  }
+  getStreamer().getLastException().check(true);
   return;
 }
 
@@ -842,12 +830,14 @@ public class DFSOutputStream extends FSOutputSummer
 setCurrentPacketToEmpty();
   }
 
-  try {
-flushInternal(); // flush all data to Datanodes
-  } catch (IOException ioe) {
-cleanupAndRethrowIOException(ioe);
+  flushInternal(); // flush all data to Datanodes
+  // get last block before destroying the streamer
+  ExtendedBlock lastBlock = getStreamer().getBlock();
+
+  try (TraceScope ignored =
+   dfsClient.getTracer().newScope("completeFile")) {
+completeFile(lastBlock);
   }
-  completeFile();
 } catch (ClosedChannelException ignored) {
 } finally {
   // Failures may happen when flushing data.
@@ -859,43 +849,6 @@ public class DFSOutputStream extends FSOutputSummer
 }
   }
 
-  private void completeFile() throws IOException {
-// get last block before destroying the streamer
-ExtendedBlock lastBlock = getStreamer().getBlock();
-try (TraceScope ignored =
-dfsClient.getTracer().newScope("completeFile")) {
-  completeFile(lastBlock);
-}
-  }
-
-  /**
-   * Determines whether an IOException thrown needs extra cleanup on the 
stream.
-   * Space quota exceptions will be thrown when getting new blocks, so the
-   * open HDFS file need to be closed.
-   *
-   * @param ioe the IOException
-   * @return whether the stream needs cleanup for the given IOException
-   */
-  private boolean exceptionNeedsCleanup(IOException ioe) {
-return ioe instanceof DSQuotaExceededException
-|| ioe instanceof QuotaByStorageTypeExceededException;
-  }
-
-  private void cleanupAndRethrowIOException(IOException ioe)
-  throws IOException {
-if (exceptionNeedsCleanup(ioe)) {
-  final MultipleIOException.Builder b = new MultipleIOException.Builder();
-  b.add(ioe);
-  try {
-completeFile();
-  } catch (IOException e) {
-b.add(e);
-throw b.build();
-  }
-}
-throw ioe;
-  }
-
   // should be called holding (this) lock since setTestFilename() may
   // be called during unit tests
   protected void completeFile(ExtendedBlock last) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23a658c4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java