[2/2] hadoop git commit: HDFS-13056. Expose file-level composite CRCs in HDFS which are comparable across different instances/layouts. Contributed by Dennis Huo.

2018-04-10 Thread xiao
HDFS-13056. Expose file-level composite CRCs in HDFS which are comparable 
across different instances/layouts. Contributed by Dennis Huo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c9cdad6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c9cdad6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c9cdad6

Branch: refs/heads/trunk
Commit: 7c9cdad6d04c98db5a83e2108219bf6e6c903daf
Parents: 6cc59a0
Author: Xiao Chen 
Authored: Tue Apr 10 20:56:07 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 21:31:48 2018 -0700

--
 .../hadoop/fs/CompositeCrcFileChecksum.java |  82 +
 .../main/java/org/apache/hadoop/fs/Options.java |  11 +
 .../org/apache/hadoop/util/CrcComposer.java | 187 ++
 .../java/org/apache/hadoop/util/CrcUtil.java| 220 +++
 .../org/apache/hadoop/util/DataChecksum.java|  18 +
 .../org/apache/hadoop/util/TestCrcComposer.java | 242 
 .../org/apache/hadoop/util/TestCrcUtil.java | 232 
 .../main/java/org/apache/hadoop/fs/Hdfs.java|   4 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  56 ++-
 .../hadoop/hdfs/DistributedFileSystem.java  |   5 +-
 .../apache/hadoop/hdfs/FileChecksumHelper.java  | 365 +--
 .../hdfs/client/HdfsClientConfigKeys.java   |   2 +
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  27 ++
 .../hdfs/protocol/BlockChecksumOptions.java |  54 +++
 .../hadoop/hdfs/protocol/BlockChecksumType.java |  30 ++
 .../datatransfer/DataTransferProtocol.java  |  12 +-
 .../hdfs/protocol/datatransfer/Sender.java  |  11 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  44 +++
 .../src/main/proto/datatransfer.proto   |   7 +-
 .../src/main/proto/hdfs.proto   |  21 ++
 .../hdfs/protocol/datatransfer/Receiver.java|   8 +-
 .../server/datanode/BlockChecksumHelper.java| 289 +--
 .../hdfs/server/datanode/DataXceiver.java   |  26 +-
 ...dBlockChecksumCompositeCrcReconstructor.java |  80 
 ...StripedBlockChecksumMd5CrcReconstructor.java |  74 
 .../StripedBlockChecksumReconstructor.java  |  66 ++--
 .../erasurecode/StripedBlockReconstructor.java  |   1 +
 .../src/main/resources/hdfs-default.xml |  11 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  31 +-
 .../apache/hadoop/hdfs/TestFileChecksum.java| 101 -
 .../hdfs/TestFileChecksumCompositeCrc.java  |  47 +++
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|  14 +
 .../hadoop/tools/mapred/TestCopyMapper.java | 173 +++--
 .../mapred/TestCopyMapperCompositeCrc.java  |  50 +++
 34 files changed, 2359 insertions(+), 242 deletions(-)
--
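For illustration, a minimal client-side sketch (not part of the patch) of how the new checksum type would be requested and compared across two clusters. The cluster URIs and path are placeholders, and it assumes the dfs.checksum.combine.mode key this patch adds to hdfs-default.xml:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.Path;

public class CompositeCrcCompare {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Combine chunk CRCs into a composite CRC instead of the default
    // MD5-of-MD5s-of-CRCs, which depends on block/chunk layout.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");

    Path src = new Path("hdfs://clusterA/data/f");  // placeholder URIs
    Path dst = new Path("hdfs://clusterB/data/f");
    FileChecksum srcSum = src.getFileSystem(conf).getFileChecksum(src);
    FileChecksum dstSum = dst.getFileSystem(conf).getFileChecksum(dst);

    // Composite CRCs stay comparable even when the two copies use
    // different block sizes or one of them is erasure-coded.
    System.out.println("match = " + srcSum.equals(dstSum));
  }
}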


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c9cdad6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CompositeCrcFileChecksum.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CompositeCrcFileChecksum.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CompositeCrcFileChecksum.java
new file mode 100644
index 000..e1ed5cb
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CompositeCrcFileChecksum.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
+import org.apache.hadoop.util.CrcUtil;
+import org.apache.hadoop.util.DataChecksum;
+
+/** Composite CRC. */
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+@InterfaceStability.Unstable
+public class CompositeCrcFileChecksum extends FileChecksum {
+  public static final int LENGTH = Integer.SIZE / Byte.SIZE;
+
+  private int crc;
+  

[2/2] hadoop git commit: MAPREDUCE-7062. Update mapreduce.job.tags description for making use for ATSv2 purpose. Contributed by Charan Hebri.

2018-04-10 Thread rohithsharmaks
MAPREDUCE-7062. Update mapreduce.job.tags description for making use for ATSv2 
purpose. Contributed by Charan Hebri.

(cherry picked from commit 6cc59a09e7330dc893b386d84c8f2aa86c02eace)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/754e5189
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/754e5189
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/754e5189

Branch: refs/heads/branch-2
Commit: 754e5189ee485dfa67b4243e14489aa7c3f052df
Parents: fe4c7ee
Author: Rohith Sharma K S 
Authored: Wed Apr 11 09:45:39 2018 +0530
Committer: Rohith Sharma K S 
Committed: Wed Apr 11 09:52:17 2018 +0530

--
 .../src/main/resources/mapred-default.xml  | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/754e5189/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 4f89762..a33960c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1052,6 +1052,12 @@
 
  <description> Tags for the job that will be passed to YARN at submission
   time. Queries to YARN for applications can filter on these tags.
+  If these tags are intended to be used with The YARN Timeline Service v.2,
+  prefix them with the appropriate tag names for flow name, flow version and
+  flow run id. Example:
+  timeline_flow_name_tag:foo,
+  timeline_flow_version_tag:3df8b0d6100530080d2e0decf9e528e57c42a90a,
+  timeline_flow_run_id_tag:1465246348599
  </description>
 </property>
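For illustration, a hedged sketch (not part of the patch) of a job submission that sets these tags programmatically; the flow name, version and run id values below are the same placeholders the description uses:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class TaggedJobSubmit {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Comma-separated tags; the timeline_* prefixes let ATSv2 derive the
    // flow name, flow version and flow run id for this job.
    conf.set("mapreduce.job.tags",
        "timeline_flow_name_tag:foo,"
            + "timeline_flow_version_tag:3df8b0d6100530080d2e0decf9e528e57c42a90a,"
            + "timeline_flow_run_id_tag:1465246348599");
    Job job = Job.getInstance(conf, "tagged-job");
    // ...set mapper, reducer and input/output paths as usual, then:
    // job.waitForCompletion(true);
  }
}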
 





[1/2] hadoop git commit: YARN-8073 TimelineClientImpl doesn't honor yarn.timeline-service.versions configuration. Contributed by Rohith Sharma K S

2018-04-10 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 95cedc558 -> 754e5189e


YARN-8073 TimelineClientImpl doesn't honor yarn.timeline-service.versions 
configuration. Contributed by Rohith Sharma K S

(cherry picked from commit 345e7624d58a058a1bad666bd1e5ce4b346a9056)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe4c7ee9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe4c7ee9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe4c7ee9

Branch: refs/heads/branch-2
Commit: fe4c7ee94c562d09c16e471223a047841bfaef9d
Parents: 95cedc5
Author: Vrushali C 
Authored: Wed Apr 4 15:08:03 2018 -0700
Committer: Rohith Sharma K S 
Committed: Wed Apr 11 09:51:42 2018 +0530

--
 .../jobhistory/JobHistoryEventHandler.java  |  2 +-
 .../hadoop/yarn/conf/YarnConfiguration.java | 21 
 .../client/api/impl/TimelineClientImpl.java | 23 +
 .../yarn/util/timeline/TimelineUtils.java   |  3 +--
 .../TestCombinedSystemMetricsPublisher.java | 26 
 .../reader/TimelineReaderServer.java|  2 +-
 6 files changed, 61 insertions(+), 16 deletions(-)
--
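For context, a minimal sketch (not part of the patch) of the configuration this fix honors: with yarn.timeline-service.versions set, several publisher versions can be enabled at once, and the helper added in this commit answers the v1.5 case. The "1.5,2.0" value is an assumed example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class TimelineVersionsCheck {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean("yarn.timeline-service.enabled", true);
    // Run the v1.5 and v2 publishers side by side; this is the key that
    // TimelineClientImpl now consults instead of the single-version one.
    conf.set("yarn.timeline-service.versions", "1.5,2.0");

    System.out.println("v1.5 enabled: "
        + YarnConfiguration.timelineServiceV15Enabled(conf));
    System.out.println("v2 enabled: "
        + YarnConfiguration.timelineServiceV2Enabled(conf));
  }
}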


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe4c7ee9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 51f3538..4529d55 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -269,7 +269,7 @@ public class JobHistoryEventHandler extends AbstractService
       LOG.info("Emitting job history data to the timeline service is enabled");
       if (YarnConfiguration.timelineServiceEnabled(conf)) {
         boolean timelineServiceV2Enabled =
-            ((int) YarnConfiguration.getTimelineServiceVersion(conf) == 2);
+            YarnConfiguration.timelineServiceV2Enabled(conf);
         if (timelineServiceV2Enabled) {
           timelineV2Client =
               ((MRAppMaster.RunningAppContext)context).getTimelineV2Client();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe4c7ee9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index d6e4388..b058e83 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3487,6 +3487,27 @@ public class YarnConfiguration extends Configuration {
   }
 
   /**
+   * Returns whether the timeline service v.1.5 is enabled via configuration.
+   *
+   * @param conf the configuration
+   * @return whether the timeline service v.1.5 is enabled. V.1.5 refers to a
+   * version equal to 1.5.
+   */
+  public static boolean timelineServiceV15Enabled(Configuration conf) {
+    boolean enabled = false;
+    if (timelineServiceEnabled(conf)) {
+      Collection<Float> versions = getTimelineServiceVersions(conf);
+      for (Float version : versions) {
+        if (Float.compare(version, 1.5f) == 0) {
+          enabled = true;
+          break;
+        }
+      }
+    }
+    return enabled;
+  }
+
+  /**
   * Returns all the active timeline service versions. It does not check
   * whether the timeline service itself is enabled.
   *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe4c7ee9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 

[2/2] hadoop git commit: MAPREDUCE-7062. Update mapreduce.job.tags description for making use for ATSv2 purpose. Contributed by Charan Hebri.

2018-04-10 Thread rohithsharmaks
MAPREDUCE-7062. Update mapreduce.job.tags description for making use for ATSv2 
purpose. Contributed by Charan Hebri.

(cherry picked from commit 6cc59a09e7330dc893b386d84c8f2aa86c02eace)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7cca3486
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7cca3486
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7cca3486

Branch: refs/heads/branch-3.0
Commit: 7cca3486b7189fadb187b4706f482d868ada8d52
Parents: ec258b8
Author: Rohith Sharma K S 
Authored: Wed Apr 11 09:45:39 2018 +0530
Committer: Rohith Sharma K S 
Committed: Wed Apr 11 09:49:38 2018 +0530

--
 .../src/main/resources/mapred-default.xml  | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cca3486/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index db36e91..16ecd90 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1023,6 +1023,12 @@
 
  <description> Tags for the job that will be passed to YARN at submission
   time. Queries to YARN for applications can filter on these tags.
+  If these tags are intended to be used with The YARN Timeline Service v.2,
+  prefix them with the appropriate tag names for flow name, flow version and
+  flow run id. Example:
+  timeline_flow_name_tag:foo,
+  timeline_flow_version_tag:3df8b0d6100530080d2e0decf9e528e57c42a90a,
+  timeline_flow_run_id_tag:1465246348599
  </description>
 </property>
 





[1/2] hadoop git commit: YARN-8073 TimelineClientImpl doesn't honor yarn.timeline-service.versions configuration. Contributed by Rohith Sharma K S

2018-04-10 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 72acda144 -> 7cca3486b


YARN-8073 TimelineClientImpl doesn't honor yarn.timeline-service.versions 
configuration. Contributed by Rohith Sharma K S

(cherry picked from commit 345e7624d58a058a1bad666bd1e5ce4b346a9056)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec258b8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec258b8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec258b8e

Branch: refs/heads/branch-3.0
Commit: ec258b8ed147a6a7f6dd734ed97b79761717825a
Parents: 72acda1
Author: Vrushali C 
Authored: Wed Apr 4 15:08:03 2018 -0700
Committer: Rohith Sharma K S 
Committed: Wed Apr 11 09:49:25 2018 +0530

--
 .../jobhistory/JobHistoryEventHandler.java  |  2 +-
 .../hadoop/yarn/conf/YarnConfiguration.java | 21 
 .../client/api/impl/TimelineClientImpl.java | 23 +
 .../yarn/util/timeline/TimelineUtils.java   |  3 +--
 .../TestCombinedSystemMetricsPublisher.java | 26 
 .../reader/TimelineReaderServer.java|  2 +-
 6 files changed, 61 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec258b8e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index ae46129..cef26d3 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -269,7 +269,7 @@ public class JobHistoryEventHandler extends AbstractService
       LOG.info("Emitting job history data to the timeline service is enabled");
       if (YarnConfiguration.timelineServiceEnabled(conf)) {
         boolean timelineServiceV2Enabled =
-            ((int) YarnConfiguration.getTimelineServiceVersion(conf) == 2);
+            YarnConfiguration.timelineServiceV2Enabled(conf);
         if (timelineServiceV2Enabled) {
           timelineV2Client =
               ((MRAppMaster.RunningAppContext)context).getTimelineV2Client();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec258b8e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 91bc5ba..8853ae6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3520,6 +3520,27 @@ public class YarnConfiguration extends Configuration {
   }
 
   /**
+   * Returns whether the timeline service v.1.5 is enabled via configuration.
+   *
+   * @param conf the configuration
+   * @return whether the timeline service v.1.5 is enabled. V.1.5 refers to a
+   * version equal to 1.5.
+   */
+  public static boolean timelineServiceV15Enabled(Configuration conf) {
+    boolean enabled = false;
+    if (timelineServiceEnabled(conf)) {
+      Collection<Float> versions = getTimelineServiceVersions(conf);
+      for (Float version : versions) {
+        if (Float.compare(version, 1.5f) == 0) {
+          enabled = true;
+          break;
+        }
+      }
+    }
+    return enabled;
+  }
+
+  /**
   * Returns all the active timeline service versions. It does not check
   * whether the timeline service itself is enabled.
   *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec258b8e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 

hadoop git commit: YARN-8073 TimelineClientImpl doesn't honor yarn.timeline-service.versions configuration. Contributed by Rohith Sharma K S

2018-04-10 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/YARN-7055 62e93da29 -> 5bea88f7d


YARN-8073 TimelineClientImpl doesn't honor yarn.timeline-service.versions 
configuration. Contributed by Rohith Sharma K S

(cherry picked from commit 345e7624d58a058a1bad666bd1e5ce4b346a9056)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5bea88f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5bea88f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5bea88f7

Branch: refs/heads/YARN-7055
Commit: 5bea88f7da72ea047e333a66e6b2bf4d8537c634
Parents: 62e93da
Author: Vrushali C 
Authored: Wed Apr 4 15:08:03 2018 -0700
Committer: Rohith Sharma K S 
Committed: Wed Apr 11 09:53:41 2018 +0530

--
 .../jobhistory/JobHistoryEventHandler.java  |  2 +-
 .../hadoop/yarn/conf/YarnConfiguration.java | 21 
 .../client/api/impl/TimelineClientImpl.java | 23 +
 .../yarn/util/timeline/TimelineUtils.java   |  3 +--
 .../TestCombinedSystemMetricsPublisher.java | 26 
 .../reader/TimelineReaderServer.java|  2 +-
 6 files changed, 61 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bea88f7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index fd93d07..52c13f1 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -269,7 +269,7 @@ public class JobHistoryEventHandler extends AbstractService
       LOG.info("Emitting job history data to the timeline service is enabled");
       if (YarnConfiguration.timelineServiceEnabled(conf)) {
         boolean timelineServiceV2Enabled =
-            ((int) YarnConfiguration.getTimelineServiceVersion(conf) == 2);
+            YarnConfiguration.timelineServiceV2Enabled(conf);
         if (timelineServiceV2Enabled) {
           timelineV2Client =
               ((MRAppMaster.RunningAppContext)context).getTimelineV2Client();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bea88f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 6677478..7fb9d73 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3759,6 +3759,27 @@ public class YarnConfiguration extends Configuration {
   }
 
   /**
+   * Returns whether the timeline service v.1.5 is enabled via configuration.
+   *
+   * @param conf the configuration
+   * @return whether the timeline service v.1.5 is enabled. V.1.5 refers to a
+   * version equal to 1.5.
+   */
+  public static boolean timelineServiceV15Enabled(Configuration conf) {
+    boolean enabled = false;
+    if (timelineServiceEnabled(conf)) {
+      Collection<Float> versions = getTimelineServiceVersions(conf);
+      for (Float version : versions) {
+        if (Float.compare(version, 1.5f) == 0) {
+          enabled = true;
+          break;
+        }
+      }
+    }
+    return enabled;
+  }
+
+  /**
   * Returns all the active timeline service versions. It does not check
   * whether the timeline service itself is enabled.
   *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bea88f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 

[2/2] hadoop git commit: MAPREDUCE-7062. Update mapreduce.job.tags description for making use for ATSv2 purpose. Contributed by Charan Hebri.

2018-04-10 Thread rohithsharmaks
MAPREDUCE-7062. Update mapreduce.job.tags description for making use for ATSv2 
purpose. Contributed by Charan Hebri.

(cherry picked from commit 6cc59a09e7330dc893b386d84c8f2aa86c02eace)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb6555df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb6555df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb6555df

Branch: refs/heads/branch-3.1
Commit: cb6555df24b333b79a23235aba86565ff6ebd2d7
Parents: 67cd7ac
Author: Rohith Sharma K S 
Authored: Wed Apr 11 09:45:39 2018 +0530
Committer: Rohith Sharma K S 
Committed: Wed Apr 11 09:47:15 2018 +0530

--
 .../src/main/resources/mapred-default.xml  | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb6555df/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index cf8be33..d47c0ff 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1045,6 +1045,12 @@
 
  <description> Tags for the job that will be passed to YARN at submission
   time. Queries to YARN for applications can filter on these tags.
+  If these tags are intended to be used with The YARN Timeline Service v.2,
+  prefix them with the appropriate tag names for flow name, flow version and
+  flow run id. Example:
+  timeline_flow_name_tag:foo,
+  timeline_flow_version_tag:3df8b0d6100530080d2e0decf9e528e57c42a90a,
+  timeline_flow_run_id_tag:1465246348599
  </description>
 </property>
 





[1/2] hadoop git commit: YARN-8073 TimelineClientImpl doesn't honor yarn.timeline-service.versions configuration. Contributed by Rohith Sharma K S

2018-04-10 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 c1b4c6adf -> cb6555df2


YARN-8073 TimelineClientImpl doesn't honor yarn.timeline-service.versions 
configuration. Contributed by Rohith Sharma K S

(cherry picked from commit 345e7624d58a058a1bad666bd1e5ce4b346a9056)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67cd7acf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67cd7acf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67cd7acf

Branch: refs/heads/branch-3.1
Commit: 67cd7acf993f1b3ef24b98567485574752febbd4
Parents: c1b4c6a
Author: Vrushali C 
Authored: Wed Apr 4 15:08:03 2018 -0700
Committer: Rohith Sharma K S 
Committed: Wed Apr 11 09:41:04 2018 +0530

--
 .../jobhistory/JobHistoryEventHandler.java  |  2 +-
 .../hadoop/yarn/conf/YarnConfiguration.java | 21 
 .../client/api/impl/TimelineClientImpl.java | 23 +
 .../yarn/util/timeline/TimelineUtils.java   |  3 +--
 .../TestCombinedSystemMetricsPublisher.java | 26 
 .../reader/TimelineReaderServer.java|  2 +-
 6 files changed, 61 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/67cd7acf/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index fd93d07..52c13f1 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -269,7 +269,7 @@ public class JobHistoryEventHandler extends AbstractService
       LOG.info("Emitting job history data to the timeline service is enabled");
       if (YarnConfiguration.timelineServiceEnabled(conf)) {
         boolean timelineServiceV2Enabled =
-            ((int) YarnConfiguration.getTimelineServiceVersion(conf) == 2);
+            YarnConfiguration.timelineServiceV2Enabled(conf);
         if (timelineServiceV2Enabled) {
           timelineV2Client =
               ((MRAppMaster.RunningAppContext)context).getTimelineV2Client();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67cd7acf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 5a2c1f9..a2cc9b7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3781,6 +3781,27 @@ public class YarnConfiguration extends Configuration {
   }
 
   /**
+   * Returns whether the timeline service v.1.5 is enabled via configuration.
+   *
+   * @param conf the configuration
+   * @return whether the timeline service v.1.5 is enabled. V.1.5 refers to a
+   * version equal to 1.5.
+   */
+  public static boolean timelineServiceV15Enabled(Configuration conf) {
+    boolean enabled = false;
+    if (timelineServiceEnabled(conf)) {
+      Collection<Float> versions = getTimelineServiceVersions(conf);
+      for (Float version : versions) {
+        if (Float.compare(version, 1.5f) == 0) {
+          enabled = true;
+          break;
+        }
+      }
+    }
+    return enabled;
+  }
+
+  /**
   * Returns all the active timeline service versions. It does not check
   * whether the timeline service itself is enabled.
   *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67cd7acf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 

hadoop git commit: MAPREDUCE-7062. Update mapreduce.job.tags description for making use for ATSv2 purpose. Contributed by Charan Hebri.

2018-04-10 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/trunk c04871109 -> 6cc59a09e


MAPREDUCE-7062. Update mapreduce.job.tags description for making use for ATSv2 
purpose. Contributed by Charan Hebri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6cc59a09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6cc59a09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6cc59a09

Branch: refs/heads/trunk
Commit: 6cc59a09e7330dc893b386d84c8f2aa86c02eace
Parents: c048711
Author: Rohith Sharma K S 
Authored: Wed Apr 11 09:45:39 2018 +0530
Committer: Rohith Sharma K S 
Committed: Wed Apr 11 09:45:39 2018 +0530

--
 .../src/main/resources/mapred-default.xml  | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cc59a09/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index cf8be33..d47c0ff 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1045,6 +1045,12 @@
 
  <description> Tags for the job that will be passed to YARN at submission
   time. Queries to YARN for applications can filter on these tags.
+  If these tags are intended to be used with The YARN Timeline Service v.2,
+  prefix them with the appropriate tag names for flow name, flow version and
+  flow run id. Example:
+  timeline_flow_name_tag:foo,
+  timeline_flow_version_tag:3df8b0d6100530080d2e0decf9e528e57c42a90a,
+  timeline_flow_run_id_tag:1465246348599
  </description>
 </property>
 





hadoop git commit: YARN-7941. Transitive dependencies for component are not resolved. Contributed by Billie Rinaldi.

2018-04-10 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 53abef4e3 -> c1b4c6adf


YARN-7941. Transitive dependencies for component are not resolved. Contributed 
by Billie Rinaldi.

(cherry picked from commit c0487110990958fa985d273eb178bdf76002cf3a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1b4c6ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1b4c6ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1b4c6ad

Branch: refs/heads/branch-3.1
Commit: c1b4c6adf4e6fc733ed633f79f717de2950d607e
Parents: 53abef4
Author: Rohith Sharma K S 
Authored: Wed Apr 11 09:18:50 2018 +0530
Committer: Rohith Sharma K S 
Committed: Wed Apr 11 09:19:48 2018 +0530

--
 .../hadoop/yarn/service/component/Component.java|  1 +
 .../hadoop/yarn/service/TestYarnNativeServices.java | 16 
 2 files changed, 13 insertions(+), 4 deletions(-)
--
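For context, a hedged sketch (not part of the patch, assuming the yarn-service records API) of the kind of spec the fixed code now handles: a three-component service whose dependencies form a transitive chain, so compc must wait for compb, which in turn waits for compa:

import java.util.Collections;

import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.api.records.Service;

public class DependencyChainSpec {
  public static void main(String[] args) {
    Service service = new Service();
    service.setName("teststartorder");
    service.setVersion("v1");
    service.addComponent(component("compa", null));
    service.addComponent(component("compb", "compa")); // starts after compa
    service.addComponent(component("compc", "compb")); // transitively after compa
  }

  private static Component component(String name, String dependsOn) {
    Component c = new Component();
    c.setName(name);
    c.setNumberOfContainers(2L);
    c.setLaunchCommand("sleep 1000");
    if (dependsOn != null) {
      c.setDependencies(Collections.singletonList(dependsOn));
    }
    return c;
  }
}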


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1b4c6ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index 3090692..9e10ae6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -164,6 +164,7 @@ public class Component implements EventHandler<ComponentEvent> {
     maxContainerFailurePerComp = componentSpec.getConfiguration()
         .getPropertyInt(CONTAINER_FAILURE_THRESHOLD, 10);
     createNumCompInstances(component.getNumberOfContainers());
+    setDesiredContainers(component.getNumberOfContainers().intValue());
   }
 
   private void createNumCompInstances(long count) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1b4c6ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
index 091e624..51a190e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
@@ -137,7 +137,9 @@ public class TestYarnNativeServices extends ServiceTestUtils {
 
   // Create compa with 2 containers
   // Create compb with 2 containers which depends on compa
-  // Check containers for compa started before containers for compb
+  // Create compc with 2 containers which depends on compb
+  // Check containers for compa started before containers for compb before
+  // containers for compc
   @Test (timeout = 20)
   public void testComponentStartOrder() throws Exception {
     setupInternal(NUM_NMS);
@@ -146,17 +148,23 @@ public class TestYarnNativeServices extends ServiceTestUtils {
     exampleApp.setName("teststartorder");
     exampleApp.setVersion("v1");
     exampleApp.addComponent(createComponent("compa", 2, "sleep 1000"));
-    Component compb = createComponent("compb", 2, "sleep 1000");
 
-    // Let compb depedends on compa;
+    // Let compb depend on compa
+    Component compb = createComponent("compb", 2, "sleep 1000");
     compb.setDependencies(Collections.singletonList("compa"));
     exampleApp.addComponent(compb);
 
+    // Let compc depend on compb
+    Component compc = createComponent("compc", 2, "sleep 1000");
+    compc.setDependencies(Collections.singletonList("compb"));
+    exampleApp.addComponent(compc);
+
     client.actionCreate(exampleApp);
 

hadoop git commit: YARN-7941. Transitive dependencies for component are not resolved. Contributed by Billie Rinaldi.

2018-04-10 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/trunk d919eb6ef -> c04871109


YARN-7941. Transitive dependencies for component are not resolved. Contributed 
by Billie Rinaldi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0487110
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0487110
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0487110

Branch: refs/heads/trunk
Commit: c0487110990958fa985d273eb178bdf76002cf3a
Parents: d919eb6
Author: Rohith Sharma K S 
Authored: Wed Apr 11 09:18:50 2018 +0530
Committer: Rohith Sharma K S 
Committed: Wed Apr 11 09:18:50 2018 +0530

--
 .../hadoop/yarn/service/component/Component.java|  1 +
 .../hadoop/yarn/service/TestYarnNativeServices.java | 16 
 2 files changed, 13 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0487110/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index a2127c8..39897f6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -179,6 +179,7 @@ public class Component implements EventHandler<ComponentEvent> {
     maxContainerFailurePerComp = componentSpec.getConfiguration()
         .getPropertyInt(CONTAINER_FAILURE_THRESHOLD, 10);
     createNumCompInstances(component.getNumberOfContainers());
+    setDesiredContainers(component.getNumberOfContainers().intValue());
   }
 
   private void createNumCompInstances(long count) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0487110/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
index 2b44701..5e267bb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
@@ -166,7 +166,9 @@ public class TestYarnNativeServices extends ServiceTestUtils {
 
   // Create compa with 2 containers
   // Create compb with 2 containers which depends on compa
-  // Check containers for compa started before containers for compb
+  // Create compc with 2 containers which depends on compb
+  // Check containers for compa started before containers for compb before
+  // containers for compc
   @Test (timeout = 20)
   public void testComponentStartOrder() throws Exception {
     setupInternal(NUM_NMS);
@@ -175,17 +177,23 @@ public class TestYarnNativeServices extends ServiceTestUtils {
     exampleApp.setName("teststartorder");
     exampleApp.setVersion("v1");
     exampleApp.addComponent(createComponent("compa", 2, "sleep 1000"));
-    Component compb = createComponent("compb", 2, "sleep 1000");
 
-    // Let compb depedends on compa;
+    // Let compb depend on compa
+    Component compb = createComponent("compb", 2, "sleep 1000");
     compb.setDependencies(Collections.singletonList("compa"));
     exampleApp.addComponent(compb);
 
+    // Let compc depend on compb
+    Component compc = createComponent("compc", 2, "sleep 1000");
+    compc.setDependencies(Collections.singletonList("compb"));
+    exampleApp.addComponent(compc);
+
     client.actionCreate(exampleApp);
     waitForServiceToBeStable(client, exampleApp);
     // check that containers 

[1/2] hadoop git commit: YARN-8116. Nodemanager fails with NumberFormatException: For input string: "". (Chandni Singh via wangda)

2018-04-10 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 6d6f65f22 -> 53abef4e3


YARN-8116. Nodemanager fails with NumberFormatException: For input string: "". 
(Chandni Singh via wangda)

Change-Id: Idd30cfca59982d3fc6e47aa1b88f844a78fae94d
(cherry picked from commit 2bf9cc2c73944c9f7cde56714b8cf6995cfa539b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5dc7d1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5dc7d1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5dc7d1e

Branch: refs/heads/branch-3.1
Commit: f5dc7d1edf8bc25fd3a6c8fb186913de07b08203
Parents: 6d6f65f
Author: Wangda Tan 
Authored: Tue Apr 10 17:32:38 2018 -0700
Committer: Wangda Tan 
Committed: Tue Apr 10 17:38:31 2018 -0700

--
 .../containermanager/container/ContainerImpl.java   |  3 ++-
 .../recovery/NMLeveldbStateStoreService.java|  4 +++-
 .../recovery/TestNMLeveldbStateStoreService.java| 16 
 3 files changed, 21 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dc7d1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 2115100..c09c7f1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -2191,7 +2191,8 @@ public class ContainerImpl implements Container {
   }
 
   private void storeRetryContext() {
-    if (windowRetryContext.getRestartTimes() != null) {
+    if (windowRetryContext.getRestartTimes() != null &&
+        !windowRetryContext.getRestartTimes().isEmpty()) {
       try {
         stateStore.storeContainerRestartTimes(containerId,
             windowRetryContext.getRestartTimes());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dc7d1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index bf4c0ad..723dd48 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -347,7 +347,9 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
             value.substring(1, value.length() - 1).split(", ");
         List<Long> restartTimes = new ArrayList<>();
         for (String restartTime : unparsedRestartTimes) {
-          restartTimes.add(Long.parseLong(restartTime));
+          if (!restartTime.isEmpty()) {
+            restartTimes.add(Long.parseLong(restartTime));
+          }
         }
         rcs.setRestartTimes(restartTimes);
       } else if (suffix.equals(CONTAINER_WORK_DIR_KEY_SUFFIX)) {
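To see why the guard is needed, a standalone sketch (JDK only, not part of the patch) of the failure mode: an empty restart-times list round-trips through the state store as the string "[]", and stripping the brackets then splitting yields a single empty token, which is exactly the NumberFormatException: For input string: "" from the bug title:

import java.util.ArrayList;
import java.util.List;

public class RestartTimesParseDemo {
  public static void main(String[] args) {
    String value = "[]";  // serialized form of an empty list
    String[] tokens = value.substring(1, value.length() - 1).split(", ");
    List<Long> restartTimes = new ArrayList<>();
    for (String token : tokens) {
      if (!token.isEmpty()) {  // the guard this patch adds
        restartTimes.add(Long.parseLong(token));
      }
    }
    System.out.println(restartTimes);  // prints [] instead of throwing
  }
}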

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dc7d1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
 

[2/2] hadoop git commit: YARN-8133. Doc link broken for yarn-service from overview page. (Rohith Sharma K S via wangda)

2018-04-10 Thread wangda
YARN-8133. Doc link broken for yarn-service from overview page. (Rohith Sharma 
K S via wangda)

Change-Id: Iacf9a004585dd59e1c0cd8f8c618a38f047cc0fe
(cherry picked from commit d919eb6efa1072517017c75fb323e391f4418dc8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53abef4e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53abef4e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53abef4e

Branch: refs/heads/branch-3.1
Commit: 53abef4e30cdfa5cfe9fd3eccf06e26da8b56dd2
Parents: f5dc7d1
Author: Wangda Tan 
Authored: Tue Apr 10 17:33:15 2018 -0700
Committer: Wangda Tan 
Committed: Tue Apr 10 17:38:37 2018 -0700

--
 .../src/site/markdown/yarn-service/Concepts.md|  6 +++---
 .../src/site/markdown/yarn-service/Overview.md| 14 +++---
 .../src/site/markdown/yarn-service/QuickStart.md  | 12 ++--
 .../src/site/markdown/yarn-service/RegistryDNS.md |  4 ++--
 .../site/markdown/yarn-service/ServiceDiscovery.md|  4 ++--
 5 files changed, 20 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53abef4e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
index e567d03..5c77f17 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
@@ -29,7 +29,7 @@ affinity and anti-affinity scheduling, log aggregation for 
services, automatical
 A restful API server is developed to allow users to deploy/manage their 
services on YARN via a simple JSON spec. This avoids users
 from dealing with the low-level APIs, writing complex code to bring their 
services onto YARN. The REST layer acts as a unified REST based entry for
 creation and lifecycle management of YARN services. Services here can range 
from simple single-component apps to the most complex, 
-multi-component applications needing special orchestration needs. Please refer 
to this [API doc](YarnServiceAPI.md) for detailed API documentations.
+multi-component applications needing special orchestration needs. Please refer 
to this [API doc](YarnServiceAPI.html) for detailed API documentations.
 
 The API-server is stateless, which means users can simply spin up multiple 
instances, and have a load balancer fronting them to 
 support HA, distribute the load etc.
@@ -37,10 +37,10 @@ support HA, distribute the load etc.
 ### Service Discovery
 A DNS server is implemented to enable discovering services on YARN via the 
standard mechanism: DNS lookup.
 
-The framework posts container information such as hostname and ip into the 
[YARN service registry](../registry/index.md). And the DNS server essentially 
exposes the
+The framework posts container information such as hostname and ip into the 
[YARN service registry](../registry/index.html). And the DNS server essentially 
exposes the
 information in YARN service registry by translating them into DNS records such 
as A record and SRV record.
 Clients can then discover the IPs of containers via standard DNS lookup.
 
 The previous read mechanisms of YARN Service Registry were limited to a 
registry specific (java) API and a REST interface and are difficult
-to wireup existing clients and services. The DNS based service discovery 
eliminates this gap. Please refer to this [Service Discovery 
doc](ServiceDiscovery.md)
+to wireup existing clients and services. The DNS based service discovery 
eliminates this gap. Please refer to this [Service Discovery 
doc](ServiceDiscovery.html)
 for more details.
\ No newline at end of file
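As a small illustration of the DNS lookup described above (not part of the patch): once RegistryDNS serves the zone, any stock resolver can find a container. The hostname below is hypothetical, following the component-instance.service.user.domain naming convention:

import java.net.InetAddress;

public class ServiceDnsLookup {
  public static void main(String[] args) throws Exception {
    // Hypothetical name; substitute your own service, user and DNS zone.
    String host = "compa-0.myservice.hadoop.example.com";
    InetAddress addr = InetAddress.getByName(host);
    System.out.println(host + " -> " + addr.getHostAddress());
  }
}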

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53abef4e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
index 72c2d3e..8e2bf9a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
@@ -46,16 +46,16 @@ The benefits of combining these workloads are two-fold:
 
 *`This feature is in alpha state`* and so APIs, command lines are subject to 
change. We will continue 

[1/2] hadoop git commit: YARN-8116. Nodemanager fails with NumberFormatException: For input string: "". (Chandni Singh via wangda)

2018-04-10 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk c467f311d -> d919eb6ef


YARN-8116. Nodemanager fails with NumberFormatException: For input string: "". 
(Chandni Singh via wangda)

Change-Id: Idd30cfca59982d3fc6e47aa1b88f844a78fae94d


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bf9cc2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bf9cc2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bf9cc2c

Branch: refs/heads/trunk
Commit: 2bf9cc2c73944c9f7cde56714b8cf6995cfa539b
Parents: c467f31
Author: Wangda Tan 
Authored: Tue Apr 10 17:32:38 2018 -0700
Committer: Wangda Tan 
Committed: Tue Apr 10 17:32:38 2018 -0700

--
 .../containermanager/container/ContainerImpl.java   |  3 ++-
 .../recovery/NMLeveldbStateStoreService.java|  4 +++-
 .../recovery/TestNMLeveldbStateStoreService.java| 16 
 3 files changed, 21 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bf9cc2c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 2115100..c09c7f1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -2191,7 +2191,8 @@ public class ContainerImpl implements Container {
   }
 
   private void storeRetryContext() {
-    if (windowRetryContext.getRestartTimes() != null) {
+    if (windowRetryContext.getRestartTimes() != null &&
+        !windowRetryContext.getRestartTimes().isEmpty()) {
       try {
         stateStore.storeContainerRestartTimes(containerId,
             windowRetryContext.getRestartTimes());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bf9cc2c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index bf4c0ad..723dd48 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -347,7 +347,9 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
             value.substring(1, value.length() - 1).split(", ");
         List<Long> restartTimes = new ArrayList<>();
         for (String restartTime : unparsedRestartTimes) {
-          restartTimes.add(Long.parseLong(restartTime));
+          if (!restartTime.isEmpty()) {
+            restartTimes.add(Long.parseLong(restartTime));
+          }
         }
         rcs.setRestartTimes(restartTimes);
       } else if (suffix.equals(CONTAINER_WORK_DIR_KEY_SUFFIX)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bf9cc2c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
 

[2/2] hadoop git commit: YARN-8133. Doc link broken for yarn-service from overview page. (Rohith Sharma K S via wangda)

2018-04-10 Thread wangda
YARN-8133. Doc link broken for yarn-service from overview page. (Rohith Sharma 
K S via wangda)

Change-Id: Iacf9a004585dd59e1c0cd8f8c618a38f047cc0fe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d919eb6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d919eb6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d919eb6e

Branch: refs/heads/trunk
Commit: d919eb6efa1072517017c75fb323e391f4418dc8
Parents: 2bf9cc2
Author: Wangda Tan 
Authored: Tue Apr 10 17:33:15 2018 -0700
Committer: Wangda Tan 
Committed: Tue Apr 10 17:33:15 2018 -0700

--
 .../src/site/markdown/yarn-service/Concepts.md|  6 +++---
 .../src/site/markdown/yarn-service/Overview.md| 14 +++---
 .../src/site/markdown/yarn-service/QuickStart.md  | 12 ++--
 .../src/site/markdown/yarn-service/RegistryDNS.md |  4 ++--
 .../site/markdown/yarn-service/ServiceDiscovery.md|  4 ++--
 5 files changed, 20 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d919eb6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
index e567d03..5c77f17 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
@@ -29,7 +29,7 @@ affinity and anti-affinity scheduling, log aggregation for 
services, automatical
 A RESTful API server is developed to allow users to deploy and manage their services on YARN via a simple JSON spec. This saves users from dealing with low-level APIs and writing complex code to bring their services onto YARN. The REST layer acts as a unified, REST-based entry point for the creation and lifecycle management of YARN services. Services can range from simple single-component apps to complex,
-multi-component applications with special orchestration needs. Please refer to this [API doc](YarnServiceAPI.md) for detailed API documentation.
+multi-component applications with special orchestration needs. Please refer to this [API doc](YarnServiceAPI.html) for detailed API documentation.
 
 The API server is stateless, which means users can simply spin up multiple instances and put a load balancer in front of them to support HA and distribute load.
@@ -37,10 +37,10 @@ support HA and distribute load.
 ### Service Discovery
 A DNS server is implemented to enable discovering services on YARN via the standard mechanism: DNS lookup.
 
-The framework posts container information such as hostname and IP into the [YARN service registry](../registry/index.md), and the DNS server exposes the
+The framework posts container information such as hostname and IP into the [YARN service registry](../registry/index.html), and the DNS server exposes the
 information in the YARN service registry by translating it into DNS records such as A and SRV records.
 Clients can then discover the IPs of containers via standard DNS lookup.
 
 The previous read mechanisms of the YARN Service Registry were limited to a registry-specific (Java) API and a REST interface, which made it difficult
-to wire up existing clients and services. DNS-based service discovery closes this gap. Please refer to the [Service Discovery doc](ServiceDiscovery.md)
+to wire up existing clients and services. DNS-based service discovery closes this gap. Please refer to the [Service Discovery doc](ServiceDiscovery.html)
 for more details.
\ No newline at end of file
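To make the DNS text above concrete: once RegistryDNS publishes A and SRV records from the service registry, container IPs are resolvable with any stock resolver, with no registry API involved. A minimal sketch using a hypothetical hostname (the exact RegistryDNS naming scheme is defined in the RegistryDNS doc, not reproduced here):

import java.net.InetAddress;
import java.net.UnknownHostException;

public class ServiceDnsLookupSketch {
  public static void main(String[] args) throws UnknownHostException {
    // Hypothetical container hostname published by RegistryDNS.
    String host = "ctr-0.sleeper.ambari-qa.ycluster";
    for (InetAddress addr : InetAddress.getAllByName(host)) {
      System.out.println(host + " -> " + addr.getHostAddress());
    }
  }
}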

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d919eb6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
index 72c2d3e..8e2bf9a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
@@ -46,16 +46,16 @@ The benefits of combining these workloads are two-fold:
 
 *`This feature is in alpha state`*, so APIs and command lines are subject to change. We will continue to update the documents over time.
 
-[QuickStart](QuickStart.md) shows a 

hadoop git commit: YARN-7973. Added ContainerRelaunch feature for Docker containers. Contributed by Shane Kumpf

2018-04-10 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 583fa6ed4 -> c467f311d


YARN-7973. Added ContainerRelaunch feature for Docker containers.
   Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c467f311
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c467f311
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c467f311

Branch: refs/heads/trunk
Commit: c467f311d0c7155c09052d93fac12045af925583
Parents: 583fa6e
Author: Eric Yang 
Authored: Tue Apr 10 19:25:00 2018 -0400
Committer: Eric Yang 
Committed: Tue Apr 10 19:25:00 2018 -0400

--
 .../hadoop/yarn/api/ApplicationConstants.java   |  10 ++
 .../server/nodemanager/ContainerExecutor.java   |  11 ++
 .../nodemanager/DefaultContainerExecutor.java   |   6 +
 .../nodemanager/LinuxContainerExecutor.java | 124 +++
 .../launcher/ContainerLaunch.java   |  20 ++-
 .../launcher/ContainerRelaunch.java |   2 +-
 .../runtime/DefaultLinuxContainerRuntime.java   |   6 +
 .../DelegatingLinuxContainerRuntime.java|   9 ++
 .../runtime/DockerLinuxContainerRuntime.java|  40 +-
 .../JavaSandboxLinuxContainerRuntime.java   |  10 ++
 .../runtime/docker/DockerCommandExecutor.java   |  14 +++
 .../runtime/docker/DockerStartCommand.java  |  29 +
 .../runtime/ContainerRuntime.java   |  10 ++
 .../impl/container-executor.c   |   2 +-
 .../container-executor/impl/utils/docker-util.c |  40 ++
 .../container-executor/impl/utils/docker-util.h |  11 ++
 .../test/utils/test_docker_util.cc  |  20 +++
 .../nodemanager/TestLinuxContainerExecutor.java |  12 ++
 .../launcher/TestContainerRelaunch.java |   2 +-
 .../runtime/TestDockerContainerRuntime.java |  90 ++
 .../runtime/docker/TestDockerStartCommand.java  |  53 
 .../TestContainersMonitorResourceChange.java|   5 +
 22 files changed, 470 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c467f311/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index 64bcc44..38ad596 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -99,6 +99,16 @@ public interface ApplicationConstants {
   public static final String STDOUT = "stdout";
 
   /**
+   * The type of launch for the container.
+   */
+  @Public
+  @Unstable
+  enum ContainerLaunchType {
+LAUNCH,
+RELAUNCH
+  }
+
+  /**
* Environment for Applications.
*
* Some of the environment variables for applications are final
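The new ContainerLaunchType distinguishes a first launch from a relaunch so the Docker runtime can reuse the container it already created: per the DockerStartCommand entries in the diffstat, a relaunch can issue a `docker start` of the existing container rather than a fresh `docker run`. A simplified, self-contained sketch of that dispatch, with a local enum mirroring the one above and an illustrative container id:

public class RelaunchDispatchSketch {
  // Mirrors ApplicationConstants.ContainerLaunchType from the diff above.
  enum ContainerLaunchType { LAUNCH, RELAUNCH }

  static String dockerSubcommand(ContainerLaunchType type, String containerId) {
    // LAUNCH creates and starts a new container; RELAUNCH restarts the
    // container that already exists on the node.
    return type == ContainerLaunchType.LAUNCH
        ? "run --name=" + containerId + " <image> <command>"
        : "start " + containerId;
  }

  public static void main(String[] args) {
    String id = "container_e01_1234_01_000002"; // illustrative
    System.out.println(dockerSubcommand(ContainerLaunchType.LAUNCH, id));
    System.out.println(dockerSubcommand(ContainerLaunchType.RELAUNCH, id));
  }
}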

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c467f311/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index f566f48..8e335350 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -182,6 +182,17 @@ public abstract class ContainerExecutor implements 
Configurable {
   IOException, ConfigurationException;
 
   /**
+   * Relaunch the container on the node. This is a blocking call and returns
+   * only when the container exits.
+   * @param ctx Encapsulates information necessary for relaunching containers.
+   * @return the return status of the relaunch
+   * @throws IOException if the container relaunch fails
+   * @throws ConfigurationException if a configuration error is found
+   */
+  public abstract int relaunchContainer(ContainerStartContext ctx) throws
+  IOException, ConfigurationException;
+
+  
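For executors that have no cheaper relaunch path, the natural default is to treat a relaunch as an ordinary launch; the small DefaultContainerExecutor change in the diffstat is in this spirit. A hedged sketch with simplified signatures (Object stands in for the real ContainerStartContext):

import java.io.IOException;

// Simplified stand-in for ContainerExecutor.
public abstract class ExecutorRelaunchSketch {
  public abstract int launchContainer(Object ctx) throws IOException;

  // Default relaunch: behave exactly like a fresh launch.
  public int relaunchContainer(Object ctx) throws IOException {
    return launchContainer(ctx);
  }
}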

[1/2] hadoop git commit: HADOOP-14445. Delegation tokens are not shared between KMS instances. Contributed by Xiao Chen and Rushabh S Shah.

2018-04-10 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 5f8ab3a6b -> 714a079ff


http://git-wip-us.apache.org/repos/asf/hadoop/blob/714a079f/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 308c974..b67b8a1 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -1,3 +1,4 @@
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -30,20 +31,28 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersi
 import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
+import org.apache.hadoop.crypto.key.kms.KMSTokenRenewer;
 import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
+import org.apache.hadoop.crypto.key.kms.TestLoadBalancingKMSClientProvider;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.KMSUtil;
+import org.apache.hadoop.util.KMSUtilFaultInjector;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
@@ -63,7 +72,6 @@ import java.io.FileWriter;
 import java.io.IOException;
 import java.io.Writer;
 import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.SocketTimeoutException;
 import java.net.URI;
@@ -81,17 +89,46 @@ import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.Callable;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
+import static org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_KIND;
+import static 
org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_LEGACY_KIND;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 public class TestKMS {
   private static final Logger LOG = LoggerFactory.getLogger(TestKMS.class);
 
   private static final String SSL_RELOADER_THREAD_NAME =
   "Truststore reloader thread";
 
+  private final KMSUtilFaultInjector oldInjector =
+  KMSUtilFaultInjector.get();
+
+  // Injector to create providers with different ports. Only used in tests.
+  private final KMSUtilFaultInjector testInjector =
+  new KMSUtilFaultInjector() {
+@Override
+public KeyProvider createKeyProviderForTests(String value,
+Configuration conf) throws IOException {
+  return TestLoadBalancingKMSClientProvider
+  .createKeyProviderForTests(value, conf);
+}
+  };
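The KMSUtilFaultInjector usage above follows Hadoop's usual static-hook injection pattern: production code calls through a singleton whose default behavior is the normal one, and tests save the old instance, install an override, and restore it in tearDown. A generic sketch of the pattern (names hypothetical; only the get/override shape mirrors the diff):

public class InjectorPatternSketch {
  static class Injector {
    private static Injector instance = new Injector();
    static Injector get() { return instance; }
    static void set(Injector i) { instance = i; } // test-only setter
    // Production behavior; tests override this.
    String create(String value) { return "prod:" + value; }
  }

  public static void main(String[] args) {
    Injector old = Injector.get(); // save, like oldInjector above
    Injector.set(new Injector() {
      @Override String create(String value) { return "test:" + value; }
    });
    System.out.println(Injector.get().create("kms://...")); // test:kms://...
    Injector.set(old); // restore, as a tearDown would
  }
}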
+
   @Rule
  public final Timeout testTimeout = new Timeout(180000);
 
   @Before
-  public void cleanUp() {
+  public void setUp() throws Exception {
+GenericTestUtils.setLogLevel(KMSClientProvider.LOG, Level.TRACE);
+GenericTestUtils
+.setLogLevel(DelegationTokenAuthenticationHandler.LOG, Level.TRACE);
+GenericTestUtils
+.setLogLevel(DelegationTokenAuthenticator.LOG, Level.TRACE);
+GenericTestUtils.setLogLevel(KMSUtil.LOG, Level.TRACE);
 // resetting kerberos security
 Configuration conf = new Configuration();
 UserGroupInformation.setConfiguration(conf);
@@ -111,17 +148,71 @@ public class TestKMS {
   }
 
  public static abstract class KMSCallable<T> implements Callable<T> {
-private URL kmsUrl;
+private List<URL> kmsUrl;

[1/2] hadoop git commit: HADOOP-14445. Delegation tokens are not shared between KMS instances. Contributed by Xiao Chen and Rushabh S Shah.

2018-04-10 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 87485d40c -> 46ac59a9b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46ac59a9/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 712536f..b75eb76 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -1,3 +1,4 @@
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -30,19 +31,27 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersi
 import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
+import org.apache.hadoop.crypto.key.kms.KMSTokenRenewer;
 import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
+import org.apache.hadoop.crypto.key.kms.TestLoadBalancingKMSClientProvider;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.KMSUtil;
+import org.apache.hadoop.util.KMSUtilFaultInjector;
 import org.apache.hadoop.util.Time;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
@@ -64,7 +73,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.Writer;
 import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.SocketTimeoutException;
 import java.net.URI;
@@ -82,7 +90,14 @@ import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.Callable;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
+import static org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_KIND;
+import static 
org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_LEGACY_KIND;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 public class TestKMS {
   private static final Logger LOG = LoggerFactory.getLogger(TestKMS.class);
@@ -90,11 +105,31 @@ public class TestKMS {
   private static final String SSL_RELOADER_THREAD_NAME =
   "Truststore reloader thread";
 
+  private final KMSUtilFaultInjector oldInjector =
+  KMSUtilFaultInjector.get();
+
+  // Injector to create providers with different ports. Only used in tests.
+  private final KMSUtilFaultInjector testInjector =
+  new KMSUtilFaultInjector() {
+@Override
+public KeyProvider createKeyProviderForTests(String value,
+Configuration conf) throws IOException {
+  return TestLoadBalancingKMSClientProvider
+  .createKeyProviderForTests(value, conf);
+}
+  };
+
   @Rule
  public final Timeout testTimeout = new Timeout(180000);
 
   @Before
-  public void cleanUp() {
+  public void setUp() throws Exception {
+GenericTestUtils.setLogLevel(KMSClientProvider.LOG, Level.TRACE);
+GenericTestUtils
+.setLogLevel(DelegationTokenAuthenticationHandler.LOG, Level.TRACE);
+GenericTestUtils
+.setLogLevel(DelegationTokenAuthenticator.LOG, Level.TRACE);
+GenericTestUtils.setLogLevel(KMSUtil.LOG, Level.TRACE);
 // resetting kerberos security
 Configuration conf = new Configuration();
 UserGroupInformation.setConfiguration(conf);
@@ -114,17 +149,71 @@ public class TestKMS {
   }
 
  public static abstract class KMSCallable<T> implements Callable<T> {
-private URL kmsUrl;
+private List<URL> kmsUrl;

[2/2] hadoop git commit: HADOOP-14445. Delegation tokens are not shared between KMS instances. Contributed by Xiao Chen and Rushabh S Shah.

2018-04-10 Thread xiao
HADOOP-14445. Delegation tokens are not shared between KMS instances. 
Contributed by Xiao Chen and Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/714a079f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/714a079f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/714a079f

Branch: refs/heads/branch-2.8
Commit: 714a079ffb88540ec1e09d5023c35e1fa0dd016d
Parents: 5f8ab3a
Author: Xiao Chen 
Authored: Tue Apr 10 15:47:42 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 15:48:19 2018 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   | 214 
 .../crypto/key/kms/KMSDelegationToken.java  |  22 +-
 .../crypto/key/kms/KMSLegacyTokenRenewer.java   |  56 ++
 .../hadoop/crypto/key/kms/KMSTokenRenewer.java  | 103 
 .../hadoop/crypto/key/kms/package-info.java |  18 +
 .../fs/CommonConfigurationKeysPublic.java   |  10 +
 .../web/DelegationTokenAuthenticatedURL.java|  21 +-
 .../DelegationTokenAuthenticationHandler.java   |   8 +-
 .../web/DelegationTokenAuthenticator.java   |   2 +-
 .../java/org/apache/hadoop/util/KMSUtil.java|  45 +-
 .../hadoop/util/KMSUtilFaultInjector.java   |  49 ++
 ...apache.hadoop.security.token.TokenIdentifier |   1 +
 ...rg.apache.hadoop.security.token.TokenRenewer |   3 +-
 .../src/main/resources/core-default.xml |  20 +
 .../crypto/key/kms/TestKMSClientProvider.java   | 166 ++
 .../kms/TestLoadBalancingKMSClientProvider.java |  67 ++-
 .../org/apache/hadoop/util/TestKMSUtil.java |  65 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 507 ---
 18 files changed, 1176 insertions(+), 201 deletions(-)
--
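The diffstat shows one renewer per token kind (KMSTokenRenewer for the new kind, KMSLegacyTokenRenewer for the legacy one) plus one added line in the TokenRenewer service file: Hadoop picks a renewer by iterating the ServiceLoader-registered TokenRenewers and asking each handleKind(kind). A self-contained sketch of that selection, with placeholder kind strings (the real kinds are KMSDelegationToken.TOKEN_KIND and TOKEN_LEGACY_KIND):

import java.util.Arrays;
import java.util.List;

public class RenewerSelectionSketch {
  interface Renewer {
    boolean handleKind(String kind);
    String name();
  }

  public static void main(String[] args) {
    // In Hadoop these come from META-INF/services entries for
    // org.apache.hadoop.security.token.TokenRenewer; simulated here.
    List<Renewer> renewers = Arrays.asList(
        new Renewer() {
          public boolean handleKind(String k) { return "new-kind".equals(k); }
          public String name() { return "KMSTokenRenewer"; }
        },
        new Renewer() {
          public boolean handleKind(String k) { return "legacy-kind".equals(k); }
          public String name() { return "KMSLegacyTokenRenewer"; }
        });
    for (String kind : new String[] {"new-kind", "legacy-kind"}) {
      for (Renewer r : renewers) {
        if (r.handleKind(kind)) {
          System.out.println(kind + " -> " + r.name());
          break;
        }
      }
    }
  }
}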


http://git-wip-us.apache.org/repos/asf/hadoop/blob/714a079f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index e165ca2..59ec9cc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -36,11 +36,11 @@ import 
org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenRenewer;
+import org.apache.hadoop.security.token.TokenSelector;
 import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.hadoop.util.HttpExceptionUtils;
-import org.apache.hadoop.util.KMSUtil;
 import org.apache.http.client.utils.URIBuilder;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.slf4j.Logger;
@@ -82,6 +82,9 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_DEFAULT;
+
 /**
  * KMS client KeyProvider implementation.
  */
@@ -89,16 +92,13 @@ import com.google.common.base.Strings;
 public class KMSClientProvider extends KeyProvider implements CryptoExtension,
 KeyProviderDelegationTokenExtension.DelegationTokenExtension {
 
-  private static final Logger LOG =
+  public static final Logger LOG =
   LoggerFactory.getLogger(KMSClientProvider.class);
 
   private static final String INVALID_SIGNATURE = "Invalid signature";
 
   private static final String ANONYMOUS_REQUESTS_DISALLOWED = "Anonymous 
requests are disallowed";
 
-  public static final String TOKEN_KIND_STR = 
KMSDelegationToken.TOKEN_KIND_STR;
-  public static final Text TOKEN_KIND = KMSDelegationToken.TOKEN_KIND;
-
   public static final String SCHEME_NAME = "kms";
 
   private static final String UTF8 = "UTF-8";
@@ -123,12 +123,17 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 
   private final ValueQueue encKeyVersionQueue;
 
+  /* dtService defines the token service value for the KMS token.
+   * The value can be in the legacy format, which is ip:port, or it can be a URI.
+   * If it's a URI
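That truncated comment is the heart of HADOOP-14445: the legacy token service carries a single ip:port, so a token fetched from one KMS instance never matched its load-balanced peers, while the URI form matches whatever provider string the client configured (with a legacy-kind copy kept for old clients, per KMS_CLIENT_COPY_LEGACY_TOKEN_KEY). A sketch of the two service forms, with illustrative values only:

public class KmsTokenServiceSketch {
  public static void main(String[] args) {
    // Legacy service: the one instance the token happened to come from.
    String legacyService = "10.0.0.12:9600";
    // URI-style service: stable across all instances behind the provider URI.
    String uriService =
        "kms://https@kms1.example.com;kms2.example.com:9600/kms";
    System.out.println("legacy kind service: " + legacyService);
    System.out.println("uri kind service:    " + uriService);
  }
}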

[2/2] hadoop git commit: HADOOP-14445. Delegation tokens are not shared between KMS instances. Contributed by Xiao Chen and Rushabh S Shah.

2018-04-10 Thread xiao
HADOOP-14445. Delegation tokens are not shared between KMS instances. 
Contributed by Xiao Chen and Rushabh S Shah.

(cherry picked from commit 95cedc5587a495b46748973218454be87ba8b92e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46ac59a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46ac59a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46ac59a9

Branch: refs/heads/branch-2.9
Commit: 46ac59a9bd464da35467ce924f980368a4b5cad4
Parents: 87485d4
Author: Xiao Chen 
Authored: Tue Apr 10 15:46:30 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 15:48:10 2018 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   | 214 
 .../crypto/key/kms/KMSDelegationToken.java  |  22 +-
 .../crypto/key/kms/KMSLegacyTokenRenewer.java   |  56 ++
 .../hadoop/crypto/key/kms/KMSTokenRenewer.java  | 103 
 .../hadoop/crypto/key/kms/package-info.java |  18 +
 .../fs/CommonConfigurationKeysPublic.java   |  10 +
 .../web/DelegationTokenAuthenticatedURL.java|  21 +-
 .../DelegationTokenAuthenticationHandler.java   |   8 +-
 .../web/DelegationTokenAuthenticator.java   |   2 +-
 .../java/org/apache/hadoop/util/KMSUtil.java|  45 +-
 .../hadoop/util/KMSUtilFaultInjector.java   |  49 ++
 ...apache.hadoop.security.token.TokenIdentifier |   1 +
 ...rg.apache.hadoop.security.token.TokenRenewer |   3 +-
 .../src/main/resources/core-default.xml |  20 +
 .../crypto/key/kms/TestKMSClientProvider.java   | 166 ++
 .../kms/TestLoadBalancingKMSClientProvider.java |  67 ++-
 .../org/apache/hadoop/util/TestKMSUtil.java |  65 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 505 ---
 18 files changed, 1174 insertions(+), 201 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46ac59a9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index cdd494f..536de53 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -36,11 +36,11 @@ import 
org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenRenewer;
+import org.apache.hadoop.security.token.TokenSelector;
 import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.hadoop.util.HttpExceptionUtils;
-import org.apache.hadoop.util.KMSUtil;
 import org.apache.http.client.utils.URIBuilder;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.map.ObjectWriter;
@@ -83,6 +83,9 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_DEFAULT;
+
 /**
  * KMS client KeyProvider implementation.
  */
@@ -90,16 +93,13 @@ import com.google.common.base.Strings;
 public class KMSClientProvider extends KeyProvider implements CryptoExtension,
 KeyProviderDelegationTokenExtension.DelegationTokenExtension {
 
-  private static final Logger LOG =
+  public static final Logger LOG =
   LoggerFactory.getLogger(KMSClientProvider.class);
 
   private static final String INVALID_SIGNATURE = "Invalid signature";
 
   private static final String ANONYMOUS_REQUESTS_DISALLOWED = "Anonymous 
requests are disallowed";
 
-  public static final String TOKEN_KIND_STR = 
KMSDelegationToken.TOKEN_KIND_STR;
-  public static final Text TOKEN_KIND = KMSDelegationToken.TOKEN_KIND;
-
   public static final String SCHEME_NAME = "kms";
 
   private static final String UTF8 = "UTF-8";
@@ -127,12 +127,17 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   private static final ObjectWriter WRITER =
   new ObjectMapper().writerWithDefaultPrettyPrinter();
 
+  /* dtService defines 

[1/2] hadoop git commit: HADOOP-14445. Delegation tokens are not shared between KMS instances. Contributed by Xiao Chen and Rushabh S Shah.

2018-04-10 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0fb1457d8 -> 95cedc558


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95cedc55/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 712536f..b75eb76 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -1,3 +1,4 @@
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -30,19 +31,27 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersi
 import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
+import org.apache.hadoop.crypto.key.kms.KMSTokenRenewer;
 import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
+import org.apache.hadoop.crypto.key.kms.TestLoadBalancingKMSClientProvider;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.KMSUtil;
+import org.apache.hadoop.util.KMSUtilFaultInjector;
 import org.apache.hadoop.util.Time;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
@@ -64,7 +73,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.Writer;
 import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.SocketTimeoutException;
 import java.net.URI;
@@ -82,7 +90,14 @@ import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.Callable;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
+import static org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_KIND;
+import static 
org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_LEGACY_KIND;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 public class TestKMS {
   private static final Logger LOG = LoggerFactory.getLogger(TestKMS.class);
@@ -90,11 +105,31 @@ public class TestKMS {
   private static final String SSL_RELOADER_THREAD_NAME =
   "Truststore reloader thread";
 
+  private final KMSUtilFaultInjector oldInjector =
+  KMSUtilFaultInjector.get();
+
+  // Injector to create providers with different ports. Only used in tests.
+  private final KMSUtilFaultInjector testInjector =
+  new KMSUtilFaultInjector() {
+@Override
+public KeyProvider createKeyProviderForTests(String value,
+Configuration conf) throws IOException {
+  return TestLoadBalancingKMSClientProvider
+  .createKeyProviderForTests(value, conf);
+}
+  };
+
   @Rule
  public final Timeout testTimeout = new Timeout(180000);
 
   @Before
-  public void cleanUp() {
+  public void setUp() throws Exception {
+GenericTestUtils.setLogLevel(KMSClientProvider.LOG, Level.TRACE);
+GenericTestUtils
+.setLogLevel(DelegationTokenAuthenticationHandler.LOG, Level.TRACE);
+GenericTestUtils
+.setLogLevel(DelegationTokenAuthenticator.LOG, Level.TRACE);
+GenericTestUtils.setLogLevel(KMSUtil.LOG, Level.TRACE);
 // resetting kerberos security
 Configuration conf = new Configuration();
 UserGroupInformation.setConfiguration(conf);
@@ -114,17 +149,71 @@ public class TestKMS {
   }
 
  public static abstract class KMSCallable<T> implements Callable<T> {
-private URL kmsUrl;
+private List<URL> kmsUrl;

[2/2] hadoop git commit: HADOOP-14445. Delegation tokens are not shared between KMS instances. Contributed by Xiao Chen and Rushabh S Shah.

2018-04-10 Thread xiao
HADOOP-14445. Delegation tokens are not shared between KMS instances. 
Contributed by Xiao Chen and Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95cedc55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95cedc55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95cedc55

Branch: refs/heads/branch-2
Commit: 95cedc5587a495b46748973218454be87ba8b92e
Parents: 0fb1457
Author: Xiao Chen 
Authored: Tue Apr 10 15:46:30 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 15:46:46 2018 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   | 214 
 .../crypto/key/kms/KMSDelegationToken.java  |  22 +-
 .../crypto/key/kms/KMSLegacyTokenRenewer.java   |  56 ++
 .../hadoop/crypto/key/kms/KMSTokenRenewer.java  | 103 
 .../hadoop/crypto/key/kms/package-info.java |  18 +
 .../fs/CommonConfigurationKeysPublic.java   |  10 +
 .../web/DelegationTokenAuthenticatedURL.java|  21 +-
 .../DelegationTokenAuthenticationHandler.java   |   8 +-
 .../web/DelegationTokenAuthenticator.java   |   2 +-
 .../java/org/apache/hadoop/util/KMSUtil.java|  45 +-
 .../hadoop/util/KMSUtilFaultInjector.java   |  49 ++
 ...apache.hadoop.security.token.TokenIdentifier |   1 +
 ...rg.apache.hadoop.security.token.TokenRenewer |   3 +-
 .../src/main/resources/core-default.xml |  20 +
 .../crypto/key/kms/TestKMSClientProvider.java   | 166 ++
 .../kms/TestLoadBalancingKMSClientProvider.java |  67 ++-
 .../org/apache/hadoop/util/TestKMSUtil.java |  65 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 505 ---
 18 files changed, 1174 insertions(+), 201 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95cedc55/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index cdd494f..536de53 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -36,11 +36,11 @@ import 
org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenRenewer;
+import org.apache.hadoop.security.token.TokenSelector;
 import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.hadoop.util.HttpExceptionUtils;
-import org.apache.hadoop.util.KMSUtil;
 import org.apache.http.client.utils.URIBuilder;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.map.ObjectWriter;
@@ -83,6 +83,9 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_DEFAULT;
+
 /**
  * KMS client KeyProvider implementation.
  */
@@ -90,16 +93,13 @@ import com.google.common.base.Strings;
 public class KMSClientProvider extends KeyProvider implements CryptoExtension,
 KeyProviderDelegationTokenExtension.DelegationTokenExtension {
 
-  private static final Logger LOG =
+  public static final Logger LOG =
   LoggerFactory.getLogger(KMSClientProvider.class);
 
   private static final String INVALID_SIGNATURE = "Invalid signature";
 
   private static final String ANONYMOUS_REQUESTS_DISALLOWED = "Anonymous 
requests are disallowed";
 
-  public static final String TOKEN_KIND_STR = 
KMSDelegationToken.TOKEN_KIND_STR;
-  public static final Text TOKEN_KIND = KMSDelegationToken.TOKEN_KIND;
-
   public static final String SCHEME_NAME = "kms";
 
   private static final String UTF8 = "UTF-8";
@@ -127,12 +127,17 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   private static final ObjectWriter WRITER =
   new ObjectMapper().writerWithDefaultPrettyPrinter();
 
+  /* dtService defines the token service value for the kms token.
+   * The value can be 

[3/3] hadoop git commit: HADOOP-14445. Delegation tokens are not shared between KMS instances. Contributed by Xiao Chen and Rushabh S Shah.

2018-04-10 Thread xiao
HADOOP-14445. Delegation tokens are not shared between KMS instances. 
Contributed by Xiao Chen and Rushabh S Shah.

(cherry picked from commit 583fa6ed48ad3df40bcaa9c591d5ccd07ce3ea81)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d6f65f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d6f65f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d6f65f2

Branch: refs/heads/branch-3.1
Commit: 6d6f65f224eee8cc425c4bed0ed3b3716445950b
Parents: 96af1af
Author: Xiao Chen 
Authored: Tue Apr 10 15:26:33 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 15:45:35 2018 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   | 212 
 .../crypto/key/kms/KMSDelegationToken.java  |  22 +-
 .../crypto/key/kms/KMSLegacyTokenRenewer.java   |  56 ++
 .../hadoop/crypto/key/kms/KMSTokenRenewer.java  | 103 
 .../hadoop/crypto/key/kms/package-info.java |  18 +
 .../fs/CommonConfigurationKeysPublic.java   |  10 +
 .../web/DelegationTokenAuthenticatedURL.java|  21 +-
 .../DelegationTokenAuthenticationHandler.java   |   8 +-
 .../web/DelegationTokenAuthenticator.java   |   2 +-
 .../java/org/apache/hadoop/util/KMSUtil.java|  45 +-
 .../hadoop/util/KMSUtilFaultInjector.java   |  49 ++
 ...apache.hadoop.security.token.TokenIdentifier |   1 +
 ...rg.apache.hadoop.security.token.TokenRenewer |   3 +-
 .../src/main/resources/core-default.xml |  20 +
 .../crypto/key/kms/TestKMSClientProvider.java   | 162 ++
 .../kms/TestLoadBalancingKMSClientProvider.java |  67 ++-
 .../org/apache/hadoop/util/TestKMSUtil.java |  65 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 519 ---
 18 files changed, 1180 insertions(+), 203 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d6f65f2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 2eb2e21..f97fde7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -36,8 +36,9 @@ import 
org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenRenewer;
+import org.apache.hadoop.security.token.TokenSelector;
 import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.hadoop.util.KMSUtil;
@@ -82,6 +83,8 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_DEFAULT;
 import static org.apache.hadoop.util.KMSUtil.checkNotEmpty;
 import static org.apache.hadoop.util.KMSUtil.checkNotNull;
 import static org.apache.hadoop.util.KMSUtil.parseJSONEncKeyVersion;
@@ -96,16 +99,13 @@ import static 
org.apache.hadoop.util.KMSUtil.parseJSONMetadata;
 public class KMSClientProvider extends KeyProvider implements CryptoExtension,
 KeyProviderDelegationTokenExtension.DelegationTokenExtension {
 
-  private static final Logger LOG =
+  public static final Logger LOG =
   LoggerFactory.getLogger(KMSClientProvider.class);
 
   private static final String INVALID_SIGNATURE = "Invalid signature";
 
   private static final String ANONYMOUS_REQUESTS_DISALLOWED = "Anonymous 
requests are disallowed";
 
-  public static final String TOKEN_KIND_STR = 
KMSDelegationToken.TOKEN_KIND_STR;
-  public static final Text TOKEN_KIND = KMSDelegationToken.TOKEN_KIND;
-
   public static final String SCHEME_NAME = "kms";
 
   private static final String UTF8 = "UTF-8";
@@ -133,12 +133,17 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   private static final ObjectWriter WRITER =
   new ObjectMapper().writerWithDefaultPrettyPrinter();
 
+  /* 

[1/3] hadoop git commit: HADOOP-15313. TestKMS should close providers.

2018-04-10 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 ffceb907f -> 6d6f65f22


HADOOP-15313. TestKMS should close providers.

(cherry picked from commit c22d62b338cb16d93c4576a9c634041e3610a116)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96af1af2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96af1af2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96af1af2

Branch: refs/heads/branch-3.1
Commit: 96af1af28f67de57c9ef738d21b47f4f47380a3b
Parents: ffceb90
Author: Xiao Chen 
Authored: Mon Mar 26 15:59:17 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 15:45:34 2018 -0700

--
 .../apache/hadoop/io/MultipleIOException.java   | 10 ++
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 38 
 2 files changed, 41 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96af1af2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
index 66c1ab1..c9d7ade 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
@@ -76,5 +76,15 @@ public class MultipleIOException extends IOException {
 public IOException build() {
   return createIOException(exceptions);
 }
+
+/**
+ * @return whether any exception was added.
+ */
+public boolean isEmpty() {
+  if (exceptions == null) {
+return true;
+  }
+  return exceptions.isEmpty();
+}
   }
 }
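The new isEmpty() accessor exists so aggregating callers can tell whether anything failed before building; that is exactly the shape of the TestKMS tearDown below, which closes every tracked provider and throws once at the end. A sketch of that usage, assuming the Builder.add(Throwable) accumulator this class already provides:

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.io.MultipleIOException;

public class CloseAllSketch {
  // Close everything, collecting failures instead of failing fast.
  public static void closeAll(List<? extends Closeable> created)
      throws IOException {
    final MultipleIOException.Builder builder =
        new MultipleIOException.Builder();
    for (Closeable c : created) {
      try {
        c.close();
      } catch (IOException e) {
        builder.add(e);
      }
    }
    if (!builder.isEmpty()) {
      throw builder.build(); // one IOException wrapping all failures
    }
  }
}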

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96af1af2/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 1189fbf..1517b04 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -35,6 +35,7 @@ import 
org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.ValueQueue;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
@@ -84,6 +85,7 @@ import java.util.Collection;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
@@ -111,6 +113,10 @@ public class TestKMS {
 
   private SSLFactory sslFactory;
 
+  // Keep track of all key providers created during a test case, so they can be
+  // closed at test tearDown.
+  private List<KeyProvider> providersCreated = new LinkedList<>();
+
   @Rule
  public final Timeout testTimeout = new Timeout(180000);
 
@@ -144,13 +150,17 @@ public class TestKMS {
 
   protected KeyProvider createProvider(URI uri, Configuration conf)
   throws IOException {
-return new LoadBalancingKMSClientProvider(
-new KMSClientProvider[] { new KMSClientProvider(uri, conf) }, conf);
+final KeyProvider ret = new LoadBalancingKMSClientProvider(
+new KMSClientProvider[] {new KMSClientProvider(uri, conf)}, conf);
+providersCreated.add(ret);
+return ret;
   }
 
   private KMSClientProvider createKMSClientProvider(URI uri, Configuration 
conf)
   throws IOException {
-return new KMSClientProvider(uri, conf);
+final KMSClientProvider ret = new KMSClientProvider(uri, conf);
+providersCreated.add(ret);
+return ret;
   }
 
  protected <T> T runServer(String keystore, String password, File confDir,
@@ -311,13 +321,28 @@ public class TestKMS {
   }
 
   @After
-  public void tearDownMiniKdc() throws Exception {
+  public void tearDown() throws Exception {
 if (kdc != null) {
   kdc.stop();
   kdc = null;
 }
 UserGroupInformation.setShouldRenewImmediatelyForTests(false);
 UserGroupInformation.reset();
+if (!providersCreated.isEmpty()) {
+  final 

[1/3] hadoop git commit: HADOOP-15313. TestKMS should close providers.

2018-04-10 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 eef5d1947 -> 72acda144


HADOOP-15313. TestKMS should close providers.

(cherry picked from commit c22d62b338cb16d93c4576a9c634041e3610a116)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46edbedd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46edbedd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46edbedd

Branch: refs/heads/branch-3.0
Commit: 46edbedd99771db03cfdf82bd4179f13fcb8405d
Parents: eef5d19
Author: Xiao Chen 
Authored: Mon Mar 26 15:59:17 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 15:45:47 2018 -0700

--
 .../apache/hadoop/io/MultipleIOException.java   | 10 ++
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 38 
 2 files changed, 41 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edbedd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
index 66c1ab1..c9d7ade 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
@@ -76,5 +76,15 @@ public class MultipleIOException extends IOException {
 public IOException build() {
   return createIOException(exceptions);
 }
+
+/**
+ * @return whether any exception was added.
+ */
+public boolean isEmpty() {
+  if (exceptions == null) {
+return true;
+  }
+  return exceptions.isEmpty();
+}
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46edbedd/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index f7ecf44..73b1f64 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -35,6 +35,7 @@ import 
org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.ValueQueue;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
@@ -84,6 +85,7 @@ import java.util.Collection;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
@@ -111,6 +113,10 @@ public class TestKMS {
 
   private SSLFactory sslFactory;
 
+  // Keep track of all key providers created during a test case, so they can be
+  // closed at test tearDown.
+  private List<KeyProvider> providersCreated = new LinkedList<>();
+
   @Rule
  public final Timeout testTimeout = new Timeout(180000);
 
@@ -144,13 +150,17 @@ public class TestKMS {
 
   protected KeyProvider createProvider(URI uri, Configuration conf)
   throws IOException {
-return new LoadBalancingKMSClientProvider(
-new KMSClientProvider[] { new KMSClientProvider(uri, conf) }, conf);
+final KeyProvider ret = new LoadBalancingKMSClientProvider(
+new KMSClientProvider[] {new KMSClientProvider(uri, conf)}, conf);
+providersCreated.add(ret);
+return ret;
   }
 
   private KMSClientProvider createKMSClientProvider(URI uri, Configuration 
conf)
   throws IOException {
-return new KMSClientProvider(uri, conf);
+final KMSClientProvider ret = new KMSClientProvider(uri, conf);
+providersCreated.add(ret);
+return ret;
   }
 
  protected <T> T runServer(String keystore, String password, File confDir,
@@ -311,13 +321,28 @@ public class TestKMS {
   }
 
   @After
-  public void tearDownMiniKdc() throws Exception {
+  public void tearDown() throws Exception {
 if (kdc != null) {
   kdc.stop();
   kdc = null;
 }
 UserGroupInformation.setShouldRenewImmediatelyForTests(false);
 UserGroupInformation.reset();
+if (!providersCreated.isEmpty()) {
+  final 

[2/3] hadoop git commit: HADOOP-14445. Delegation tokens are not shared between KMS instances. Contributed by Xiao Chen and Rushabh S Shah.

2018-04-10 Thread xiao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/72acda14/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 73b1f64..8b37511 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -1,3 +1,4 @@
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -31,26 +32,35 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersi
 import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
+import org.apache.hadoop.crypto.key.kms.KMSTokenRenewer;
 import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
+import org.apache.hadoop.crypto.key.kms.TestLoadBalancingKMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.ValueQueue;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.KMSUtil;
+import org.apache.hadoop.util.KMSUtilFaultInjector;
 import org.apache.hadoop.util.Time;
 import org.apache.http.client.utils.URIBuilder;
 import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
@@ -71,7 +81,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.Writer;
 import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.SocketTimeoutException;
 import java.net.URI;
@@ -96,6 +105,10 @@ import java.util.concurrent.LinkedBlockingQueue;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
+import static org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_KIND;
+import static 
org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_LEGACY_KIND;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -113,6 +126,20 @@ public class TestKMS {
 
   private SSLFactory sslFactory;
 
+  private final KMSUtilFaultInjector oldInjector =
+  KMSUtilFaultInjector.get();
+
+  // Injector to create providers with different ports. Only used in tests.
+  private final KMSUtilFaultInjector testInjector =
+  new KMSUtilFaultInjector() {
+@Override
+public KeyProvider createKeyProviderForTests(String value,
+Configuration conf) throws IOException {
+  return TestLoadBalancingKMSClientProvider
+  .createKeyProviderForTests(value, conf);
+}
+  };
+
   // Keep track of all key providers created during a test case, so they can be
   // closed at test tearDown.
   private List<KeyProvider> providersCreated = new LinkedList<>();
@@ -122,7 +149,12 @@ public class TestKMS {
 
   @Before
   public void setUp() throws Exception {
-setUpMiniKdc();
+GenericTestUtils.setLogLevel(KMSClientProvider.LOG, Level.TRACE);
+GenericTestUtils
+.setLogLevel(DelegationTokenAuthenticationHandler.LOG, Level.TRACE);
+GenericTestUtils
+.setLogLevel(DelegationTokenAuthenticator.LOG, Level.TRACE);
+GenericTestUtils.setLogLevel(KMSUtil.LOG, Level.TRACE);
 // resetting kerberos security
 Configuration conf = new Configuration();
 

[2/3] hadoop git commit: HADOOP-14445. Delegation tokens are not shared between KMS instances. Contributed by Xiao Chen and Rushabh S Shah.

2018-04-10 Thread xiao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d6f65f2/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 1517b04..c171143 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -1,3 +1,4 @@
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -31,26 +32,35 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersi
 import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
+import org.apache.hadoop.crypto.key.kms.KMSTokenRenewer;
 import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
+import org.apache.hadoop.crypto.key.kms.TestLoadBalancingKMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.ValueQueue;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.KMSUtil;
+import org.apache.hadoop.util.KMSUtilFaultInjector;
 import org.apache.hadoop.util.Time;
 import org.apache.http.client.utils.URIBuilder;
 import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
@@ -71,7 +81,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.Writer;
 import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.SocketTimeoutException;
 import java.net.URI;
@@ -96,6 +105,10 @@ import java.util.concurrent.LinkedBlockingQueue;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
+import static org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_KIND;
+import static 
org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_LEGACY_KIND;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -113,6 +126,20 @@ public class TestKMS {
 
   private SSLFactory sslFactory;
 
+  private final KMSUtilFaultInjector oldInjector =
+  KMSUtilFaultInjector.get();
+
+  // Injector to create providers with different ports. Can only happen in 
tests
+  private final KMSUtilFaultInjector testInjector =
+  new KMSUtilFaultInjector() {
+@Override
+public KeyProvider createKeyProviderForTests(String value,
+Configuration conf) throws IOException {
+  return TestLoadBalancingKMSClientProvider
+  .createKeyProviderForTests(value, conf);
+}
+  };
+
   // Keep track of all key providers created during a test case, so they can be
   // closed at test tearDown.
  private List<KeyProvider> providersCreated = new LinkedList<>();
@@ -122,7 +149,12 @@ public class TestKMS {
 
   @Before
   public void setUp() throws Exception {
-setUpMiniKdc();
+GenericTestUtils.setLogLevel(KMSClientProvider.LOG, Level.TRACE);
+GenericTestUtils
+.setLogLevel(DelegationTokenAuthenticationHandler.LOG, Level.TRACE);
+GenericTestUtils
+.setLogLevel(DelegationTokenAuthenticator.LOG, Level.TRACE);
+GenericTestUtils.setLogLevel(KMSUtil.LOG, Level.TRACE);
 // resetting kerberos security
 Configuration conf = new Configuration();
 

[3/3] hadoop git commit: HADOOP-14445. Delegation tokens are not shared between KMS instances. Contributed by Xiao Chen and Rushabh S Shah.

2018-04-10 Thread xiao
HADOOP-14445. Delegation tokens are not shared between KMS instances. 
Contributed by Xiao Chen and Rushabh S Shah.

(cherry picked from commit 583fa6ed48ad3df40bcaa9c591d5ccd07ce3ea81)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72acda14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72acda14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72acda14

Branch: refs/heads/branch-3.0
Commit: 72acda1449893d90d410291a5a7f04580f8eb562
Parents: 46edbed
Author: Xiao Chen 
Authored: Tue Apr 10 15:26:33 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 15:45:48 2018 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   | 212 
 .../crypto/key/kms/KMSDelegationToken.java  |  22 +-
 .../crypto/key/kms/KMSLegacyTokenRenewer.java   |  56 ++
 .../hadoop/crypto/key/kms/KMSTokenRenewer.java  | 103 
 .../hadoop/crypto/key/kms/package-info.java |  18 +
 .../fs/CommonConfigurationKeysPublic.java   |  10 +
 .../web/DelegationTokenAuthenticatedURL.java|  21 +-
 .../DelegationTokenAuthenticationHandler.java   |   8 +-
 .../web/DelegationTokenAuthenticator.java   |   2 +-
 .../java/org/apache/hadoop/util/KMSUtil.java|  45 +-
 .../hadoop/util/KMSUtilFaultInjector.java   |  49 ++
 ...apache.hadoop.security.token.TokenIdentifier |   1 +
 ...rg.apache.hadoop.security.token.TokenRenewer |   3 +-
 .../src/main/resources/core-default.xml |  20 +
 .../crypto/key/kms/TestKMSClientProvider.java   | 162 ++
 .../kms/TestLoadBalancingKMSClientProvider.java |  67 ++-
 .../org/apache/hadoop/util/TestKMSUtil.java |  65 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 519 ---
 18 files changed, 1180 insertions(+), 203 deletions(-)
--
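
As the static imports further down show, this patch adds a
KMS_CLIENT_COPY_LEGACY_TOKEN_KEY switch and a TOKEN_LEGACY_KIND so clients can
carry the same delegation token under both the new and the legacy kind. A
hedged sketch of what duplicating a token under a second kind looks like with
the public Token and Credentials APIs: the kind constants come from this patch,
everything else is invented for illustration, and the real patch also assigns
the two kinds different service fields, which this sketch glosses over.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public final class LegacyTokenCopy {
  private LegacyTokenCopy() {
  }

  // Clone a kms-dt token and re-label the copy with the legacy kind, so a
  // selector looking for either kind finds a usable token in the credentials.
  public static <T extends TokenIdentifier> void copyAsLegacy(
      Credentials creds, Token<T> kmsToken, Text legacyKind) {
    Token<T> legacy = new Token<>(kmsToken); // copies identifier and password
    legacy.setKind(legacyKind);              // e.g. KMSDelegationToken.TOKEN_LEGACY_KIND
    creds.addToken(legacy.getService(), legacy);
  }
}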


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72acda14/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 2eb2e21..f97fde7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -36,8 +36,9 @@ import 
org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenRenewer;
+import org.apache.hadoop.security.token.TokenSelector;
 import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.hadoop.util.KMSUtil;
@@ -82,6 +83,8 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_DEFAULT;
 import static org.apache.hadoop.util.KMSUtil.checkNotEmpty;
 import static org.apache.hadoop.util.KMSUtil.checkNotNull;
 import static org.apache.hadoop.util.KMSUtil.parseJSONEncKeyVersion;
@@ -96,16 +99,13 @@ import static 
org.apache.hadoop.util.KMSUtil.parseJSONMetadata;
 public class KMSClientProvider extends KeyProvider implements CryptoExtension,
 KeyProviderDelegationTokenExtension.DelegationTokenExtension {
 
-  private static final Logger LOG =
+  public static final Logger LOG =
   LoggerFactory.getLogger(KMSClientProvider.class);
 
   private static final String INVALID_SIGNATURE = "Invalid signature";
 
   private static final String ANONYMOUS_REQUESTS_DISALLOWED = "Anonymous 
requests are disallowed";
 
-  public static final String TOKEN_KIND_STR = 
KMSDelegationToken.TOKEN_KIND_STR;
-  public static final Text TOKEN_KIND = KMSDelegationToken.TOKEN_KIND;
-
   public static final String SCHEME_NAME = "kms";
 
   private static final String UTF8 = "UTF-8";
@@ -133,12 +133,17 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   private static final ObjectWriter WRITER =
   new ObjectMapper().writerWithDefaultPrettyPrinter();
 
+  /* 

[2/2] hadoop git commit: HADOOP-14445. Delegation tokens are not shared between KMS instances. Contributed by Xiao Chen and Rushabh S Shah.

2018-04-10 Thread xiao
HADOOP-14445. Delegation tokens are not shared between KMS instances. 
Contributed by Xiao Chen and Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/583fa6ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/583fa6ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/583fa6ed

Branch: refs/heads/trunk
Commit: 583fa6ed48ad3df40bcaa9c591d5ccd07ce3ea81
Parents: e813975
Author: Xiao Chen 
Authored: Tue Apr 10 15:26:33 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 15:38:25 2018 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   | 212 
 .../crypto/key/kms/KMSDelegationToken.java  |  22 +-
 .../crypto/key/kms/KMSLegacyTokenRenewer.java   |  56 ++
 .../hadoop/crypto/key/kms/KMSTokenRenewer.java  | 103 
 .../hadoop/crypto/key/kms/package-info.java |  18 +
 .../fs/CommonConfigurationKeysPublic.java   |  10 +
 .../web/DelegationTokenAuthenticatedURL.java|  21 +-
 .../DelegationTokenAuthenticationHandler.java   |   8 +-
 .../web/DelegationTokenAuthenticator.java   |   2 +-
 .../java/org/apache/hadoop/util/KMSUtil.java|  45 +-
 .../hadoop/util/KMSUtilFaultInjector.java   |  49 ++
 ...apache.hadoop.security.token.TokenIdentifier |   1 +
 ...rg.apache.hadoop.security.token.TokenRenewer |   3 +-
 .../src/main/resources/core-default.xml |  20 +
 .../crypto/key/kms/TestKMSClientProvider.java   | 162 ++
 .../kms/TestLoadBalancingKMSClientProvider.java |  67 ++-
 .../org/apache/hadoop/util/TestKMSUtil.java |  65 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 519 ---
 18 files changed, 1180 insertions(+), 203 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/583fa6ed/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 2eb2e21..f97fde7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -36,8 +36,9 @@ import 
org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenRenewer;
+import org.apache.hadoop.security.token.TokenSelector;
 import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.hadoop.util.KMSUtil;
@@ -82,6 +83,8 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_DEFAULT;
 import static org.apache.hadoop.util.KMSUtil.checkNotEmpty;
 import static org.apache.hadoop.util.KMSUtil.checkNotNull;
 import static org.apache.hadoop.util.KMSUtil.parseJSONEncKeyVersion;
@@ -96,16 +99,13 @@ import static 
org.apache.hadoop.util.KMSUtil.parseJSONMetadata;
 public class KMSClientProvider extends KeyProvider implements CryptoExtension,
 KeyProviderDelegationTokenExtension.DelegationTokenExtension {
 
-  private static final Logger LOG =
+  public static final Logger LOG =
   LoggerFactory.getLogger(KMSClientProvider.class);
 
   private static final String INVALID_SIGNATURE = "Invalid signature";
 
   private static final String ANONYMOUS_REQUESTS_DISALLOWED = "Anonymous 
requests are disallowed";
 
-  public static final String TOKEN_KIND_STR = 
KMSDelegationToken.TOKEN_KIND_STR;
-  public static final Text TOKEN_KIND = KMSDelegationToken.TOKEN_KIND;
-
   public static final String SCHEME_NAME = "kms";
 
   private static final String UTF8 = "UTF-8";
@@ -133,12 +133,17 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   private static final ObjectWriter WRITER =
   new ObjectMapper().writerWithDefaultPrettyPrinter();
 
+  /* dtService defines the token service value for the kms token.
+   * The 

[1/2] hadoop git commit: HADOOP-14445. Delegation tokens are not shared between KMS instances. Contributed by Xiao Chen and Rushabh S Shah.

2018-04-10 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk e81397545 -> 583fa6ed4


http://git-wip-us.apache.org/repos/asf/hadoop/blob/583fa6ed/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 1517b04..c171143 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -1,3 +1,4 @@
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -31,26 +32,35 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersi
 import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
+import org.apache.hadoop.crypto.key.kms.KMSTokenRenewer;
 import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
+import org.apache.hadoop.crypto.key.kms.TestLoadBalancingKMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.ValueQueue;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.KMSUtil;
+import org.apache.hadoop.util.KMSUtilFaultInjector;
 import org.apache.hadoop.util.Time;
 import org.apache.http.client.utils.URIBuilder;
 import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
@@ -71,7 +81,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.Writer;
 import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.SocketTimeoutException;
 import java.net.URI;
@@ -96,6 +105,10 @@ import java.util.concurrent.LinkedBlockingQueue;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
+import static org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_KIND;
+import static 
org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_LEGACY_KIND;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -113,6 +126,20 @@ public class TestKMS {
 
   private SSLFactory sslFactory;
 
+  private final KMSUtilFaultInjector oldInjector =
+  KMSUtilFaultInjector.get();
+
+  // Injector to create providers with different ports. Can only happen in 
tests
+  private final KMSUtilFaultInjector testInjector =
+  new KMSUtilFaultInjector() {
+@Override
+public KeyProvider createKeyProviderForTests(String value,
+Configuration conf) throws IOException {
+  return TestLoadBalancingKMSClientProvider
+  .createKeyProviderForTests(value, conf);
+}
+  };
+
   // Keep track of all key providers created during a test case, so they can be
   // closed at test tearDown.
  private List<KeyProvider> providersCreated = new LinkedList<>();
@@ -122,7 +149,12 @@ public class TestKMS {
 
   @Before
   public void setUp() throws Exception {
-setUpMiniKdc();
+GenericTestUtils.setLogLevel(KMSClientProvider.LOG, Level.TRACE);
+GenericTestUtils
+.setLogLevel(DelegationTokenAuthenticationHandler.LOG, Level.TRACE);
+GenericTestUtils
+.setLogLevel(DelegationTokenAuthenticator.LOG, Level.TRACE);
+GenericTestUtils.setLogLevel(KMSUtil.LOG, Level.TRACE);
 // 

hadoop git commit: HADOOP-15357. Configuration.getPropsWithPrefix no longer does variable substitution. Contributed by Jim Brennan

2018-04-10 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 33cf224dc -> 87485d40c


HADOOP-15357. Configuration.getPropsWithPrefix no longer does variable 
substitution. Contributed by Jim Brennan

(cherry picked from commit 0fb1457d862660e14c3f5cf42d2cc5a2475ee097)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87485d40
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87485d40
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87485d40

Branch: refs/heads/branch-2.9
Commit: 87485d40cdade8349e088a5bcbe603fd585390af
Parents: 33cf224
Author: Jason Lowe 
Authored: Tue Apr 10 16:44:03 2018 -0500
Committer: Jason Lowe 
Committed: Tue Apr 10 17:00:50 2018 -0500

--
 .../org/apache/hadoop/conf/Configuration.java   | 11 -
 .../apache/hadoop/conf/TestConfiguration.java   | 25 +++-
 2 files changed, 23 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87485d40/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index bfb1a67..802242f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -2669,15 +2669,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
*/
  public Map<String, String> getPropsWithPrefix(String confPrefix) {
 Properties props = getProps();
-Enumeration e = props.propertyNames();
 Map<String, String> configMap = new HashMap<>();
-String name = null;
-while (e.hasMoreElements()) {
-  name = (String) e.nextElement();
+for (String name : props.stringPropertyNames()) {
   if (name.startsWith(confPrefix)) {
-String value = props.getProperty(name);
-name = name.substring(confPrefix.length());
-configMap.put(name, value);
+String value = get(name);
+String keyName = name.substring(confPrefix.length());
+configMap.put(keyName, value);
   }
 }
 return configMap;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87485d40/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index bceae3c..214be63 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -1966,16 +1966,29 @@ public class TestConfiguration extends TestCase {
   public void testGettingPropertiesWithPrefix() throws Exception {
 Configuration conf = new Configuration();
 for (int i = 0; i < 10; i++) {
-  conf.set("prefix" + ".name" + i, "value");
+  conf.set("prefix." + "name" + i, "value" + i);
 }
 conf.set("different.prefix" + ".name", "value");
-Map<String, String> props = conf.getPropsWithPrefix("prefix");
-assertEquals(props.size(), 10);
+Map<String, String> prefixedProps = conf.getPropsWithPrefix("prefix.");
+assertEquals(prefixedProps.size(), 10);
+for (int i = 0; i < 10; i++) {
+  assertEquals("value" + i, prefixedProps.get("name" + i));
+}
 
+// Repeat test with variable substitution
+conf.set("foo", "bar");
+for (int i = 0; i < 10; i++) {
+  conf.set("subprefix." + "subname" + i, "value_${foo}" + i);
+}
+prefixedProps = conf.getPropsWithPrefix("subprefix.");
+assertEquals(prefixedProps.size(), 10);
+for (int i = 0; i < 10; i++) {
+  assertEquals("value_bar" + i, prefixedProps.get("subname" + i));
+}
 // test call with no properties for a given prefix
-props = conf.getPropsWithPrefix("none");
-assertNotNull(props.isEmpty());
-assertTrue(props.isEmpty());
+prefixedProps = conf.getPropsWithPrefix("none");
+assertNotNull(prefixedProps.isEmpty());
+assertTrue(prefixedProps.isEmpty());
   }
 
   public static void main(String[] argv) throws Exception {



hadoop git commit: HADOOP-15357. Configuration.getPropsWithPrefix no longer does variable substitution. Contributed by Jim Brennan

2018-04-10 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ea51ef44f -> 0fb1457d8


HADOOP-15357. Configuration.getPropsWithPrefix no longer does variable 
substitution. Contributed by Jim Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0fb1457d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0fb1457d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0fb1457d

Branch: refs/heads/branch-2
Commit: 0fb1457d862660e14c3f5cf42d2cc5a2475ee097
Parents: ea51ef4
Author: Jason Lowe 
Authored: Tue Apr 10 16:44:03 2018 -0500
Committer: Jason Lowe 
Committed: Tue Apr 10 16:59:31 2018 -0500

--
 .../org/apache/hadoop/conf/Configuration.java   | 11 -
 .../apache/hadoop/conf/TestConfiguration.java   | 25 +++-
 2 files changed, 23 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb1457d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index bfb1a67..802242f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -2669,15 +2669,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
*/
  public Map<String, String> getPropsWithPrefix(String confPrefix) {
 Properties props = getProps();
-Enumeration e = props.propertyNames();
 Map<String, String> configMap = new HashMap<>();
-String name = null;
-while (e.hasMoreElements()) {
-  name = (String) e.nextElement();
+for (String name : props.stringPropertyNames()) {
   if (name.startsWith(confPrefix)) {
-String value = props.getProperty(name);
-name = name.substring(confPrefix.length());
-configMap.put(name, value);
+String value = get(name);
+String keyName = name.substring(confPrefix.length());
+configMap.put(keyName, value);
   }
 }
 return configMap;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb1457d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index bceae3c..214be63 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -1966,16 +1966,29 @@ public class TestConfiguration extends TestCase {
   public void testGettingPropertiesWithPrefix() throws Exception {
 Configuration conf = new Configuration();
 for (int i = 0; i < 10; i++) {
-  conf.set("prefix" + ".name" + i, "value");
+  conf.set("prefix." + "name" + i, "value" + i);
 }
 conf.set("different.prefix" + ".name", "value");
-Map<String, String> props = conf.getPropsWithPrefix("prefix");
-assertEquals(props.size(), 10);
+Map<String, String> prefixedProps = conf.getPropsWithPrefix("prefix.");
+assertEquals(prefixedProps.size(), 10);
+for (int i = 0; i < 10; i++) {
+  assertEquals("value" + i, prefixedProps.get("name" + i));
+}
 
+// Repeat test with variable substitution
+conf.set("foo", "bar");
+for (int i = 0; i < 10; i++) {
+  conf.set("subprefix." + "subname" + i, "value_${foo}" + i);
+}
+prefixedProps = conf.getPropsWithPrefix("subprefix.");
+assertEquals(prefixedProps.size(), 10);
+for (int i = 0; i < 10; i++) {
+  assertEquals("value_bar" + i, prefixedProps.get("subname" + i));
+}
 // test call with no properties for a given prefix
-props = conf.getPropsWithPrefix("none");
-assertNotNull(props.isEmpty());
-assertTrue(props.isEmpty());
+prefixedProps = conf.getPropsWithPrefix("none");
+assertNotNull(prefixedProps.isEmpty());
+assertTrue(prefixedProps.isEmpty());
   }
 
   public static void main(String[] argv) throws Exception {





hadoop git commit: HADOOP-15357. Configuration.getPropsWithPrefix no longer does variable substitution. Contributed by Jim Brennan

2018-04-10 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 afbdd8fdc -> eef5d1947


HADOOP-15357. Configuration.getPropsWithPrefix no longer does variable 
substitution. Contributed by Jim Brennan

(cherry picked from commit e81397545a273cf9a090010eb644b836e0ef8c7b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eef5d194
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eef5d194
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eef5d194

Branch: refs/heads/branch-3.0
Commit: eef5d1947e22fec89c77f47a6f462c9ce71a13f6
Parents: afbdd8f
Author: Jason Lowe 
Authored: Tue Apr 10 16:44:03 2018 -0500
Committer: Jason Lowe 
Committed: Tue Apr 10 16:54:17 2018 -0500

--
 .../org/apache/hadoop/conf/Configuration.java   | 11 +++--
 .../apache/hadoop/conf/TestConfiguration.java   | 26 +++-
 2 files changed, 24 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eef5d194/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 7eaf00e..0d4c30a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -2833,15 +2833,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
*/
  public Map<String, String> getPropsWithPrefix(String confPrefix) {
 Properties props = getProps();
-Enumeration e = props.propertyNames();
 Map<String, String> configMap = new HashMap<>();
-String name = null;
-while (e.hasMoreElements()) {
-  name = (String) e.nextElement();
+for (String name : props.stringPropertyNames()) {
   if (name.startsWith(confPrefix)) {
-String value = props.getProperty(name);
-name = name.substring(confPrefix.length());
-configMap.put(name, value);
+String value = get(name);
+String keyName = name.substring(confPrefix.length());
+configMap.put(keyName, value);
   }
 }
 return configMap;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eef5d194/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 1ebfa35..b11349e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -2285,19 +2285,33 @@ public class TestConfiguration {
 FileUtil.fullyDelete(tmpDir);
   }
 
+  @Test
   public void testGettingPropertiesWithPrefix() throws Exception {
 Configuration conf = new Configuration();
 for (int i = 0; i < 10; i++) {
-  conf.set("prefix" + ".name" + i, "value");
+  conf.set("prefix." + "name" + i, "value" + i);
 }
 conf.set("different.prefix" + ".name", "value");
-Map<String, String> props = conf.getPropsWithPrefix("prefix");
-assertEquals(props.size(), 10);
+Map<String, String> prefixedProps = conf.getPropsWithPrefix("prefix.");
+assertEquals(prefixedProps.size(), 10);
+for (int i = 0; i < 10; i++) {
+  assertEquals("value" + i, prefixedProps.get("name" + i));
+}
 
+// Repeat test with variable substitution
+conf.set("foo", "bar");
+for (int i = 0; i < 10; i++) {
+  conf.set("subprefix." + "subname" + i, "value_${foo}" + i);
+}
+prefixedProps = conf.getPropsWithPrefix("subprefix.");
+assertEquals(prefixedProps.size(), 10);
+for (int i = 0; i < 10; i++) {
+  assertEquals("value_bar" + i, prefixedProps.get("subname" + i));
+}
 // test call with no properties for a given prefix
-props = conf.getPropsWithPrefix("none");
-assertNotNull(props.isEmpty());
-assertTrue(props.isEmpty());
+prefixedProps = conf.getPropsWithPrefix("none");
+assertNotNull(prefixedProps.isEmpty());
+assertTrue(prefixedProps.isEmpty());
   }
 
   public static void main(String[] argv) throws Exception {



hadoop git commit: HADOOP-15357. Configuration.getPropsWithPrefix no longer does variable substitution. Contributed by Jim Brennan

2018-04-10 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 3414bf6db -> ffceb907f


HADOOP-15357. Configuration.getPropsWithPrefix no longer does variable 
substitution. Contributed by Jim Brennan

(cherry picked from commit e81397545a273cf9a090010eb644b836e0ef8c7b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffceb907
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffceb907
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffceb907

Branch: refs/heads/branch-3.1
Commit: ffceb907fe6290ae462cadaf3942c0db3b4c9525
Parents: 3414bf6
Author: Jason Lowe 
Authored: Tue Apr 10 16:44:03 2018 -0500
Committer: Jason Lowe 
Committed: Tue Apr 10 16:49:27 2018 -0500

--
 .../org/apache/hadoop/conf/Configuration.java   | 11 +++--
 .../apache/hadoop/conf/TestConfiguration.java   | 26 +++-
 2 files changed, 24 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffceb907/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 6557356..0b2196b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -2866,15 +2866,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
*/
  public Map<String, String> getPropsWithPrefix(String confPrefix) {
 Properties props = getProps();
-Enumeration e = props.propertyNames();
 Map<String, String> configMap = new HashMap<>();
-String name = null;
-while (e.hasMoreElements()) {
-  name = (String) e.nextElement();
+for (String name : props.stringPropertyNames()) {
   if (name.startsWith(confPrefix)) {
-String value = props.getProperty(name);
-name = name.substring(confPrefix.length());
-configMap.put(name, value);
+String value = get(name);
+String keyName = name.substring(confPrefix.length());
+configMap.put(keyName, value);
   }
 }
 return configMap;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffceb907/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index f1d68cd..265e007 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -2320,19 +2320,33 @@ public class TestConfiguration {
 FileUtil.fullyDelete(tmpDir);
   }
 
+  @Test
   public void testGettingPropertiesWithPrefix() throws Exception {
 Configuration conf = new Configuration();
 for (int i = 0; i < 10; i++) {
-  conf.set("prefix" + ".name" + i, "value");
+  conf.set("prefix." + "name" + i, "value" + i);
 }
 conf.set("different.prefix" + ".name", "value");
-Map<String, String> props = conf.getPropsWithPrefix("prefix");
-assertEquals(props.size(), 10);
+Map<String, String> prefixedProps = conf.getPropsWithPrefix("prefix.");
+assertEquals(prefixedProps.size(), 10);
+for (int i = 0; i < 10; i++) {
+  assertEquals("value" + i, prefixedProps.get("name" + i));
+}
 
+// Repeat test with variable substitution
+conf.set("foo", "bar");
+for (int i = 0; i < 10; i++) {
+  conf.set("subprefix." + "subname" + i, "value_${foo}" + i);
+}
+prefixedProps = conf.getPropsWithPrefix("subprefix.");
+assertEquals(prefixedProps.size(), 10);
+for (int i = 0; i < 10; i++) {
+  assertEquals("value_bar" + i, prefixedProps.get("subname" + i));
+}
 // test call with no properties for a given prefix
-props = conf.getPropsWithPrefix("none");
-assertNotNull(props.isEmpty());
-assertTrue(props.isEmpty());
+prefixedProps = conf.getPropsWithPrefix("none");
+assertNotNull(prefixedProps.isEmpty());
+assertTrue(prefixedProps.isEmpty());
   }
 
   public static void main(String[] argv) throws Exception {



hadoop git commit: HADOOP-15357. Configuration.getPropsWithPrefix no longer does variable substitution. Contributed by Jim Brennan

2018-04-10 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk d55379903 -> e81397545


HADOOP-15357. Configuration.getPropsWithPrefix no longer does variable 
substitution. Contributed by Jim Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8139754
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8139754
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8139754

Branch: refs/heads/trunk
Commit: e81397545a273cf9a090010eb644b836e0ef8c7b
Parents: d553799
Author: Jason Lowe 
Authored: Tue Apr 10 16:44:03 2018 -0500
Committer: Jason Lowe 
Committed: Tue Apr 10 16:44:55 2018 -0500

--
 .../org/apache/hadoop/conf/Configuration.java   | 11 +++--
 .../apache/hadoop/conf/TestConfiguration.java   | 26 +++-
 2 files changed, 24 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8139754/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 78a2e9f..f1e2a9d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -2869,15 +2869,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
*/
  public Map<String, String> getPropsWithPrefix(String confPrefix) {
 Properties props = getProps();
-Enumeration e = props.propertyNames();
 Map<String, String> configMap = new HashMap<>();
-String name = null;
-while (e.hasMoreElements()) {
-  name = (String) e.nextElement();
+for (String name : props.stringPropertyNames()) {
   if (name.startsWith(confPrefix)) {
-String value = props.getProperty(name);
-name = name.substring(confPrefix.length());
-configMap.put(name, value);
+String value = get(name);
+String keyName = name.substring(confPrefix.length());
+configMap.put(keyName, value);
   }
 }
 return configMap;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8139754/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index b0bb0d7..33a9880 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -2320,19 +2320,33 @@ public class TestConfiguration {
 FileUtil.fullyDelete(tmpDir);
   }
 
+  @Test
   public void testGettingPropertiesWithPrefix() throws Exception {
 Configuration conf = new Configuration();
 for (int i = 0; i < 10; i++) {
-  conf.set("prefix" + ".name" + i, "value");
+  conf.set("prefix." + "name" + i, "value" + i);
 }
 conf.set("different.prefix" + ".name", "value");
-Map<String, String> props = conf.getPropsWithPrefix("prefix");
-assertEquals(props.size(), 10);
+Map<String, String> prefixedProps = conf.getPropsWithPrefix("prefix.");
+assertEquals(prefixedProps.size(), 10);
+for (int i = 0; i < 10; i++) {
+  assertEquals("value" + i, prefixedProps.get("name" + i));
+}
 
+// Repeat test with variable substitution
+conf.set("foo", "bar");
+for (int i = 0; i < 10; i++) {
+  conf.set("subprefix." + "subname" + i, "value_${foo}" + i);
+}
+prefixedProps = conf.getPropsWithPrefix("subprefix.");
+assertEquals(prefixedProps.size(), 10);
+for (int i = 0; i < 10; i++) {
+  assertEquals("value_bar" + i, prefixedProps.get("subname" + i));
+}
 // test call with no properties for a given prefix
-props = conf.getPropsWithPrefix("none");
-assertNotNull(props.isEmpty());
-assertTrue(props.isEmpty());
+prefixedProps = conf.getPropsWithPrefix("none");
+assertNotNull(prefixedProps.isEmpty());
+assertTrue(prefixedProps.isEmpty());
   }
 
   public static void main(String[] argv) throws Exception {
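
The behavioral change is easy to demonstrate in isolation: the old loop read
raw values from the Properties object, so ${var} references came back
unexpanded, while get(name) runs them through variable substitution. A small
self-contained check mirroring the new test above (class and property names are
invented for the demo):

import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public class PrefixSubstitutionDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("foo", "bar");
    conf.set("subprefix.subname0", "value_${foo}0");

    Map<String, String> props = conf.getPropsWithPrefix("subprefix.");
    // With this fix: prints "value_bar0". Before it, the raw
    // "value_${foo}0" was returned because props.getProperty(name)
    // bypassed variable expansion.
    System.out.println(props.get("subname0"));
  }
}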





hadoop git commit: YARN-7984. Improved YARN service stop/destroy and clean up. Contributed by Billie Rinaldi

2018-04-10 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8ab776d61 -> d55379903


YARN-7984. Improved YARN service stop/destroy and clean up.
   Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5537990
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5537990
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5537990

Branch: refs/heads/trunk
Commit: d553799030a5a64df328319aceb35734d0b2de20
Parents: 8ab776d
Author: Eric Yang 
Authored: Tue Apr 10 17:40:49 2018 -0400
Committer: Eric Yang 
Committed: Tue Apr 10 17:40:49 2018 -0400

--
 .../hadoop/yarn/service/webapp/ApiServer.java   | 47 +++---
 .../hadoop/yarn/service/ServiceClientTest.java  |  6 ++
 .../hadoop/yarn/service/TestApiServer.java  | 26 ++
 .../yarn/service/client/ServiceClient.java  | 93 +++-
 .../hadoop/yarn/service/ServiceTestUtils.java   | 15 +++-
 .../yarn/service/TestYarnNativeServices.java| 42 -
 6 files changed, 191 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5537990/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index 59ee05d..14c77f6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -231,30 +231,40 @@ public class ApiServer {
   e.getCause().getMessage());
 } catch (YarnException | FileNotFoundException e) {
   return formatResponse(Status.NOT_FOUND, e.getMessage());
-} catch (IOException | InterruptedException e) {
+} catch (Exception e) {
   LOG.error("Fail to stop service: {}", e);
   return formatResponse(Status.INTERNAL_SERVER_ERROR, e.getMessage());
 }
   }
 
   private Response stopService(String appName, boolean destroy,
-  final UserGroupInformation ugi) throws IOException,
-  InterruptedException, YarnException, FileNotFoundException {
+  final UserGroupInformation ugi) throws Exception {
 int result = ugi.doAs(new PrivilegedExceptionAction<Integer>() {
   @Override
-  public Integer run() throws IOException, YarnException,
-  FileNotFoundException {
+  public Integer run() throws Exception {
 int result = 0;
 ServiceClient sc = getServiceClient();
 sc.init(YARN_CONFIG);
 sc.start();
-result = sc.actionStop(appName, destroy);
-if (result == EXIT_SUCCESS) {
-  LOG.info("Successfully stopped service {}", appName);
+Exception stopException = null;
+try {
+  result = sc.actionStop(appName, destroy);
+  if (result == EXIT_SUCCESS) {
+LOG.info("Successfully stopped service {}", appName);
+  }
+} catch (Exception e) {
+  LOG.info("Got exception stopping service", e);
+  stopException = e;
 }
 if (destroy) {
   result = sc.actionDestroy(appName);
-  LOG.info("Successfully deleted service {}", appName);
+  if (result == EXIT_SUCCESS) {
+LOG.info("Successfully deleted service {}", appName);
+  }
+} else {
+  if (stopException != null) {
+throw stopException;
+  }
 }
 sc.close();
 return result;
@@ -262,8 +272,21 @@ public class ApiServer {
 });
 ServiceStatus serviceStatus = new ServiceStatus();
 if (destroy) {
-  serviceStatus.setDiagnostics("Successfully destroyed service " +
-  appName);
+  if (result == EXIT_SUCCESS) {
+serviceStatus.setDiagnostics("Successfully destroyed service " +
+appName);
+  } else {
+if (result == EXIT_NOT_FOUND) {
+  serviceStatus
+  .setDiagnostics("Service " + appName + " doesn't exist");
+  return formatResponse(Status.BAD_REQUEST, serviceStatus);
+} else {
+  serviceStatus
+  .setDiagnostics("Service " + appName + " error cleaning up " +
+  "registry");
+  return formatResponse(Status.INTERNAL_SERVER_ERROR, serviceStatus);
+   

[hadoop] Git Push Summary

2018-04-10 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-8 [created] df3ff9042




[01/50] [abbrv] hadoop git commit: HDFS-13364. RBF: Support NamenodeProtocol in the Router. Contributed by Inigo Goiri.

2018-04-10 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 bb3c07fa3 -> df3ff9042


HDFS-13364. RBF: Support NamenodeProtocol in the Router. Contributed by Inigo 
Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2be64eb2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2be64eb2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2be64eb2

Branch: refs/heads/HDFS-7240
Commit: 2be64eb201134502a92f7239bef8aa780771ca0b
Parents: 1077392
Author: Yiqun Lin 
Authored: Tue Apr 3 15:08:40 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Apr 3 15:08:40 2018 +0800

--
 .../federation/router/ConnectionContext.java|  35 +++-
 .../federation/router/ConnectionManager.java|  10 +-
 .../federation/router/ConnectionPool.java   |  98 +-
 .../federation/router/ConnectionPoolId.java |  19 +-
 .../server/federation/router/RemoteMethod.java  |  68 ++-
 .../router/RouterNamenodeProtocol.java  | 187 +++
 .../federation/router/RouterRpcClient.java  |  56 --
 .../federation/router/RouterRpcServer.java  | 111 ++-
 .../server/federation/MiniRouterDFSCluster.java |   8 +
 .../router/TestConnectionManager.java   |  56 +-
 .../server/federation/router/TestRouterRpc.java | 115 ++--
 11 files changed, 698 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2be64eb2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java
index 1d27b51..7e779b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java
@@ -17,8 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import java.net.InetSocketAddress;
+
 import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.ipc.RPC;
 
 /**
@@ -26,18 +27,24 @@ import org.apache.hadoop.ipc.RPC;
  * a connection, it increments a counter to mark it as active. Once the client
  * is done with the connection, it decreases the counter. It also takes care of
  * closing the connection once is not active.
+ *
+ * The protocols currently used are:
+ * <ul>
+ * <li>{@link org.apache.hadoop.hdfs.protocol.ClientProtocol}</li>
+ * <li>{@link org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol}</li>
+ * </ul>
  */
 public class ConnectionContext {
 
   /** Client for the connection. */
-  private final ProxyAndInfo<ClientProtocol> client;
+  private final ProxyAndInfo<?> client;
   /** How many threads are using this connection. */
   private int numThreads = 0;
   /** If the connection is closed. */
   private boolean closed = false;
 
 
-  public ConnectionContext(ProxyAndInfo<ClientProtocol> connection) {
+  public ConnectionContext(ProxyAndInfo<?> connection) {
 this.client = connection;
   }
 
@@ -74,7 +81,7 @@ public class ConnectionContext {
*
* @return Connection client.
*/
-  public synchronized ProxyAndInfo<ClientProtocol> getClient() {
+  public synchronized ProxyAndInfo<?> getClient() {
 this.numThreads++;
 return this.client;
   }
@@ -96,9 +103,27 @@ public class ConnectionContext {
   public synchronized void close() {
 this.closed = true;
 if (this.numThreads == 0) {
-  ClientProtocol proxy = this.client.getProxy();
+  Object proxy = this.client.getProxy();
   // Nobody should be using this anymore so it should close right away
   RPC.stopProxy(proxy);
 }
   }
+
+  @Override
+  public String toString() {
+InetSocketAddress addr = this.client.getAddress();
+Object proxy = this.client.getProxy();
+Class<?> clazz = proxy.getClass();
+
+StringBuilder sb = new StringBuilder();
+sb.append(clazz.getSimpleName());
+sb.append("@");
+sb.append(addr);
+sb.append("x");
+sb.append(numThreads);
+if (closed) {
+  sb.append("[CLOSED]");
+}
+return sb.toString();
+  }
 }
\ No newline at end of file
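
The class javadoc above spells out a reference-counting contract for the
connection. A hedged usage sketch follows: getClient() appears in this hunk,
while release() exists in the class but is outside the excerpt, so treat the
driver below as illustrative rather than the committed API surface.

import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
import org.apache.hadoop.hdfs.server.federation.router.ConnectionContext;

public final class ConnectionContextUsage {
  private ConnectionContextUsage() {
  }

  public static void useOnce(ConnectionContext ctx) {
    ProxyAndInfo<?> client = ctx.getClient(); // increments the usage counter
    try {
      Object proxy = client.getProxy();       // issue RPCs through this proxy
      assert proxy != null;
    } finally {
      ctx.release();                          // decrement; close() stops the
    }                                         // proxy only once nobody uses it
  }
}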

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2be64eb2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
--
diff --git 

[49/50] [abbrv] hadoop git commit: HADOOP-15340. Provide meaningful RPC server name for RpcMetrics. Contributed by Elek Marton.

2018-04-10 Thread xyao
HADOOP-15340. Provide meaningful RPC server name for RpcMetrics. Contributed by 
Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ab776d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ab776d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ab776d6

Branch: refs/heads/HDFS-7240
Commit: 8ab776d61e569c12ec62024415ff68e5d3b10141
Parents: e76c2ae
Author: Xiaoyu Yao 
Authored: Tue Apr 10 11:42:54 2018 -0700
Committer: Xiaoyu Yao 
Committed: Tue Apr 10 11:42:54 2018 -0700

--
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  5 +-
 .../main/java/org/apache/hadoop/ipc/RPC.java| 46 +---
 .../main/java/org/apache/hadoop/ipc/Server.java |  9 
 .../apache/hadoop/ipc/WritableRpcEngine.java|  2 +-
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   | 11 +++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 56 +++-
 6 files changed, 117 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ab776d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 639bbad..70fde60 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -419,8 +419,9 @@ public class ProtobufRpcEngine implements RpcEngine {
 String portRangeConfig)
 throws IOException {
   super(bindAddress, port, null, numHandlers,
-  numReaders, queueSizePerHandler, conf, classNameBase(protocolImpl
-  .getClass().getName()), secretManager, portRangeConfig);
+  numReaders, queueSizePerHandler, conf,
+  serverNameFromClass(protocolImpl.getClass()), secretManager,
+  portRangeConfig);
   this.verbose = verbose;  
   registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass,
   protocolImpl);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ab776d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 8f8eda6..9cfadc7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -35,6 +35,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.HashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import javax.net.SocketFactory;
 
@@ -808,13 +810,45 @@ public class RPC {
   
   /** An RPC Server. */
   public abstract static class Server extends org.apache.hadoop.ipc.Server {
-   boolean verbose;
-   static String classNameBase(String className) {
-  String[] names = className.split("\\.", -1);
-  if (names == null || names.length == 0) {
-return className;
+
+boolean verbose;
+
+private static final Pattern COMPLEX_SERVER_NAME_PATTERN =
+Pattern.compile("(?:[^\\$]*\\$)*([A-Za-z][^\\$]+)(?:\\$\\d+)?");
+
+/**
+ * Get a meaningful and short name for a server based on a java class.
+ *
+ * The rules are defined to support the current naming schema of the
+ * generated protobuf classes where the final class is usually an anonymous
+ * inner class of an inner class.
+ *
+ * 1. For simple classes it returns with the simple name of the classes
+ * (with the name without package name)
+ *
+ * 2. For inner classes, this is the simple name of the inner class.
+ *
+ * 3.  If it is an Object created from a class factory
+ *   E.g., org.apache.hadoop.ipc.TestRPC$TestClass$2
+ * this method returns parent class TestClass.
+ *
+ * 4. If it is an anonymous class E.g., 'org.apache.hadoop.ipc.TestRPC$10'
+ * serverNameFromClass returns parent class TestRPC.
+ *
+ *
+ */
+static String serverNameFromClass(Class<?> clazz) {
+  String name = clazz.getName();
+  String[] names = clazz.getName().split("\\.", -1);
+  if (names != null && names.length > 0) {
+name = names[names.length - 1];
+  }
+  Matcher matcher = 
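
(The message truncates just before the matcher logic.) The documented rules can
still be exercised against the pattern shown above. A self-contained sketch in
which the pattern string is copied from the patch, while the helper and main
are illustrative and operate on an already-extracted simple name:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ServerNameDemo {
  private static final Pattern COMPLEX_SERVER_NAME_PATTERN =
      Pattern.compile("(?:[^\\$]*\\$)*([A-Za-z][^\\$]+)(?:\\$\\d+)?");

  static String nameFromSimpleName(String simpleName) {
    Matcher m = COMPLEX_SERVER_NAME_PATTERN.matcher(simpleName);
    return m.matches() ? m.group(1) : simpleName;
  }

  public static void main(String[] args) {
    System.out.println(nameFromSimpleName("TestRPC"));             // rule 1: TestRPC
    System.out.println(nameFromSimpleName("TestRPC$TestClass$2")); // rule 3: TestClass
    System.out.println(nameFromSimpleName("TestRPC$10"));          // rule 4: TestRPC
  }
}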

[03/50] [abbrv] hadoop git commit: HADOOP-14758. S3GuardTool.prune to handle UnsupportedOperationException. Contributed by Gabor Bota.

2018-04-10 Thread xyao
HADOOP-14758. S3GuardTool.prune to handle UnsupportedOperationException.
Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a174f8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a174f8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a174f8a

Branch: refs/heads/HDFS-7240
Commit: 5a174f8ac6e5f170b427b30bf72ef33f90c20d91
Parents: 93d47a0
Author: Steve Loughran 
Authored: Tue Apr 3 16:31:34 2018 +0100
Committer: Steve Loughran 
Committed: Tue Apr 3 16:31:34 2018 +0100

--
 .../java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a174f8a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
index e764021..a9147ff 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
@@ -966,7 +966,11 @@ public abstract class S3GuardTool extends Configured 
implements Tool {
   long now = System.currentTimeMillis();
   long divide = now - delta;
 
-  getStore().prune(divide);
+  try {
+getStore().prune(divide);
+  } catch (UnsupportedOperationException e){
+errorln("Prune operation not supported in metadata store.");
+  }
 
   out.flush();
   return SUCCESS;





[25/50] [abbrv] hadoop git commit: YARN-8083. [UI2] All YARN related configurations are paged together in conf page. Contributed by Gergely Novák.

2018-04-10 Thread xyao
YARN-8083. [UI2] All YARN related configurations are paged together in conf 
page. Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b17dc9f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b17dc9f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b17dc9f5

Branch: refs/heads/HDFS-7240
Commit: b17dc9f5f54fd91defc1d8646f8229da5fe7ccbb
Parents: ea3849f
Author: Sunil G 
Authored: Fri Apr 6 21:53:14 2018 +0530
Committer: Sunil G 
Committed: Fri Apr 6 21:53:14 2018 +0530

--
 .../main/webapp/app/controllers/yarn-tools/yarn-conf.js   | 10 +-
 .../main/webapp/app/templates/yarn-tools/yarn-conf.hbs|  6 +++---
 2 files changed, 12 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17dc9f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js
index 2984346..cc3be2e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js
@@ -24,7 +24,15 @@ import ColumnDef from 'em-table/utils/column-definition';
 import YarnConf from '../../models/yarn-conf';
 
 export default Ember.Controller.extend({
-  tableDefinition: TableDef.create({
+  coreTableDefinition: TableDef.create({
+searchType: 'manual',
+  }),
+
+  mapredTableDefinition: TableDef.create({
+searchType: 'manual',
+  }),
+
+  yarnTableDefinition: TableDef.create({
 searchType: 'manual',
   }),
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17dc9f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs
index 09a1410..c2108a2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs
@@ -17,12 +17,12 @@
 }}
 
 Core Configuration
-{{em-table columns=columnsFromModel rows=rowsForCoreColumnsFromModel 
rowCount=10 definition=tableDefinition}}
+{{em-table columns=columnsFromModel rows=rowsForCoreColumnsFromModel 
rowCount=10 definition=coreTableDefinition}}
 
 YARN Configuration
-{{em-table columns=columnsFromModel rows=rowsForYarnColumnsFromModel 
rowCount=10 definition=tableDefinition}}
+{{em-table columns=columnsFromModel rows=rowsForYarnColumnsFromModel 
rowCount=10 definition=yarnTableDefinition}}
 
 MapReduce Configuration
-{{em-table columns=columnsFromModel rows=rowsForMapredColumnsFromModel 
rowCount=10 definition=tableDefinition}}
+{{em-table columns=columnsFromModel rows=rowsForMapredColumnsFromModel 
rowCount=10 definition=mapredTableDefinition}}
 
 {{outlet}}





[36/50] [abbrv] hadoop git commit: HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun.

2018-04-10 Thread xyao
HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the 
time. Contributed by Jinglun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac32b357
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac32b357
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac32b357

Branch: refs/heads/HDFS-7240
Commit: ac32b3576da4cc463dff85118163ccfff02215fc
Parents: 821b0de
Author: Inigo Goiri 
Authored: Mon Apr 9 09:16:48 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 9 09:16:48 2018 -0700

--
 .../ha/RequestHedgingProxyProvider.java |  3 ++
 .../ha/TestRequestHedgingProxyProvider.java | 34 
 2 files changed, 37 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac32b357/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 7b9cd64..1c38791 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -79,6 +79,9 @@ public class RequestHedgingProxyProvider extends
 public Object
 invoke(Object proxy, final Method method, final Object[] args)
 throws Throwable {
+  if (currentUsedProxy != null) {
+return method.invoke(currentUsedProxy.proxy, args);
+  }
   Map proxyMap = new HashMap<>();
   int numAttempts = 0;
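
The three added lines are the whole fix: once one NameNode has answered, the
invocation handler pins to it instead of fanning out on every call. A
minimal sketch of that short-circuit idea (plain java.lang.reflect, not the
Hadoop class; the sequential loop stands in for the real concurrent hedge):

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.List;

public class HedgingHandlerDemo implements InvocationHandler {
  private final List<Object> targets;        // configured proxies
  private volatile Object currentUsedTarget; // set after first success

  public HedgingHandlerDemo(List<Object> targets) {
    this.targets = targets;
  }

  @Override
  public Object invoke(Object proxy, Method method, Object[] args)
      throws Throwable {
    if (currentUsedTarget != null) {
      // Short-circuit added by the patch: no fan-out once a winner is known.
      return method.invoke(currentUsedTarget, args);
    }
    Throwable lastFailure = null;
    for (Object target : targets) {
      try {
        Object result = method.invoke(target, args);
        currentUsedTarget = target; // remember the first responder
        return result;
      } catch (InvocationTargetException e) {
        lastFailure = e.getCause();
      }
    }
    throw lastFailure != null ? lastFailure
        : new IllegalStateException("no targets configured");
  }
}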
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac32b357/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 8d6b02d..4b3fdf9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -43,10 +43,13 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import static org.junit.Assert.assertEquals;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
 
@@ -100,6 +103,37 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
+  public void testRequestNNAfterOneSuccess() throws Exception {
+final AtomicInteger count = new AtomicInteger(0);
+final ClientProtocol goodMock = mock(ClientProtocol.class);
+when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
+  @Override
+  public long[] answer(InvocationOnMock invocation) throws Throwable {
+count.incrementAndGet();
+Thread.sleep(1000);
+return new long[]{1};
+  }
+});
+final ClientProtocol badMock = mock(ClientProtocol.class);
+when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
+  @Override
+  public long[] answer(InvocationOnMock invocation) throws Throwable {
+count.incrementAndGet();
+throw new IOException("Bad mock !!");
+  }
+});
+
+RequestHedgingProxyProvider provider =
+new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
+createFactory(badMock, goodMock, goodMock, badMock));
+ClientProtocol proxy = provider.getProxy().proxy;
+proxy.getStats();
+assertEquals(2, count.get());
+proxy.getStats();
+assertEquals(3, count.get());
+  }
+
+  @Test
   public void testHedgingWhenOneIsSlow() throws Exception {
 final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
  Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {



[28/50] [abbrv] hadoop git commit: YARN-8048. Support auto-spawning of admin configured services during bootstrap of RM (Rohith Sharma K S via wangda)

2018-04-10 Thread xyao
YARN-8048. Support auto-spawning of admin configured services during bootstrap 
of RM (Rohith Sharma K S via wangda)

Change-Id: I2d8d61ccad55e1118009294d7e17822df3cd0fd5


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4e63ccc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4e63ccc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4e63ccc

Branch: refs/heads/HDFS-7240
Commit: d4e63ccca0763b452e4a0169dd932b3f32066281
Parents: 00905ef
Author: Wangda Tan 
Authored: Fri Apr 6 21:24:58 2018 -0700
Committer: Wangda Tan 
Committed: Fri Apr 6 21:24:58 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   4 +
 .../hadoop-yarn-services-api/pom.xml|   5 +
 .../client/SystemServiceManagerImpl.java| 381 +++
 .../service/client/TestSystemServiceImpl.java   | 180 +
 .../users/sync/user1/example-app1.yarnfile  |  16 +
 .../users/sync/user1/example-app2.yarnfile  |  16 +
 .../users/sync/user1/example-app3.json  |  16 +
 .../users/sync/user2/example-app1.yarnfile  |  16 +
 .../users/sync/user2/example-app2.yarnfile  |  16 +
 .../yarn/service/conf/YarnServiceConf.java  |   2 +
 .../yarn/service/TestSystemServiceManager.java  | 156 
 .../server/service/SystemServiceManager.java|  25 ++
 .../yarn/server/service/package-info.java   |  27 ++
 .../server/resourcemanager/ResourceManager.java |  30 +-
 14 files changed, 889 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4e63ccc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 41755e2..7a2a3ce 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -343,6 +343,10 @@ public class YarnConfiguration extends Configuration {
   public static final String YARN_API_SERVICES_ENABLE = "yarn."
   + "webapp.api-service.enable";
 
+  @Private
+  public static final String DEFAULT_YARN_API_SYSTEM_SERVICES_CLASS =
+  "org.apache.hadoop.yarn.service.client.SystemServiceManagerImpl";
+
   public static final String RM_RESOURCE_TRACKER_ADDRESS =
 RM_PREFIX + "resource-tracker.address";
   public static final int DEFAULT_RM_RESOURCE_TRACKER_PORT = 8031;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4e63ccc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
index 7fe2ef6..354c9b5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
@@ -71,6 +71,7 @@
 
   
 **/*.json
+**/*.yarnfile
   
 
   
@@ -96,6 +97,10 @@
 
 
   org.apache.hadoop
+  hadoop-yarn-server-common
+
+
+  org.apache.hadoop
   hadoop-common
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4e63ccc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
new file mode 100644
index 000..225f8bd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
@@ -0,0 +1,381 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information 

[17/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
new file mode 100644
index 000..ab7c120
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
@@ -0,0 +1,3034 @@
+  [3,034 lines of generated jdiff XML (Apache_Hadoop_YARN_Common_3.1.0.xml) omitted: the archive stripped the XML markup, leaving no readable content.]

[20/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_3.1.0.xml
--
diff --git 
a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_3.1.0.xml
 
b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_3.1.0.xml
new file mode 100644
index 000..f4762d9
--- /dev/null
+++ 
b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_3.1.0.xml
@@ -0,0 +1,28075 @@
+  [28,075 lines of generated jdiff XML (Apache_Hadoop_MapReduce_Core_3.1.0.xml) omitted: the archive stripped the XML markup, leaving no readable content.]

[04/50] [abbrv] hadoop git commit: YARN-8035. Uncaught exception in ContainersMonitorImpl during relaunch due to the process ID changing. Contributed by Shane Kumpf.

2018-04-10 Thread xyao
YARN-8035. Uncaught exception in ContainersMonitorImpl during relaunch due to 
the process ID changing. Contributed by Shane Kumpf.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d06d885
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d06d885
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d06d885

Branch: refs/heads/HDFS-7240
Commit: 2d06d885c84b2e4a3acb6d3e0c50d4870e37ca82
Parents: 5a174f8
Author: Miklos Szegedi 
Authored: Tue Apr 3 10:01:00 2018 -0700
Committer: Miklos Szegedi 
Committed: Tue Apr 3 10:01:00 2018 -0700

--
 .../containermanager/monitor/ContainerMetrics.java |  2 +-
 .../monitor/TestContainerMetrics.java  | 17 +
 2 files changed, 18 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d06d885/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
index a6aa337..2a95849 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
@@ -274,7 +274,7 @@ public class ContainerMetrics implements MetricsSource {
   }
 
   public void recordProcessId(String processId) {
-registry.tag(PROCESSID_INFO, processId);
+registry.tag(PROCESSID_INFO, processId, true);
   }
 
   public void recordResourceLimit(int vmemLimit, int pmemLimit, int cpuVcores) 
{

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d06d885/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainerMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainerMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainerMetrics.java
index 1840d62..8b2bff1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainerMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainerMetrics.java
@@ -206,4 +206,21 @@ public class TestContainerMetrics {
 }
 Assert.assertEquals(expectedValues.keySet(), testResults);
   }
+
+  @Test
+  public void testContainerMetricsUpdateContainerPid() {
+ContainerId containerId = mock(ContainerId.class);
+ContainerMetrics metrics = ContainerMetrics.forContainer(containerId,
+100, 1);
+
+String origPid = "1234";
+metrics.recordProcessId(origPid);
+assertEquals(origPid, metrics.registry.getTag(
+ContainerMetrics.PROCESSID_INFO.name()).value());
+
+String newPid = "4321";
+metrics.recordProcessId(newPid);
+assertEquals(newPid, metrics.registry.getTag(
+ContainerMetrics.PROCESSID_INFO.name()).value());
+  }
 }
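
The one-line fix passes override=true so that re-recording the pid after a
container relaunch replaces the tag instead of throwing. A small sketch of
the underlying behavior, assuming hadoop-common's metrics2 MetricsRegistry
and its tag(name, description, value[, override]) convenience overloads:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;

public class RetagDemo {
  public static void main(String[] args) {
    MetricsRegistry registry = new MetricsRegistry("container");
    registry.tag("pid", "container process id", "1234");
    // Without the override flag, re-tagging the same name throws a
    // MetricsException; with override=true, the relaunched container's
    // new pid simply replaces the old value.
    registry.tag("pid", "container process id", "4321", true);
    System.out.println(registry.getTag("pid").value()); // 4321
  }
}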





[46/50] [abbrv] hadoop git commit: HADOOP-15376. Remove double semi colons on imports that make Clover fall over.

2018-04-10 Thread xyao
HADOOP-15376. Remove double semi colons on imports that make Clover fall over.

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cef8eb79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cef8eb79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cef8eb79

Branch: refs/heads/HDFS-7240
Commit: cef8eb79810383f9970ed3713deecc18fbf0ffaa
Parents: 6729047
Author: Ewan Higgs 
Authored: Tue Apr 10 23:58:26 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Apr 10 23:58:26 2018 +0900

--
 .../src/test/java/org/apache/hadoop/io/TestIOUtils.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cef8eb79/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
index 467e5bc..fca72d9 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
@@ -39,7 +39,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.io.FileUtils;;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;





[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2018-04-10 Thread xyao
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df3ff904
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df3ff904
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df3ff904

Branch: refs/heads/HDFS-7240
Commit: df3ff9042a6327b784ecf90ea8be8f0fe567859e
Parents: bb3c07f 8ab776d
Author: Xiaoyu Yao 
Authored: Tue Apr 10 12:22:50 2018 -0700
Committer: Xiaoyu Yao 
Committed: Tue Apr 10 12:22:50 2018 -0700

--
 BUILDING.txt|14 +
 .../src/main/bin/hadoop-functions.sh| 9 +-
 .../apache/hadoop/crypto/key/KeyProvider.java   |11 +-
 .../fs/CommonConfigurationKeysPublic.java   |21 +
 .../main/java/org/apache/hadoop/ipc/RPC.java|35 +-
 .../org/apache/hadoop/net/NetworkTopology.java  |   106 +-
 .../hadoop/util/concurrent/HadoopExecutors.java | 9 +-
 .../src/site/markdown/HttpAuthentication.md | 2 +-
 .../markdown/release/3.1.0/CHANGES.3.1.0.md |  1022 +
 .../release/3.1.0/RELEASENOTES.3.1.0.md |   199 +
 .../fs/contract/AbstractContractCreateTest.java |12 +-
 .../java/org/apache/hadoop/io/TestIOUtils.java  | 2 +-
 .../hadoop/hdfs/protocol/AclException.java  |10 +
 .../ha/RequestHedgingProxyProvider.java | 3 +
 .../ha/TestRequestHedgingProxyProvider.java |34 +
 .../federation/metrics/NamenodeBeanMetrics.java | 3 +
 .../federation/router/ConnectionContext.java|35 +-
 .../federation/router/ConnectionManager.java|10 +-
 .../federation/router/ConnectionPool.java   |98 +-
 .../federation/router/ConnectionPoolId.java |19 +-
 .../server/federation/router/RemoteMethod.java  |68 +-
 .../router/RouterNamenodeProtocol.java  |   187 +
 .../federation/router/RouterRpcClient.java  |62 +-
 .../federation/router/RouterRpcServer.java  |   141 +-
 .../router/SubClusterTimeoutException.java  |33 +
 .../driver/impl/StateStoreFileSystemImpl.java   | 6 +-
 .../server/federation/MiniRouterDFSCluster.java |39 +-
 .../router/TestConnectionManager.java   |56 +-
 .../server/federation/router/TestRouter.java|70 +-
 .../federation/router/TestRouterQuota.java  | 4 +
 .../router/TestRouterRPCClientRetries.java  |   126 +-
 .../server/federation/router/TestRouterRpc.java |   136 +-
 .../src/test/resources/contract/webhdfs.xml | 5 +
 .../jdiff/Apache_Hadoop_HDFS_3.1.0.xml  |   676 +
 .../server/blockmanagement/BlockIdManager.java  |17 +
 .../server/blockmanagement/BlockManager.java| 5 +-
 .../blockmanagement/BlockManagerSafeMode.java   | 2 +-
 .../hdfs/server/blockmanagement/BlocksMap.java  |12 +-
 .../blockmanagement/CorruptReplicasMap.java |35 +-
 .../blockmanagement/InvalidateBlocks.java   |13 +-
 .../server/namenode/EncryptionZoneManager.java  | 8 +-
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |12 +
 .../hdfs/server/namenode/FSTreeTraverser.java   |   339 +
 .../server/namenode/ReencryptionHandler.java|   615 +-
 .../server/namenode/ReencryptionUpdater.java| 2 +-
 .../src/site/markdown/ArchivalStorage.md| 2 +-
 .../src/site/markdown/MemoryStorage.md  | 2 +-
 .../blockmanagement/TestBlockManager.java   |61 +-
 .../blockmanagement/TestCorruptReplicaInfo.java |48 +-
 .../hdfs/server/namenode/TestReencryption.java  | 3 -
 .../namenode/TestReencryptionHandler.java   |10 +-
 .../apache/hadoop/net/TestNetworkTopology.java  |75 +-
 .../src/test/resources/testCryptoConf.xml   |19 +
 .../Apache_Hadoop_MapReduce_Common_3.1.0.xml|   113 +
 .../Apache_Hadoop_MapReduce_Core_3.1.0.xml  | 28075 +
 .../Apache_Hadoop_MapReduce_JobClient_3.1.0.xml |16 +
 .../jobhistory/JobHistoryEventHandler.java  | 2 +-
 hadoop-project/src/site/site.xml| 4 +
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   |18 +-
 .../fs/s3a/s3guard/LocalMetadataStore.java  |17 +-
 .../hadoop/fs/s3a/s3guard/MetadataStore.java|12 +
 .../fs/s3a/s3guard/NullMetadataStore.java   | 4 +
 .../hadoop/fs/s3a/s3guard/S3GuardTool.java  |14 +-
 .../site/markdown/tools/hadoop-aws/s3guard.md   |11 +-
 .../s3guard/AbstractS3GuardToolTestBase.java|21 +-
 .../dev-support/findbugs-exclude.xml| 7 +
 .../jdiff/Apache_Hadoop_YARN_Client_3.1.0.xml   |  3146 ++
 .../jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml   |  3034 ++
 .../Apache_Hadoop_YARN_Server_Common_3.1.0.xml  |  1331 +
 .../api/records/AllocationTagNamespaceType.java | 2 +-
 .../timelineservice/SubApplicationEntity.java   |50 +
 .../hadoop/yarn/conf/YarnConfiguration.java |42 +
 .../hadoop-yarn-services-api/pom.xml| 

[30/50] [abbrv] hadoop git commit: HDFS-13292. Crypto command should give proper exception when trying to set key on existing EZ directory. Contributed by Ranith Sardar.

2018-04-10 Thread xyao
HDFS-13292. Crypto command should give proper exception when trying to set key 
on existing EZ directory. Contributed by Ranith Sardar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/70590cd8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/70590cd8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/70590cd8

Branch: refs/heads/HDFS-7240
Commit: 70590cd8d948de581e2ae1184afb08574c67bbbe
Parents: 00ebec8
Author: Surendra Singh Lilhore 
Authored: Sat Apr 7 11:23:49 2018 +0530
Committer: Surendra Singh Lilhore 
Committed: Sat Apr 7 11:23:49 2018 +0530

--
 .../server/namenode/EncryptionZoneManager.java   |  8 
 .../src/test/resources/testCryptoConf.xml| 19 +++
 2 files changed, 23 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/70590cd8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index b1bca98..d06cd1c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -541,10 +541,6 @@ public class EncryptionZoneManager {
 if (srcIIP.getLastINode() == null) {
   throw new FileNotFoundException("cannot find " + srcIIP.getPath());
 }
-if (dir.isNonEmptyDirectory(srcIIP)) {
-  throw new IOException(
-  "Attempt to create an encryption zone for a non-empty directory.");
-}
 
 INode srcINode = srcIIP.getLastINode();
 if (!srcINode.isDirectory()) {
@@ -557,6 +553,10 @@ public class EncryptionZoneManager {
   "Directory " + srcIIP.getPath() + " is already an encryption zone.");
 }
 
+if (dir.isNonEmptyDirectory(srcIIP)) {
+  throw new IOException(
+  "Attempt to create an encryption zone for a non-empty directory.");
+}
 final HdfsProtos.ZoneEncryptionInfoProto proto =
 PBHelperClient.convert(suite, version, keyName);
 final XAttr ezXAttr = XAttrHelper

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70590cd8/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
index c109442..f603cc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
@@ -114,6 +114,25 @@
 
 
 
+  Test failure of creating EZ on an existing EZ
+  
+-fs NAMENODE -mkdir /foo
+-fs NAMENODE -ls /-
+-createZone -path /foo -keyName 
myKey
+-createZone -path /foo -keyName 
myKey
+  
+  
+-fs NAMENODE -rmdir /foo
+  
+  
+
+  SubstringComparator
+  Directory /foo is already an encryption 
zone
+
+  
+
+
+
   Test success of creating an EZ as a subdir of an existing 
EZ.
   
 -fs NAMENODE -mkdir /foo





[23/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
Added CHANGES/RELEASES/Jdiff for 3.1.0 release

Change-Id: Ied5067a996151c04d15cad46c46ac98b60c37b39
(cherry picked from commit 2d96570452a72569befdf9cfe9b90c9fa2e0e261)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6cf023f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6cf023f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6cf023f9

Branch: refs/heads/HDFS-7240
Commit: 6cf023f9b76c0ae6ad2f80ffb0a9f77888c553e9
Parents: 3121e8c
Author: Wangda Tan 
Authored: Thu Apr 5 15:50:55 2018 -0700
Committer: Wangda Tan 
Committed: Thu Apr 5 15:52:39 2018 -0700

--
 .../markdown/release/3.1.0/CHANGES.3.1.0.md |  1022 +
 .../release/3.1.0/RELEASENOTES.3.1.0.md |   199 +
 .../jdiff/Apache_Hadoop_HDFS_3.1.0.xml  |   676 +
 .../Apache_Hadoop_MapReduce_Common_3.1.0.xml|   113 +
 .../Apache_Hadoop_MapReduce_Core_3.1.0.xml  | 28075 +
 .../Apache_Hadoop_MapReduce_JobClient_3.1.0.xml |16 +
 .../jdiff/Apache_Hadoop_YARN_Client_3.1.0.xml   |  3146 ++
 .../jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml   |  3034 ++
 .../Apache_Hadoop_YARN_Server_Common_3.1.0.xml  |  1331 +
 9 files changed, 37612 insertions(+)
--






[15/50] [abbrv] hadoop git commit: HDFS-13353. RBF: TestRouterWebHDFSContractCreate failed. Contributed by Takanobu Asanuma.

2018-04-10 Thread xyao
HDFS-13353. RBF: TestRouterWebHDFSContractCreate failed. Contributed by 
Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3121e8c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3121e8c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3121e8c2

Branch: refs/heads/HDFS-7240
Commit: 3121e8c29361cb560df29188e1cd1061a5fc34c4
Parents: f32d627
Author: Wei Yan 
Authored: Thu Apr 5 12:00:52 2018 -0700
Committer: Wei Yan 
Committed: Thu Apr 5 12:00:52 2018 -0700

--
 .../hadoop/fs/contract/AbstractContractCreateTest.java  | 12 ++--
 .../src/test/resources/contract/webhdfs.xml |  5 +
 2 files changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3121e8c2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
index 2053f50..07c99e0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
@@ -244,12 +244,12 @@ public abstract class AbstractContractCreateTest extends
   out.write('a');
   out.flush();
   if (!fs.exists(path)) {
-
-if (isSupported(IS_BLOBSTORE)) {
-  // object store: downgrade to a skip so that the failure is visible
-  // in test results
-  skip("Filesystem is an object store and newly created files are not "
-  + "immediately visible");
+if (isSupported(IS_BLOBSTORE) ||
+isSupported(CREATE_VISIBILITY_DELAYED)) {
+  // object store or some file systems: downgrade to a skip so that the
+  // failure is visible in test results
+  skip("For object store or some file systems, newly created files are"
+  + " not immediately visible");
 }
 assertPathExists("expected path to be visible before file closed",
 path);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3121e8c2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/webhdfs.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/webhdfs.xml 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/webhdfs.xml
index f9b7d94..0cb6dd8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/webhdfs.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/webhdfs.xml
@@ -23,4 +23,9 @@
 false
   
 
+  
+fs.contract.create-visibility-delayed
+true
+  
+
 





[34/50] [abbrv] hadoop git commit: YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. Contributed by Suma Shivaprasad.

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/821b0de4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
index 6c6ac20..addec66 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.security.TestGroupsCaching;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
@@ -65,6 +66,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair
 .SimpleGroupsMapping;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
+import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.After;
 import org.junit.Assert;
@@ -89,6 +92,8 @@ import static 
org.apache.hadoop.yarn.server.resourcemanager.scheduler
 .capacity.CapacitySchedulerConfiguration.DOT;
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
 .capacity.CapacitySchedulerConfiguration.FAIR_APP_ORDERING_POLICY;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.capacity.CapacitySchedulerConfiguration.ROOT;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -99,7 +104,7 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   private static final Log LOG = LogFactory.getLog(
   TestCapacitySchedulerAutoCreatedQueueBase.class);
   public static final int GB = 1024;
-  public final static ContainerUpdates NULL_UPDATE_REQUESTS =
+  public static final ContainerUpdates NULL_UPDATE_REQUESTS =
   new ContainerUpdates();
 
   public static final String A = CapacitySchedulerConfiguration.ROOT + ".a";
@@ -112,9 +117,6 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final String B1 = B + ".b1";
   public static final String B2 = B + ".b2";
   public static final String B3 = B + ".b3";
-  public static final String C1 = C + ".c1";
-  public static final String C2 = C + ".c2";
-  public static final String C3 = C + ".c3";
   public static final float A_CAPACITY = 20f;
   public static final float B_CAPACITY = 40f;
   public static final float C_CAPACITY = 20f;
@@ -124,8 +126,6 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final float B1_CAPACITY = 60f;
   public static final float B2_CAPACITY = 20f;
   public static final float B3_CAPACITY = 20f;
-  public static final float C1_CAPACITY = 20f;
-  public static final float C2_CAPACITY = 20f;
 
   public static final int NODE_MEMORY = 16;
 
@@ -147,12 +147,14 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final String NODEL_LABEL_GPU = "GPU";
   public static final String NODEL_LABEL_SSD = "SSD";
 
+  public static final float NODE_LABEL_GPU_TEMPLATE_CAPACITY = 30.0f;
+  public static final float NODEL_LABEL_SSD_TEMPLATE_CAPACITY = 40.0f;
+
   protected MockRM mockRM = null;
   protected MockNM nm1 = null;
   protected MockNM nm2 = null;
   protected MockNM nm3 = null;
   protected CapacityScheduler cs;
-  private final TestCapacityScheduler tcs = new TestCapacityScheduler();
   protected SpyDispatcher dispatcher;
   private static EventHandler rmAppEventEventHandler;
 
@@ -215,15 +217,29 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   }
 
   protected void setupNodes(MockRM newMockRM) throws Exception {
+NodeLabel ssdLabel = Records.newRecord(NodeLabel.class);
+ssdLabel.setName(NODEL_LABEL_SSD);
+ssdLabel.setExclusivity(true);
+
 nm1 

[12/50] [abbrv] hadoop git commit: HDFS-13350. Negative legacy block ID will confuse Erasure Coding to be considered as striped block. (Contributed by Lei (Eddy) Xu).

2018-04-10 Thread xyao
HDFS-13350. Negative legacy block ID will confuse Erasure Coding to be 
considered as striped block. (Contributed by Lei (Eddy) Xu).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d737bf99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d737bf99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d737bf99

Branch: refs/heads/HDFS-7240
Commit: d737bf99d44ce34cd01baad716d23df269267c95
Parents: e52539b
Author: Lei Xu 
Authored: Wed Apr 4 15:56:17 2018 -0700
Committer: Lei Xu 
Committed: Thu Apr 5 09:59:10 2018 -0700

--
 .../server/blockmanagement/BlockIdManager.java  | 17 ++
 .../server/blockmanagement/BlockManager.java|  5 +-
 .../blockmanagement/BlockManagerSafeMode.java   |  2 +-
 .../hdfs/server/blockmanagement/BlocksMap.java  | 12 ++--
 .../blockmanagement/CorruptReplicasMap.java | 35 +--
 .../blockmanagement/InvalidateBlocks.java   | 13 +++--
 .../blockmanagement/TestBlockManager.java   | 61 
 .../blockmanagement/TestCorruptReplicaInfo.java | 48 ++-
 8 files changed, 136 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 321155b..5eebe8e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -239,6 +239,23 @@ public class BlockIdManager {
 legacyGenerationStampLimit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
   }
 
+  /**
+   * Return true if the block is a striped block.
+   *
+   * Before HDFS-4645, block ID was randomly generated (legacy), so it is
+   * possible that legacy block ID to be negative, which should not be
+   * considered as striped block ID.
+   *
+   * @see #isLegacyBlock(Block) detecting legacy block IDs.
+   */
+  public boolean isStripedBlock(Block block) {
+return isStripedBlockID(block.getBlockId()) && !isLegacyBlock(block);
+  }
+
+  /**
+   * See {@link #isStripedBlock(Block)}, we should not use this function alone
+   * to determine a block is striped block.
+   */
   public static boolean isStripedBlockID(long id) {
 return BlockType.fromBlockId(id) == STRIPED;
   }
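
Why the extra check matters: striped (erasure-coded) block IDs are drawn
from the negative range, but a pre-HDFS-4645 randomly generated ID can be
negative too. A toy sketch of the trap, assuming only that striped IDs are
sign-bit-encoded as in BlockType.fromBlockId:

public class StripedIdDemo {
  // Stand-in for BlockType.fromBlockId(id) == STRIPED.
  static boolean isStripedBlockID(long id) {
    return id < 0;
  }

  public static void main(String[] args) {
    long legacyRandomId = -1234567890123456789L; // a plausible legacy ID
    // The ID check alone misclassifies the legacy block as striped:
    System.out.println(isStripedBlockID(legacyRandomId)); // true
    // Hence isStripedBlock(Block) must also consult isLegacyBlock(Block),
    // which compares the generation stamp against the legacy stamp limit.
  }
}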

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f49e1d8..76a7781 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -448,7 +448,8 @@ public class BlockManager implements BlockStatsMXBean {
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 
1000L;
 invalidateBlocks = new InvalidateBlocks(
 datanodeManager.getBlockInvalidateLimit(),
-startupDelayBlockDeletionInMs);
+startupDelayBlockDeletionInMs,
+blockIdManager);
 
 // Compute the map capacity by allocating 2% of total memory
 blocksMap = new BlocksMap(
@@ -1677,7 +1678,7 @@ public class BlockManager implements BlockStatsMXBean {
   corrupted.setBlockId(b.getStored().getBlockId());
 }
 corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.getReason(),
-b.getReasonCode());
+b.getReasonCode(), b.getStored().isStriped());
 
 NumberReplicas numberOfReplicas = countNodes(b.getStored());
 boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
 

[29/50] [abbrv] hadoop git commit: YARN-1151. Ability to configure auxiliary services from HDFS-based JAR files. (Xuan Gong via wangda)

2018-04-10 Thread xyao
YARN-1151. Ability to configure auxiliary services from HDFS-based JAR files. 
(Xuan Gong via wangda)

Change-Id: Ied37ff11e507fc86847753ba79486652c8fadfe9


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00ebec89
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00ebec89
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00ebec89

Branch: refs/heads/HDFS-7240
Commit: 00ebec89f101347a5da44657e388b30c57ed9deb
Parents: d4e63cc
Author: Wangda Tan 
Authored: Fri Apr 6 21:25:57 2018 -0700
Committer: Wangda Tan 
Committed: Fri Apr 6 21:25:57 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   3 +
 .../containermanager/AuxServices.java   | 160 +-
 .../containermanager/ContainerManagerImpl.java  |   3 +-
 .../containermanager/TestAuxServices.java   | 167 +--
 4 files changed, 313 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00ebec89/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 7a2a3ce..2590b6f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2106,6 +2106,9 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_AUX_SERVICES_CLASSPATH =
   NM_AUX_SERVICES + ".%s.classpath";
 
+  public static final String NM_AUX_SERVICE_REMOTE_CLASSPATH =
+  NM_AUX_SERVICES + ".%s.remote-classpath";
+
   public static final String NM_AUX_SERVICES_SYSTEM_CLASSES =
   NM_AUX_SERVICES + ".%s.system-classes";
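
A hypothetical usage sketch of the new key (the service name
"my_aux_service" and the HDFS jar path are made-up values; the %s
placeholder is assumed to be the service name, as with the sibling
".classpath" key):

import org.apache.hadoop.conf.Configuration;

public class RemoteClasspathDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("yarn.nodemanager.aux-services", "my_aux_service");
    // Point the aux service at a jar kept in HDFS rather than on the
    // NodeManager's local classpath.
    conf.set(String.format(
        "yarn.nodemanager.aux-services.%s.remote-classpath",
        "my_aux_service"),
        "hdfs:///aux-services/my-aux-service.jar");
  }
}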
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00ebec89/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
index 57cca50..c8b7a76 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager;
 
+import java.io.IOException;
+import java.net.URI;
 import java.nio.ByteBuffer;
 import java.util.Collection;
 import java.util.Collections;
@@ -29,45 +31,70 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.service.Service;
 import org.apache.hadoop.service.ServiceStateChangeListener;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
 import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
 import org.apache.hadoop.yarn.server.api.AuxiliaryLocalPathHandler;
 import org.apache.hadoop.yarn.server.api.AuxiliaryService;
 import org.apache.hadoop.yarn.server.api.ContainerInitializationContext;
 

[22/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/CHANGES.3.1.0.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/CHANGES.3.1.0.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/CHANGES.3.1.0.md
new file mode 100644
index 000..3ccbae4
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/CHANGES.3.1.0.md
@@ -0,0 +1,1022 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.1.0 - 2018-03-30
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-15008](https://issues.apache.org/jira/browse/HADOOP-15008) | Metrics sinks may emit too frequently if multiple sink periods are configured |  Minor | metrics | Erik Krogen | Erik Krogen |
+| [HDFS-12825](https://issues.apache.org/jira/browse/HDFS-12825) | Fsck report shows config key name for min replication issues |  Minor | hdfs | Harshakiran Reddy | Gabor Bota |
+| [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | RBF: Document Router and State Store metrics |  Major | documentation | Yiqun Lin | Yiqun Lin |
+| [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | RBF: Add ACL support for mount table |  Major | . | Yiqun Lin | Yiqun Lin |
+| [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath |  Major | timelineclient, timelinereader, timelineserver | Vrushali C | Varun Saxena |
+| [HADOOP-13282](https://issues.apache.org/jira/browse/HADOOP-13282) | S3 blob etags to be made visible in S3A status/getFileChecksum() calls |  Minor | fs/s3 | Steve Loughran | Steve Loughran |
+| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store |  Minor | documentation | Yiqun Lin | Yiqun Lin |
+| [YARN-7677](https://issues.apache.org/jira/browse/YARN-7677) | Docker image cannot set HADOOP\_CONF\_DIR |  Major | . | Eric Badger | Jim Brennan |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix doc error setting up client |  Major | federation | tartarus | tartarus |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-15005](https://issues.apache.org/jira/browse/HADOOP-15005) | Support meta tag element in Hadoop XML configurations |  Major | . | Ajay Kumar | Ajay Kumar |
+| [YARN-3926](https://issues.apache.org/jira/browse/YARN-3926) | [Umbrella] Extend the YARN resource model for easier resource-type management and profiles |  Major | nodemanager, resourcemanager | Varun Vasudev | Varun Vasudev |
+| [HDFS-7877](https://issues.apache.org/jira/browse/HDFS-7877) | [Umbrella] Support maintenance state for datanodes |  Major | datanode, namenode | Ming Ma | Ming Ma |
+| [HADOOP-13055](https://issues.apache.org/jira/browse/HADOOP-13055) | Implement linkMergeSlash and linkFallback for ViewFileSystem |  Major | fs, viewfs | Zhe Zhang | Manoj Govindassamy |
+| [YARN-6871](https://issues.apache.org/jira/browse/YARN-6871) | Add additional deSelects params in RMWebServices#getAppReport |  Major | resourcemanager, router | Giovanni Matteo Fumarola | Tanuj Nayak |
+| [HADOOP-14840](https://issues.apache.org/jira/browse/HADOOP-14840) | Tool to estimate resource requirements of an application pipeline based on prior executions |  Major | tools | Subru Krishnan | Rui Li |
+| [HDFS-206](https://issues.apache.org/jira/browse/HDFS-206) | Support for head in FSShell |  Minor | . | Olga Natkovich | Gabor Bota |
+| [YARN-5079](https://issues.apache.org/jira/browse/YARN-5079) | [Umbrella] Native YARN framework layer for services and beyond |  Major | . | Vinod Kumar Vavilapalli |  |
+| [YARN-4757](https://issues.apache.org/jira/browse/YARN-4757) | [Umbrella] Simplified discovery of services via DNS mechanisms |  Major | . | Vinod Kumar Vavilapalli |  |
+| [HADOOP-13786](https://issues.apache.org/jira/browse/HADOOP-13786) | Add S3A committer for zero-rename commits to S3 endpoints |  Major | fs/s3 | Steve Loughran | Steve Loughran |
+| [HDFS-9806](https://issues.apache.org/jira/browse/HDFS-9806) | Allow HDFS block replicas to be provided by an external storage system |  Major | . | Chris Douglas |  |
+| [YARN-6592](https://issues.apache.org/jira/browse/YARN-6592) | [Umbrella] Rich placement constraints in YARN |  Major | . | Konstantinos Karanasos |  |
+| [HDFS-12998](https://issues.apache.org/jira/browse/HDFS-12998) | SnapshotDiff - Provide an iterator-based listing API for calculating

[27/50] [abbrv] hadoop git commit: YARN-8110. AMRMProxy recover should catch for all throwable to avoid premature exit. (Botong Huang via Subru).

2018-04-10 Thread xyao
YARN-8110. AMRMProxy recover should catch for all throwable to avoid premature 
exit. (Botong Huang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00905efa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00905efa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00905efa

Branch: refs/heads/HDFS-7240
Commit: 00905efab22edd9857e0a3828c201bf70f03cb96
Parents: 024d7c0
Author: Subru Krishnan 
Authored: Fri Apr 6 16:31:16 2018 -0700
Committer: Subru Krishnan 
Committed: Fri Apr 6 16:31:16 2018 -0700

--
 .../nodemanager/amrmproxy/AMRMProxyService.java |  2 +-
 .../amrmproxy/BaseAMRMProxyTest.java|  5 +++
 .../amrmproxy/TestAMRMProxyService.java | 42 
 3 files changed, 48 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00905efa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index 815e39b..86fbb72 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -261,7 +261,7 @@ public class AMRMProxyService extends CompositeService 
implements
 // Create the intercepter pipeline for the AM
 initializePipeline(attemptId, user, amrmToken, localToken,
 entry.getValue(), true, amCred);
-  } catch (IOException e) {
+  } catch (Throwable e) {
 LOG.error("Exception when recovering " + attemptId
 + ", removing it from NMStateStore and move on", e);
 this.nmContext.getNMStateStore().removeAMRMProxyAppContext(attemptId);
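
The essence of the fix is visible in the hunk above: recovery now catches Throwable per stored attempt instead of only IOException, so one unrecoverable record is dropped rather than aborting NodeManager startup. A minimal standalone sketch of that pattern (the Store and recoverOne names here are illustrative, not the Hadoop API):

import java.util.List;

public class RecoveryLoop {
  /** Hypothetical store of persisted app-attempt ids; not the NM API. */
  interface Store {
    List<String> recoverEntries();
    void remove(String id);
  }

  public void recoverAll(Store store) {
    for (String id : store.recoverEntries()) {
      try {
        recoverOne(id);
      } catch (Throwable t) {
        // Catch Throwable, not just IOException: a RuntimeException or
        // Error from one corrupt entry must not abort the whole recovery.
        System.err.println("Exception when recovering " + id
            + ", removing it and moving on: " + t);
        store.remove(id);
      }
    }
  }

  private void recoverOne(String id) {
    // Rebuild the interceptor pipeline for this attempt; may throw anything.
  }
}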

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00905efa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
index 4b1a887..677732d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
@@ -112,6 +112,11 @@ public abstract class BaseAMRMProxyTest {
 return this.amrmProxyService;
   }
 
+  protected Context getNMContext() {
+Assert.assertNotNull(this.nmContext);
+return this.nmContext;
+  }
+
   @Before
   public void setUp() throws IOException {
 this.conf = createConfiguration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00905efa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
index b955311..1eefbd5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
+++ 

[33/50] [abbrv] hadoop git commit: HDFS-13402. RBF: Fix java doc for StateStoreFileSystemImpl. Contributed by Yiran Wu.

2018-04-10 Thread xyao
HDFS-13402. RBF: Fix java doc for StateStoreFileSystemImpl. Contributed by 
Yiran Wu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5700556c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5700556c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5700556c

Branch: refs/heads/HDFS-7240
Commit: 5700556cd65a558f4393e05acb7ea8db3ccd2f36
Parents: 0b345b7
Author: Yiqun Lin 
Authored: Sun Apr 8 12:01:55 2018 +0800
Committer: Yiqun Lin 
Committed: Sun Apr 8 12:01:55 2018 +0800

--
 .../federation/store/driver/impl/StateStoreFileSystemImpl.java | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5700556c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
index ad822fb..2e1ff8f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
@@ -35,13 +35,15 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * StateStoreDriver} implementation based on a filesystem. The most common uses
- * HDFS as a backend.
+ * {@link StateStoreDriver} implementation based on a filesystem. The common
+ * implementation uses HDFS as a backend. The path can be specified setting
+ * dfs.federation.router.driver.fs.path=hdfs://host:port/path/to/store.
  */
 public class StateStoreFileSystemImpl extends StateStoreFileBaseImpl {
 





[42/50] [abbrv] hadoop git commit: HDFS-13384. RBF: Improve timeout RPC call mechanism. Contributed by Inigo Goiri.

2018-04-10 Thread xyao
HDFS-13384. RBF: Improve timeout RPC call mechanism. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e87be8a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e87be8a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e87be8a2

Branch: refs/heads/HDFS-7240
Commit: e87be8a2a49573897e40bfdf43541e3635e35c98
Parents: a92200f
Author: Yiqun Lin 
Authored: Tue Apr 10 15:34:42 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Apr 10 15:34:42 2018 +0800

--
 .../federation/metrics/NamenodeBeanMetrics.java |   3 +
 .../federation/router/RouterRpcClient.java  |   2 +-
 .../router/SubClusterTimeoutException.java  |  33 +
 .../server/federation/MiniRouterDFSCluster.java |  31 -
 .../router/TestRouterRPCClientRetries.java  | 126 ++-
 5 files changed, 192 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e87be8a2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
index e8c6c82..4d22ae7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
@@ -45,6 +45,7 @@ import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.Router;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
+import 
org.apache.hadoop.hdfs.server.federation.router.SubClusterTimeoutException;
 import org.apache.hadoop.hdfs.server.federation.store.MembershipStore;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoRequest;
@@ -396,6 +397,8 @@ public class NamenodeBeanMetrics
   }
 } catch (StandbyException e) {
   LOG.error("Cannot get {} nodes, Router in safe mode", type);
+} catch (SubClusterTimeoutException e) {
+  LOG.error("Cannot get {} nodes, subclusters timed out responding", type);
 } catch (IOException e) {
   LOG.error("Cannot get " + type + " nodes", e);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e87be8a2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index 4723b4c..e2c9cb4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -1007,7 +1007,7 @@ public class RouterRpcClient {
   String msg =
   "Invocation to \"" + loc + "\" for \"" + method + "\" timed out";
   LOG.error(msg);
-  IOException ioe = new IOException(msg);
+  IOException ioe = new SubClusterTimeoutException(msg);
   exceptions.put(location, ioe);
 } catch (ExecutionException ex) {
   Throwable cause = ex.getCause();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e87be8a2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/SubClusterTimeoutException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/SubClusterTimeoutException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/SubClusterTimeoutException.java
new file mode 100644
index 000..dac5bd6
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/SubClusterTimeoutException.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more 
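
The body of the new class is cut off above. Judging only from how it is used in this patch (constructed with a message in RouterRpcClient and caught before IOException in NamenodeBeanMetrics), a minimal shape consistent with that usage would be:

import java.io.IOException;

/** Assumed minimal shape; the actual file body is truncated in this mail. */
public class SubClusterTimeoutException extends IOException {
  private static final long serialVersionUID = 1L;

  public SubClusterTimeoutException(String msg) {
    super(msg);
  }
}

Making the timeout a dedicated IOException subtype lets callers handle slow subclusters separately while leaving existing catch (IOException) paths intact.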

[43/50] [abbrv] hadoop git commit: HADOOP-15374. Add links of the new features of 3.1.0 to the top page

2018-04-10 Thread xyao
HADOOP-15374. Add links of the new features of 3.1.0 to the top page

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7623cc5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7623cc5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7623cc5a

Branch: refs/heads/HDFS-7240
Commit: 7623cc5a982219fff2bdd9a84650f45106cbdf47
Parents: e87be8a
Author: Takanobu Asanuma 
Authored: Tue Apr 10 18:59:40 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Apr 10 18:59:40 2018 +0900

--
 hadoop-project/src/site/site.xml | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7623cc5a/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index b5ecd73..fdf5583 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -106,6 +106,7 @@
   
   
   
+  
 
 
 
@@ -147,6 +148,9 @@
   
   
   
+  
+  
+  
 
 
 





[11/50] [abbrv] hadoop git commit: HADOOP-14855. Hadoop scripts may errantly believe a daemon is still running, preventing it from starting. Contributed by Robert Kanter.

2018-04-10 Thread xyao
HADOOP-14855. Hadoop scripts may errantly believe a daemon is still running, 
preventing it from starting. Contributed by Robert Kanter.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e52539b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e52539b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e52539b4

Branch: refs/heads/HDFS-7240
Commit: e52539b46fb13db423490fe02d46e9fae72d72fe
Parents: 345e762
Author: Miklos Szegedi 
Authored: Wed Apr 4 15:35:58 2018 -0700
Committer: Miklos Szegedi 
Committed: Wed Apr 4 15:35:58 2018 -0700

--
 .../hadoop-common/src/main/bin/hadoop-functions.sh  | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e52539b4/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 9ea4587..9ef48b6 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -1725,11 +1725,16 @@ function hadoop_status_daemon
   shift
 
   local pid
+  local pspid
 
   if [[ -f "${pidfile}" ]]; then
 pid=$(cat "${pidfile}")
-if ps -p "${pid}" > /dev/null 2>&1; then
-  return 0
+if pspid=$(ps -o args= -p"${pid}" 2>/dev/null); then
+  # this is to check that the running process we found is actually the same
+  # daemon that we're interested in
+  if [[ ${pspid} =~ -Dproc_${daemonname} ]]; then
+return 0
+  fi
 fi
 return 1
   fi
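
The shell change guards against PID reuse: a pidfile may point at a live but unrelated process, so the daemon's -Dproc_<name> JVM flag is checked in the process arguments before reporting "running". For illustration, the same check expressed in Java (a sketch mirroring the script's ps invocation, not Hadoop code):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public final class DaemonStatus {
  public static boolean isRunning(String pidFile, String daemonName)
      throws Exception {
    String pid = new String(Files.readAllBytes(Paths.get(pidFile)),
        StandardCharsets.UTF_8).trim();
    // ps -o args= -p <pid>: prints only the command line, no header.
    Process ps = new ProcessBuilder("ps", "-o", "args=", "-p", pid).start();
    try (BufferedReader r = new BufferedReader(
        new InputStreamReader(ps.getInputStream(), StandardCharsets.UTF_8))) {
      String cmdline = r.readLine();
      // Hadoop daemons run with -Dproc_<daemonname> on the JVM command
      // line, so its presence confirms the PID was not recycled.
      return cmdline != null && cmdline.contains("-Dproc_" + daemonName);
    }
  }

  private DaemonStatus() { }
}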





[40/50] [abbrv] hadoop git commit: HADOOP-15328. Fix the typo in HttpAuthentication.md. Contributed by fang zhenyi

2018-04-10 Thread xyao
HADOOP-15328. Fix the typo in HttpAuthentication.md. Contributed by fang zhenyi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0006346a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0006346a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0006346a

Branch: refs/heads/HDFS-7240
Commit: 0006346abe209a07d149fe5fd5a25cda0af26e07
Parents: 907919d
Author: Bharat 
Authored: Mon Apr 9 16:37:49 2018 -0700
Committer: Bharat 
Committed: Mon Apr 9 16:37:49 2018 -0700

--
 .../hadoop-common/src/site/markdown/HttpAuthentication.md  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0006346a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md
index 44d814c..721abea 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md
@@ -28,7 +28,7 @@ Hadoop HTTP web-consoles can be configured to require 
Kerberos authentication us
 
 In addition, Hadoop HTTP web-consoles support the equivalent of Hadoop's 
Pseudo/Simple authentication. If this option is enabled, the user name must be 
specified in the first browser interaction using the user.name query string 
parameter. e.g. `http://localhost:8088/cluster?user.name=babu`.
 
-If a custom authentication mechanism is required for the HTTP web-consoles, it 
is possible to implement a plugin to support the alternate authentication 
mechanism (refer to Hadoop hadoop-auth for details on writing an 
`AuthenticatorHandler`).
+If a custom authentication mechanism is required for the HTTP web-consoles, it 
is possible to implement a plugin to support the alternate authentication 
mechanism (refer to Hadoop hadoop-auth for details on writing an 
`AuthenticationHandler`).
 
 The next section describes how to configure Hadoop HTTP web-consoles to 
require user authentication.
 





[35/50] [abbrv] hadoop git commit: YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. Contributed by Suma Shivaprasad.

2018-04-10 Thread xyao
YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. 
Contributed by Suma Shivaprasad.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/821b0de4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/821b0de4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/821b0de4

Branch: refs/heads/HDFS-7240
Commit: 821b0de4c59156d4a65112de03ba3e7e1c88e309
Parents: 5700556
Author: Sunil G 
Authored: Mon Apr 9 21:17:22 2018 +0530
Committer: Sunil G 
Committed: Mon Apr 9 21:17:22 2018 +0530

--
 .../server/resourcemanager/RMServerUtils.java   |   5 +-
 .../rmapp/attempt/RMAppAttemptImpl.java |  47 ++
 .../resourcemanager/scheduler/Allocation.java   |  12 +
 .../scheduler/SchedulerUtils.java   |  33 +-
 .../capacity/AutoCreatedLeafQueue.java  |   3 +-
 .../AutoCreatedQueueManagementPolicy.java   |  12 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +
 .../CapacitySchedulerConfiguration.java |  28 +
 .../scheduler/capacity/LeafQueue.java   |  11 +
 .../scheduler/capacity/ManagedParentQueue.java  |   5 +-
 .../GuaranteedOrZeroCapacityOverTimePolicy.java | 573 +++
 .../placement/PendingAskUpdateResult.java   |   8 +
 .../yarn/server/resourcemanager/MockNM.java |  15 +
 .../server/resourcemanager/TestAppManager.java  |  20 +-
 ...stCapacitySchedulerAutoCreatedQueueBase.java | 241 +---
 .../TestCapacitySchedulerAutoQueueCreation.java | 233 +---
 .../TestQueueManagementDynamicEditPolicy.java   |  30 +-
 17 files changed, 834 insertions(+), 444 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/821b0de4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index 33451295..ab6bbcf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -236,13 +236,14 @@ public class RMServerUtils {
*/
   public static void normalizeAndValidateRequests(List ask,
   Resource maximumResource, String queueName, YarnScheduler scheduler,
-  RMContext rmContext)
-  throws InvalidResourceRequestException {
+  RMContext rmContext) throws InvalidResourceRequestException {
 // Get queue from scheduler
 QueueInfo queueInfo = null;
 try {
   queueInfo = scheduler.getQueueInfo(queueName, false, false);
 } catch (IOException e) {
+  //Queue may not exist since it could be auto-created in case of
+  // dynamic queues
 }
 
 for (ResourceRequest resReq : ask) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/821b0de4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index c23b135..1b1e2c4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 

[32/50] [abbrv] hadoop git commit: HADOOP-15366. Add a helper shutdown routine in HadoopExecutor to ensure clean shutdown. Contributed by Shashikant Banerjee.

2018-04-10 Thread xyao
HADOOP-15366. Add a helper shutdown routine in HadoopExecutor to ensure clean 
shutdown. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b345b76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b345b76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b345b76

Branch: refs/heads/HDFS-7240
Commit: 0b345b765370515d7222154ad5cae9b86f137a76
Parents: eb47c3d
Author: Mukul Kumar Singh 
Authored: Sat Apr 7 16:29:01 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Sat Apr 7 16:29:01 2018 +0530

--
 .../hadoop/util/concurrent/HadoopExecutors.java | 34 +++-
 1 file changed, 33 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b345b76/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
index 1bc6976..7a04c30 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
@@ -27,7 +27,7 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
-
+import org.slf4j.Logger;
 
 /** Factory methods for ExecutorService, ScheduledExecutorService instances.
  * These executor service instances provide additional functionality (e.g
@@ -91,6 +91,38 @@ public final class HadoopExecutors {
 return Executors.newSingleThreadScheduledExecutor(threadFactory);
   }
 
+  /**
+   * Helper routine to shut down an ExecutorService.
+   *
+   * @param executorService - the ExecutorService to shut down
+   * @param logger  - Logger
+   * @param timeout - Timeout
+   * @param unit - TimeUnit, generally seconds.
+   */
+  public static void shutdown(ExecutorService executorService, Logger logger,
+  long timeout, TimeUnit unit) {
+try {
+  if (executorService != null) {
+executorService.shutdown();
+try {
+  if (!executorService.awaitTermination(timeout, unit)) {
+executorService.shutdownNow();
+  }
+
+  if (!executorService.awaitTermination(timeout, unit)) {
+logger.error("Unable to shutdown properly.");
+  }
+} catch (InterruptedException e) {
+  logger.error("Error attempting to shutdown.", e);
+  executorService.shutdownNow();
+}
+  }
+} catch (Exception e) {
+  logger.error("Error during shutdown: ", e);
+  throw e;
+}
+  }
+
   //disable instantiation
   private HadoopExecutors() { }
 }
\ No newline at end of file
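
A usage sketch of the new helper, relying only on the signature shown in the hunk above; the pool and task are placeholders:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.concurrent.HadoopExecutors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ShutdownExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(ShutdownExample.class);

  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    pool.submit(() -> LOG.info("doing work"));
    // One call replaces the usual shutdown/awaitTermination/shutdownNow
    // dance, logging shutdown problems instead of propagating them.
    HadoopExecutors.shutdown(pool, LOG, 10, TimeUnit.SECONDS);
  }
}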





[06/50] [abbrv] hadoop git commit: YARN-8106. Update LogAggregationIndexedFileController to use readFull instead read to avoid IOException while loading log meta. (Prabhu Joseph via wangda)

2018-04-10 Thread xyao
YARN-8106. Update LogAggregationIndexedFileController to use readFull instead 
read to avoid IOException while loading log meta. (Prabhu Joseph via wangda)

Change-Id: I63a65f73f8d1636e2c99ed9c8c2bbd05efcff80f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b779f4f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b779f4f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b779f4f0

Branch: refs/heads/HDFS-7240
Commit: b779f4f0f614fe47e05bc2be5494cf3cbcf6f63c
Parents: f7a17b0
Author: Wangda Tan 
Authored: Tue Apr 3 21:06:24 2018 -0700
Committer: Wangda Tan 
Committed: Tue Apr 3 21:06:24 2018 -0700

--
 .../filecontroller/ifile/LogAggregationIndexedFileController.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b779f4f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
index 5bba2e0..a8ae06f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
@@ -865,7 +865,8 @@ public class LogAggregationIndexedFileController
   byte[] array = new byte[offset];
   fsDataIStream.seek(
   fileLength - offset - Integer.SIZE/ Byte.SIZE - UUID_LENGTH);
-  int actual = fsDataIStream.read(array);
+  fsDataIStream.readFully(array);
+  int actual = array.length;
   if (actual != offset) {
 throw new IOException("Error on loading log meta from "
 + remoteLogPath);





[31/50] [abbrv] hadoop git commit: YARN-7905. Parent directory permission incorrect during public localization. Contributed by Bilwa S T.

2018-04-10 Thread xyao
YARN-7905. Parent directory permission incorrect during public localization. 
Contributed by Bilwa S T.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb47c3de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb47c3de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb47c3de

Branch: refs/heads/HDFS-7240
Commit: eb47c3de74ba4b8b3ef47eaf3a44e5562fd22fc9
Parents: 70590cd
Author: bibinchundatt 
Authored: Sat Apr 7 12:13:00 2018 +0530
Committer: bibinchundatt 
Committed: Sat Apr 7 12:26:29 2018 +0530

--
 .../localizer/ResourceLocalizationService.java  |  20 +++
 .../TestResourceLocalizationService.java| 125 +++
 2 files changed, 145 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb47c3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
old mode 100644
new mode 100755
index 29fc747..ddae2ae
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -152,6 +152,8 @@ public class ResourceLocalizationService extends 
CompositeService
LoggerFactory.getLogger(ResourceLocalizationService.class);
   public static final String NM_PRIVATE_DIR = "nmPrivate";
   public static final FsPermission NM_PRIVATE_PERM = new FsPermission((short) 
0700);
+  private static final FsPermission PUBLIC_FILECACHE_FOLDER_PERMS =
+  new FsPermission((short) 0755);
 
   private Server server;
   private InetSocketAddress localizationServerAddress;
@@ -881,6 +883,7 @@ public class ResourceLocalizationService extends 
CompositeService
 publicRsrc.getPathForLocalization(key, publicRootPath,
 delService);
 if (!publicDirDestPath.getParent().equals(publicRootPath)) {
+  createParentDirs(publicDirDestPath, publicRootPath);
   if (diskValidator != null) {
 diskValidator.checkStatus(
 new File(publicDirDestPath.toUri().getPath()));
@@ -932,6 +935,23 @@ public class ResourceLocalizationService extends 
CompositeService
   }
 }
 
+private void createParentDirs(Path destDirPath, Path destDirRoot)
+throws IOException {
+  if (destDirPath == null || destDirPath.equals(destDirRoot)) {
+return;
+  }
+  createParentDirs(destDirPath.getParent(), destDirRoot);
+  createDir(destDirPath, PUBLIC_FILECACHE_FOLDER_PERMS);
+}
+
+private void createDir(Path dirPath, FsPermission perms)
+throws IOException {
+  lfs.mkdir(dirPath, perms, false);
+  if (!perms.equals(perms.applyUMask(lfs.getUMask()))) {
+lfs.setPermission(dirPath, perms);
+  }
+}
+
 @Override
 public void run() {
   try {
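
The createDir helper above encodes a umask-safe pattern: mkdir applies the process umask, so the requested permissions are re-applied explicitly whenever the umask would have stripped bits. A self-contained sketch of the same pattern (FileContext-based and hedged; not the NodeManager code itself):

import java.io.IOException;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public final class UmaskSafeMkdir {
  public static void createDir(FileContext lfs, Path dir, FsPermission perms)
      throws IOException {
    lfs.mkdir(dir, perms, false);
    // If the umask stripped bits (e.g. requested 0755 but umask 027 left
    // 0750), restore the requested permissions explicitly.
    if (!perms.equals(perms.applyUMask(lfs.getUMask()))) {
      lfs.setPermission(dir, perms);
    }
  }

  private UmaskSafeMkdir() { }
}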

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb47c3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
old mode 100644
new mode 100755
index d863c6a..4d03f15
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ 

[39/50] [abbrv] hadoop git commit: YARN-7667. Docker Stop grace period should be configurable. Contributed by Eric Badger

2018-04-10 Thread xyao
YARN-7667. Docker Stop grace period should be configurable. Contributed by Eric 
Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/907919d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/907919d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/907919d2

Branch: refs/heads/HDFS-7240
Commit: 907919d28c1b7e4496d189b46ecbb86a10d41339
Parents: 9059376
Author: Jason Lowe 
Authored: Mon Apr 9 17:19:21 2018 -0500
Committer: Jason Lowe 
Committed: Mon Apr 9 17:19:21 2018 -0500

--
 .../apache/hadoop/yarn/conf/YarnConfiguration.java| 14 ++
 .../src/main/resources/yarn-default.xml   |  8 
 .../linux/runtime/DockerLinuxContainerRuntime.java|  8 +++-
 .../linux/runtime/TestDockerContainerRuntime.java | 14 +++---
 4 files changed, 40 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/907919d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 2590b6f..d2a71bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1951,6 +1951,20 @@ public class YarnConfiguration extends Configuration {
*/
   public static final boolean DEFAULT_NM_DOCKER_ALLOW_DELAYED_REMOVAL = false;
 
+  /**
+   * A configurable value to pass to the Docker Stop command. This value
+   * defines the number of seconds between the docker stop command sending
+   * a SIGTERM and a SIGKILL.
+   */
+  public static final String NM_DOCKER_STOP_GRACE_PERIOD =
+  DOCKER_CONTAINER_RUNTIME_PREFIX + "stop.grace-period";
+
+  /**
+   * The default value for the grace period between the SIGTERM and the
+   * SIGKILL in the Docker Stop command.
+   */
+  public static final int DEFAULT_NM_DOCKER_STOP_GRACE_PERIOD = 10;
+
   /** The mode in which the Java Container Sandbox should run detailed by
*  the JavaSandboxLinuxContainerRuntime. */
   public static final String YARN_CONTAINER_SANDBOX =
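
A short sketch of how a runtime would read the new knob, using only the constants introduced in this hunk (the surrounding class is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class GracePeriodExample {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    int grace = conf.getInt(
        YarnConfiguration.NM_DOCKER_STOP_GRACE_PERIOD,
        YarnConfiguration.DEFAULT_NM_DOCKER_STOP_GRACE_PERIOD);
    // Roughly "docker stop -t <grace>": seconds between SIGTERM and SIGKILL.
    System.out.println("docker stop grace period: " + grace + "s");
  }
}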

http://git-wip-us.apache.org/repos/asf/hadoop/blob/907919d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 81b6658..def0816 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1787,6 +1787,14 @@
   </property>
 
   <property>
+    <description>A configurable value to pass to the Docker Stop command. This value
+      defines the number of seconds between the docker stop command sending
+      a SIGTERM and a SIGKILL.</description>
+    <name>yarn.nodemanager.runtime.linux.docker.stop.grace-period</name>
+    <value>10</value>
+  </property>
+
+  <property>
     <description>The mode in which the Java Container Sandbox should run detailed by
       the JavaSandboxLinuxContainerRuntime.</description>
     <name>yarn.nodemanager.runtime.linux.sandbox-mode</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/907919d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 0290493..132ae38 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 

[41/50] [abbrv] hadoop git commit: HDFS-13410. RBF: Support federation with no subclusters. Contributed by Inigo Goiri.

2018-04-10 Thread xyao
HDFS-13410. RBF: Support federation with no subclusters. Contributed by Inigo 
Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a92200f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a92200f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a92200f4

Branch: refs/heads/HDFS-7240
Commit: a92200f4a6cec57b7080d1cd6e2a20d79d772dd6
Parents: 0006346
Author: Yiqun Lin 
Authored: Tue Apr 10 14:29:28 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Apr 10 14:29:28 2018 +0800

--
 .../federation/router/RouterRpcClient.java  |  4 +-
 .../server/federation/router/TestRouter.java| 70 +++-
 .../server/federation/router/TestRouterRpc.java | 21 +-
 3 files changed, 77 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a92200f4/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index ecb9f50..4723b4c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -932,7 +932,9 @@ public class RouterRpcClient {
 final UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
 final Method m = method.getMethod();
 
-if (locations.size() == 1) {
+if (locations.isEmpty()) {
+  throw new IOException("No remote locations available");
+} else if (locations.size() == 1) {
   // Shortcut, just one call
   T location = locations.iterator().next();
   String ns = location.getNameserviceId();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a92200f4/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java
index 39398f7..f8cf009 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java
@@ -17,23 +17,25 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.net.URISyntaxException;
+import java.net.InetSocketAddress;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.federation.MockResolver;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.service.Service.STATE;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -77,27 +79,31 @@ public class TestRouter {
 "0.0.0.0");
   }
 
-  @AfterClass
-  public static void destroy() {
-  }
-
-  @Before
-  public void setup() throws IOException, URISyntaxException {
-  }
-
-  @After
-  public void cleanup() {
-  }
-
   private static void testRouterStartup(Configuration routerConfig)
   throws InterruptedException, IOException {
 Router router = new Router();
 assertEquals(STATE.NOTINITED, router.getServiceState());
+assertEquals(RouterServiceState.UNINITIALIZED, router.getRouterState());
 router.init(routerConfig);
+if (routerConfig.getBoolean(
+RBFConfigKeys.DFS_ROUTER_SAFEMODE_ENABLE,
+RBFConfigKeys.DFS_ROUTER_SAFEMODE_ENABLE_DEFAULT)) {
+  assertEquals(RouterServiceState.SAFEMODE, 

[24/50] [abbrv] hadoop git commit: HADOOP-14759 S3GuardTool prune to prune specific bucket entries. Contributed by Gabor Bota.

2018-04-10 Thread xyao
HADOOP-14759 S3GuardTool prune to prune specific bucket entries. Contributed by 
Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea3849f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea3849f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea3849f0

Branch: refs/heads/HDFS-7240
Commit: ea3849f0ccd32b2f8acbc6107de3b9e91803ed4a
Parents: 6cf023f
Author: Aaron Fabbri 
Authored: Thu Apr 5 20:23:17 2018 -0700
Committer: Aaron Fabbri 
Committed: Thu Apr 5 20:23:17 2018 -0700

--
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   | 18 +
 .../fs/s3a/s3guard/LocalMetadataStore.java  | 17 +++-
 .../hadoop/fs/s3a/s3guard/MetadataStore.java| 12 +++
 .../fs/s3a/s3guard/NullMetadataStore.java   |  4 
 .../hadoop/fs/s3a/s3guard/S3GuardTool.java  | 10 +-
 .../site/markdown/tools/hadoop-aws/s3guard.md   | 11 --
 .../s3guard/AbstractS3GuardToolTestBase.java| 21 
 7 files changed, 73 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea3849f0/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 4c4043e..c579b3c 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -812,23 +812,33 @@ public class DynamoDBMetadataStore implements 
MetadataStore {
   }
 
   @Retries.OnceRaw
-  private ItemCollection<ScanOutcome> expiredFiles(long modTime) {
-String filterExpression = "mod_time < :mod_time";
+  private ItemCollection<ScanOutcome> expiredFiles(long modTime,
+  String keyPrefix) {
+String filterExpression =
+"mod_time < :mod_time and begins_with(parent, :parent)";
 String projectionExpression = "parent,child";
-ValueMap map = new ValueMap().withLong(":mod_time", modTime);
+ValueMap map = new ValueMap()
+.withLong(":mod_time", modTime)
+.withString(":parent", keyPrefix);
 return table.scan(filterExpression, projectionExpression, null, map);
   }
 
   @Override
   @Retries.OnceRaw("once(batchWrite)")
   public void prune(long modTime) throws IOException {
+prune(modTime, "/");
+  }
+
+  @Override
+  @Retries.OnceRaw("once(batchWrite)")
+  public void prune(long modTime, String keyPrefix) throws IOException {
 int itemCount = 0;
 try {
   Collection deletionBatch =
   new ArrayList<>(S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT);
   int delay = conf.getInt(S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_KEY,
   S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_DEFAULT);
-  for (Item item : expiredFiles(modTime)) {
+  for (Item item : expiredFiles(modTime, keyPrefix)) {
 PathMetadata md = PathMetadataDynamoDBTranslation
 .itemToPathMetadata(item, username);
 Path path = md.getFileStatus().getPath();
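
A hedged usage sketch of the new prefix-aware prune(long, String) added to the MetadataStore interface in this patch; the bucket path is a made-up example:

import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;

public final class PruneExample {
  public static void pruneOldEntries(MetadataStore ms) throws IOException {
    long cutoff = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(7);
    // Only entries whose parent key begins with this prefix are pruned
    // (DynamoDB uses begins_with(parent, :parent) in the scan filter).
    ms.prune(cutoff, "/example-bucket/data");
  }

  private PruneExample() { }
}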

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea3849f0/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
index 9267ab4..86059c8 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
@@ -303,12 +303,18 @@ public class LocalMetadataStore implements MetadataStore {
   }
 
   @Override
-  public synchronized void prune(long modTime) throws IOException {
+  public void prune(long modTime) throws IOException {
+prune(modTime, "");
+  }
+
+  @Override
+  public synchronized void prune(long modTime, String keyPrefix)
+  throws IOException {
 Iterator<Map.Entry<Path, PathMetadata>> files =
 fileHash.entrySet().iterator();
 while (files.hasNext()) {
   Map.Entry<Path, PathMetadata> entry = files.next();
-  if (expired(entry.getValue().getFileStatus(), modTime)) {
+  if (expired(entry.getValue().getFileStatus(), modTime, keyPrefix)) {
 files.remove();
   }
 }
@@ -323,7 +329,7 @@ public class LocalMetadataStore implements MetadataStore {
 
   for (PathMetadata 

[21/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/RELEASENOTES.3.1.0.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/RELEASENOTES.3.1.0.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/RELEASENOTES.3.1.0.md
new file mode 100644
index 000..9e3c65d
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/RELEASENOTES.3.1.0.md
@@ -0,0 +1,199 @@
+
+
+# Apache Hadoop  3.1.0 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+
+
+---
+
+* [HDFS-11799](https://issues.apache.org/jira/browse/HDFS-11799) | *Major* | 
**Introduce a config to allow setting up write pipeline with fewer nodes than 
replication factor**
+
+Added new configuration "dfs.client.block.write.replace-datanode-on-failure.min-replication".
+
+The minimum number of replications needed to avoid failing the write
+  pipeline when new datanodes cannot be found to replace failed datanodes
+  (for example, due to network failure). If the number of remaining
+  datanodes in the write pipeline is greater than or equal to this property
+  value, writing continues to the remaining nodes. Otherwise an exception
+  is thrown.
+
+  If this is set to 0, an exception will be thrown when a replacement
+  cannot be found.
+
+
+---
+
+* [HDFS-12486](https://issues.apache.org/jira/browse/HDFS-12486) | *Major* | 
**GetConf to get journalnodeslist**
+
+Adds a getconf command option to list the journal nodes.
+Usage: hdfs getconf -journalnodes
+
+
+---
+
+* [HADOOP-14840](https://issues.apache.org/jira/browse/HADOOP-14840) | *Major* 
| **Tool to estimate resource requirements of an application pipeline based on 
prior executions**
+
+The first version of Resource Estimator service, a tool that captures the 
historical resource usage of an app and predicts its future resource 
requirement.
+
+
+---
+
+* [YARN-5079](https://issues.apache.org/jira/browse/YARN-5079) | *Major* | 
**[Umbrella] Native YARN framework layer for services and beyond**
+
+A framework is implemented to orchestrate containers on YARN.
+
+
+---
+
+* [YARN-4757](https://issues.apache.org/jira/browse/YARN-4757) | *Major* | 
**[Umbrella] Simplified discovery of services via DNS mechanisms**
+
+A DNS server backed by yarn service registry is implemented to enable service 
discovery on YARN using standard DNS lookup.
+
+
+---
+
+* [YARN-4793](https://issues.apache.org/jira/browse/YARN-4793) | *Major* | 
**[Umbrella] Simplified API layer for services and beyond**
+
+A REST API service is implemented to enable users to launch and manage 
container based services on YARN via REST API
+
+
+---
+
+* [HADOOP-15008](https://issues.apache.org/jira/browse/HADOOP-15008) | *Minor* 
| **Metrics sinks may emit too frequently if multiple sink periods are 
configured**
+
+Previously if multiple metrics sinks were configured with different periods, 
they may emit more frequently than configured, at a period as low as the GCD of 
the configured periods. This change makes all metrics sinks emit at their 
configured period.
+
+
+---
+
+* [HDFS-12825](https://issues.apache.org/jira/browse/HDFS-12825) | *Minor* | 
**Fsck report shows config key name for min replication issues**
+
+**WARNING: No release note provided for this change.**
+
+
+---
+
+* [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | *Major* | 
**RBF: Document Router and State Store metrics**
+
+This JIRA makes following change:
+Change Router metrics context from 'router' to 'dfs'.
+
+
+---
+
+* [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | *Major* | 
**RBF: Add ACL support for mount table**
+
+Mount tables now support ACLs. Users won't be able to modify mount table
+entries they do not own: pre-existing entries (which had no permissions
+before) are treated as owner:superuser, group:supergroup, permission:755 by
+default. To modify such entries, log in as the superuser.
+
+
+---
+
+* [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | *Major* | 
**Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user 
classpath**
+
+Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user 
classpath.
+
+
+---
+
+* [HDFS-9806](https://issues.apache.org/jira/browse/HDFS-9806) | *Major* | 
**Allow HDFS block replicas to be provided by an external storage system**
+
+Provided storage allows data stored outside HDFS to be mapped to and addressed 
from HDFS. It builds on heterogeneous storage by introducing a new storage 
type, PROVIDED, to the set of media in a datanode. Clients accessing data in 
PROVIDED storages can cache replicas in local media, enforce HDFS invariants 
(e.g., 

[19/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_3.1.0.xml
--
diff --git 
a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_3.1.0.xml
 
b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_3.1.0.xml
new file mode 100644
index 000..ef04652
--- /dev/null
+++ 
b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_3.1.0.xml
@@ -0,0 +1,16 @@
+
+
+
+
+
+
+
+
+
+
+
+





[47/50] [abbrv] hadoop git commit: HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. Contributed by Surendra Singh Lilhore.

2018-04-10 Thread xyao
HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f89594f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f89594f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f89594f0

Branch: refs/heads/HDFS-7240
Commit: f89594f0b80e8efffdcb887daa4a18a2b0a228b3
Parents: cef8eb7
Author: Rakesh Radhakrishnan 
Authored: Tue Apr 10 23:35:00 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Tue Apr 10 23:35:00 2018 +0530

--
 .../hdfs/server/namenode/FSTreeTraverser.java   | 339 ++
 .../server/namenode/ReencryptionHandler.java| 615 ---
 .../server/namenode/ReencryptionUpdater.java|   2 +-
 .../hdfs/server/namenode/TestReencryption.java  |   3 -
 .../namenode/TestReencryptionHandler.java   |  10 +-
 5 files changed, 595 insertions(+), 374 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f89594f0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
new file mode 100644
index 000..ff77029
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
@@ -0,0 +1,339 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.util.Timer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * FSTreeTraverser traverses directories recursively and processes files
+ * in batches.
+ */
+@InterfaceAudience.Private
+public abstract class FSTreeTraverser {
+
+
+  public static final Logger LOG = LoggerFactory
+  .getLogger(FSTreeTraverser.class);
+
+  private final FSDirectory dir;
+
+  private long readLockReportingThresholdMs;
+
+  private Timer timer;
+
+  public FSTreeTraverser(FSDirectory dir, Configuration conf) {
+this.dir = dir;
+this.readLockReportingThresholdMs = conf.getLong(
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
+timer = new Timer();
+  }
+
+  public FSDirectory getFSDirectory() {
+return dir;
+  }
+
+  /**
+   * Iterate through all files directly inside parent, and recurse down
+   * directories. The listing is done in batches, and can optionally start
+   * after a given position. The iteration of the inode tree is done in a
+   * depth-first fashion, but instead of holding all {@link INodeDirectory}s
+   * in memory, only the path components to the current inode are held. This
+   * reduces memory consumption.
+   *
+   * @param parent
+   *  The inode id of parent directory
+   * @param startId
+   *  Id of the start inode.
+   * @param startAfter
+   *  Full path of a file the traverse should start after.
+   * @param traverseInfo
+   *  info which may be required for processing the children.
+   * @throws IOException
+   * @throws 
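
The javadoc is cut off above. To make the batched depth-first idea concrete, here is a standalone illustration using plain java.io.File — explicitly not the FSTreeTraverser API, whose abstract hooks are elided in this message:

import java.io.File;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

public class BatchedTraversalDemo {
  private static final int BATCH_SIZE = 1000;

  public static void traverse(File root) {
    List<File> batch = new ArrayList<>();
    Deque<File> stack = new ArrayDeque<>();
    stack.push(root);
    while (!stack.isEmpty()) {
      File dir = stack.pop();
      File[] children = dir.listFiles();
      if (children == null) {
        continue; // not a directory, or unreadable
      }
      for (File child : children) {
        if (child.isDirectory()) {
          // Depth-first via an explicit stack: only paths are held in
          // memory, never whole subtrees.
          stack.push(child);
        } else {
          batch.add(child);
          if (batch.size() >= BATCH_SIZE) {
            process(batch);
            batch.clear(); // bound memory between batches
          }
        }
      }
    }
    process(batch); // flush the final partial batch
  }

  private static void process(List<File> files) {
    files.forEach(f -> System.out.println(f.getPath()));
  }

  public static void main(String[] args) {
    traverse(new File("."));
  }
}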

[26/50] [abbrv] hadoop git commit: YARN-8107. Give an informative message when incorrect format is used in ATSv2 filter attributes. (Rohith Sharma K S via Haibo Chen)

2018-04-10 Thread xyao
YARN-8107. Give an informative message when incorrect format is used in ATSv2 
filter attributes. (Rohith Sharma K S via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/024d7c08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/024d7c08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/024d7c08

Branch: refs/heads/HDFS-7240
Commit: 024d7c08704e6a5fcc1f53a8f56a44c84c8d5fa0
Parents: b17dc9f
Author: Haibo Chen 
Authored: Fri Apr 6 09:37:21 2018 -0700
Committer: Haibo Chen 
Committed: Fri Apr 6 09:39:01 2018 -0700

--
 .../reader/TimelineParserForCompareExpr.java|  7 +-
 .../reader/TimelineParserForEqualityExpr.java   |  7 +-
 .../TestTimelineReaderWebServicesUtils.java | 25 
 3 files changed, 37 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/024d7c08/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java
index 1b020d9..a582956 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java
@@ -282,7 +282,12 @@ abstract class TimelineParserForCompareExpr implements 
TimelineParser {
   parseValue(expr.substring(kvStartOffset, offset)));
 }
 if (filterList == null || filterList.getFilterList().isEmpty()) {
-  filterList = new TimelineFilterList(currentFilter);
+  if (currentFilter == null) {
+throw new TimelineParseException(
+"Invalid expression provided for " + exprName);
+  } else {
+filterList = new TimelineFilterList(currentFilter);
+  }
 } else if (currentFilter != null) {
   filterList.addFilter(currentFilter);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/024d7c08/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java
index 7451713..2bdce38 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java
@@ -325,7 +325,12 @@ abstract class TimelineParserForEqualityExpr implements 
TimelineParser {
   }
 }
 if (filterList == null || filterList.getFilterList().isEmpty()) {
-  filterList = new TimelineFilterList(currentFilter);
+  if (currentFilter == null) {
+throw new TimelineParseException(
+"Invalid expression provided for " + exprName);
+  } else {
+filterList = new TimelineFilterList(currentFilter);
+  }
 } else if (currentFilter != null) {
   filterList.addFilter(currentFilter);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/024d7c08/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java
--
diff --git 

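In practice the new guard turns a silently-null filter into an immediate, descriptive failure. A hedged sketch of how this surfaces through the utility class whose tests are extended above (the input string is hypothetical, and exactly which expressions parse to nothing depends on the grammar):

    // Called the same way TestTimelineReaderWebServicesUtils (same package) does.
    try {
      TimelineReaderWebServicesUtils.parseMetricFilters("(abc");  // malformed
    } catch (TimelineParseException e) {
      System.out.println(e.getMessage());
      // e.g. "Invalid expression provided for metricfilters"
    }
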
[44/50] [abbrv] hadoop git commit: YARN-7804. [UI2] Refresh action on Grid view page should not be redirected to graph view. Contributed by Gergely Novák.

2018-04-10 Thread xyao
YARN-7804. [UI2] Refresh action on Grid view page should not be redirected to 
graph view. Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c1e77dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c1e77dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c1e77dd

Branch: refs/heads/HDFS-7240
Commit: 7c1e77dda4cb3ba8952328d142aafcf0366b5903
Parents: 7623cc5
Author: Sunil G 
Authored: Tue Apr 10 16:09:09 2018 +0530
Committer: Sunil G 
Committed: Tue Apr 10 16:09:09 2018 +0530

--
 .../main/webapp/app/components/timeline-view.js | 35 ++--
 .../webapp/app/controllers/yarn-app-attempt.js  |  9 -
 .../webapp/app/controllers/yarn-app/attempts.js | 11 --
 .../app/templates/components/timeline-view.hbs  | 12 +++
 .../webapp/app/templates/yarn-app-attempt.hbs   |  2 ++
 .../webapp/app/templates/yarn-app/attempts.hbs  |  2 ++
 6 files changed, 52 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c1e77dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
index 65a8cb1..3588009 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
@@ -25,6 +25,13 @@ export default Ember.Component.extend({
   tableDefinition: TableDefinition.create({
 searchType: 'manual',
   }),
+  graphDrawn: false,
+
+  actions: {
+changeViewType(param) {
+  this.sendAction("changeViewType", param);
+}
+  },
 
   canvas: {
 svg: undefined,
@@ -235,12 +242,10 @@ export default Ember.Component.extend({
   },
 
   didInsertElement: function() {
-// init tooltip
-this.initTooltip();
+// init model
 this.modelArr = [];
 this.containerIdArr = [];
 
-// init model
 if (this.get("rmModel")) {
   this.get("rmModel").forEach(function(o) {
 if(!this.modelArr.contains(o)) {
@@ -258,16 +263,30 @@ export default Ember.Component.extend({
   }.bind(this));
 }
 
-if(this.modelArr.length === 0) {
+if (this.modelArr.length === 0) {
   return;
 }
 
 this.modelArr.sort(function(a, b) {
   var tsA = a.get("startTs");
   var tsB = b.get("startTs");
-
   return tsA - tsB;
 });
+
+if (this.get('attemptModel')) {
+  this.setAttemptsGridColumnsAndRows();
+} else {
+  this.setContainersGridColumnsAndRows();
+}
+  },
+
+  didUpdate: function() {
+if (this.get("viewType") === "grid" || this.graphDrawn) {
+  return;
+}
+
+this.initTooltip();
+
 var begin = 0;
 if (this.modelArr.length > 0) {
   begin = this.modelArr[0].get("startTs");
@@ -289,11 +308,7 @@ export default Ember.Component.extend({
   this.setSelected(this.modelArr[0]);
 }
 
-if (this.get('attemptModel')) {
-  this.setAttemptsGridColumnsAndRows();
-} else {
-  this.setContainersGridColumnsAndRows();
-}
+this.graphDrawn = true;
   },
 
   setAttemptsGridColumnsAndRows: function() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c1e77dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
index 4c8b8a1..116920d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
@@ -19,8 +19,15 @@
 import Ember from 'ember';
 
 export default Ember.Controller.extend({
-  queryParams: ["service"],
+  queryParams: ["service", "viewType"],
   service: undefined,
+  viewType: "graph",
+
+  actions: {
+changeViewType(param) {
+  this.set("viewType", param);
+}
+  },
 
   breadcrumbs: Ember.computed("model.attempt.appId", "model.attempt.id", 
function () {
 var appId = this.get("model.attempt.appId");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c1e77dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/attempts.js

[13/50] [abbrv] hadoop git commit: YARN-6936. [Atsv2] Retrospect storing entities into sub application table from client perspective. (Rohith Sharma K S via Haibo Chen)

2018-04-10 Thread xyao
YARN-6936. [Atsv2] Retrospect storing entities into sub application table from 
client perspective. (Rohith Sharma K S via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8b8bd53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8b8bd53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8b8bd53

Branch: refs/heads/HDFS-7240
Commit: f8b8bd53c4797d406bea5b1b0cdb179e209169cc
Parents: d737bf99
Author: Haibo Chen 
Authored: Thu Apr 5 10:22:50 2018 -0700
Committer: Haibo Chen 
Committed: Thu Apr 5 10:23:42 2018 -0700

--
 .../timelineservice/SubApplicationEntity.java   | 50 
 .../yarn/client/api/TimelineV2Client.java   | 47 +++---
 .../client/api/impl/TimelineV2ClientImpl.java   | 30 ++--
 ...stTimelineReaderWebServicesHBaseStorage.java |  7 +--
 .../TestHBaseTimelineStorageEntities.java   |  3 +-
 .../storage/HBaseTimelineWriterImpl.java|  3 +-
 .../collector/TimelineCollectorWebService.java  | 19 ++--
 7 files changed, 138 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8b8bd53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
new file mode 100644
index 000..a83ef3d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records.timelineservice;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This entity represents user-defined entities to be stored under the sub
+ * application table.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class SubApplicationEntity extends HierarchicalTimelineEntity {
+
+  public static final String YARN_APPLICATION_ID = "YARN_APPLICATION_ID";
+
+  public SubApplicationEntity(TimelineEntity entity) {
+super(entity);
+  }
+
+  /**
+   * Checks if the input TimelineEntity object is a SubApplicationEntity.
+   *
+   * @param te TimelineEntity object.
+   * @return true if the input is a SubApplicationEntity, false otherwise
+   */
+  public static boolean isSubApplicationEntity(TimelineEntity te) {
+return (te != null && te instanceof SubApplicationEntity);
+  }
+
+  public void setApplicationId(String appId) {
+addInfo(YARN_APPLICATION_ID, appId);
+  }
+}

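A short usage sketch of the new class; the entity type, id and application id below are hypothetical:

    // Wrap a TimelineEntity so writers route it to the sub-application table,
    // tagged with the application that produced it.
    TimelineEntity raw = new TimelineEntity();
    raw.setType("JOB_METRICS");   // hypothetical entity type
    raw.setId("job-42");

    SubApplicationEntity subAppEntity = new SubApplicationEntity(raw);
    subAppEntity.setApplicationId("application_1523397367121_0001");
    assert SubApplicationEntity.isSubApplicationEntity(subAppEntity);
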
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8b8bd53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
index 423c059..e987b46 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
@@ -54,9 +54,10 @@ public abstract class TimelineV2Client extends 
CompositeService {
 
   /**
* 
-   * Send the information of a number of conceptual entities to the timeline
-   * service v.2 collector. It is a blocking API. The method will 
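The javadoc is cut off in the archive; for context, a hedged sketch of the blocking versus fire-and-forget calls this class offers. It assumes appId, conf and entity are in scope, and that the collector address has already been delivered to the client (normally via the AM's allocate response):

    TimelineV2Client client = TimelineV2Client.createTimelineClient(appId);
    client.init(conf);
    client.start();
    try {
      client.putEntities(entity);        // blocking: waits for the collector
      client.putEntitiesAsync(entity);   // returns immediately
    } finally {
      client.stop();
    }
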

[37/50] [abbrv] hadoop git commit: HDFS-13380. RBF: mv/rm fail after the directory exceeded the quota limit. Contributed by Yiqun Lin.

2018-04-10 Thread xyao
HDFS-13380. RBF: mv/rm fail after the directory exceeded the quota limit. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9b9f48d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9b9f48d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9b9f48d

Branch: refs/heads/HDFS-7240
Commit: e9b9f48dad5ebb58ee529f918723089e8356c480
Parents: ac32b35
Author: Inigo Goiri 
Authored: Mon Apr 9 10:09:25 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 9 10:09:25 2018 -0700

--
 .../federation/router/RouterRpcServer.java  | 30 
 .../federation/router/TestRouterQuota.java  |  4 +++
 2 files changed, 28 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9b9f48d/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 1159289..e6d2f5e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -900,7 +900,8 @@ public class RouterRpcServer extends AbstractService
   throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> srcLocations = getLocationsForPath(src, true);
+final List<RemoteLocation> srcLocations =
+getLocationsForPath(src, true, false);
// srcLocations may be trimmed by getRenameDestinations()
final List<RemoteLocation> locs = new LinkedList<>(srcLocations);
 RemoteParam dstParam = getRenameDestinations(locs, dst);
@@ -921,7 +922,8 @@ public class RouterRpcServer extends AbstractService
   final Options.Rename... options) throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> srcLocations = getLocationsForPath(src, true);
+final List<RemoteLocation> srcLocations =
+getLocationsForPath(src, true, false);
// srcLocations may be trimmed by getRenameDestinations()
final List<RemoteLocation> locs = new LinkedList<>(srcLocations);
 RemoteParam dstParam = getRenameDestinations(locs, dst);
@@ -998,7 +1000,8 @@ public class RouterRpcServer extends AbstractService
   public boolean delete(String src, boolean recursive) throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> locations = getLocationsForPath(src, true);
+final List<RemoteLocation> locations =
+getLocationsForPath(src, true, false);
 RemoteMethod method = new RemoteMethod("delete",
 new Class[] {String.class, boolean.class}, new RemoteParam(),
 recursive);
@@ -2213,14 +2216,29 @@ public class RouterRpcServer extends AbstractService
 
   /**
* Get the possible locations of a path in the federated cluster.
+   * During the get operation, it will do the quota verification.
+   *
+   * @param path Path to check.
+   * @param failIfLocked Fail the request if locked (top mount point).
+   * @return Prioritized list of locations in the federated cluster.
+   * @throws IOException If the location for this path cannot be determined.
+   */
+  protected List<RemoteLocation> getLocationsForPath(String path,
+  boolean failIfLocked) throws IOException {
+return getLocationsForPath(path, failIfLocked, true);
+  }
+
+  /**
+   * Get the possible locations of a path in the federated cluster.
*
* @param path Path to check.
* @param failIfLocked Fail the request if locked (top mount point).
+   * @param needQuotaVerify If need to do the quota verification.
* @return Prioritized list of locations in the federated cluster.
* @throws IOException If the location for this path cannot be determined.
*/
-  protected List<RemoteLocation> getLocationsForPath(
-  String path, boolean failIfLocked) throws IOException {
+  protected List<RemoteLocation> getLocationsForPath(String path,
+  boolean failIfLocked, boolean needQuotaVerify) throws IOException {
 try {
   // Check the location for this path
   final PathLocation location =
@@ -2241,7 +2259,7 @@ public class RouterRpcServer extends AbstractService
 }
 
 // Check quota
-if (this.router.isQuotaEnabled()) {
+if (this.router.isQuotaEnabled() && needQuotaVerify) {
   RouterQuotaUsage quotaUsage = this.router.getQuotaManager()
   .getQuotaUsage(path);
   if (quotaUsage != null) {

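The shape of the change is a default-argument overload: the old two-argument signature stays and verifies quota, while delete/rename call the three-argument form with verification off, since an over-quota mount point must still allow operations that free space. A minimal, self-contained sketch of the pattern with hypothetical names:

    import java.util.Collections;
    import java.util.List;

    class LocationResolver {
      // Old signature, kept for compatibility: verifies quota by default.
      List<String> getLocationsForPath(String path, boolean failIfLocked) {
        return getLocationsForPath(path, failIfLocked, true);
      }

      List<String> getLocationsForPath(String path, boolean failIfLocked,
          boolean needQuotaVerify) {
        if (needQuotaVerify) {
          // the quota check would run here; rm/mv pass false to skip it
        }
        return Collections.singletonList(path);  // placeholder resolution
      }
    }
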

[45/50] [abbrv] hadoop git commit: HDFS-13420. License header is displayed in ArchivalStorage/MemoryStorage html pages. Contributed by Akira Ajisaka.

2018-04-10 Thread xyao
HDFS-13420. License header is displayed in ArchivalStorage/MemoryStorage html 
pages. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6729047a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6729047a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6729047a

Branch: refs/heads/HDFS-7240
Commit: 6729047a8ba273d27edcc6a1a9d397a096f44d84
Parents: 7c1e77d
Author: Weiwei Yang 
Authored: Tue Apr 10 22:10:44 2018 +0800
Committer: Weiwei Yang 
Committed: Tue Apr 10 22:10:44 2018 +0800

--
 .../hadoop-hdfs/src/site/markdown/ArchivalStorage.md   | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/MemoryStorage.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6729047a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 91ad107..ab7975a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -1,4 +1,4 @@

[18/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.1.0.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.1.0.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.1.0.xml
new file mode 100644
index 000..163eb3c
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.1.0.xml
@@ -0,0 +1,3146 @@
[3,146 lines of generated JDiff XML for Apache_Hadoop_YARN_Client_3.1.0.xml elided; the list archive stripped the XML markup, leaving nothing recoverable]

[48/50] [abbrv] hadoop git commit: HDFS-13363. Record file path when FSDirAclOp throws AclException. Contributed by Gabor Bota.

2018-04-10 Thread xyao
HDFS-13363. Record file path when FSDirAclOp throws AclException. Contributed 
by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e76c2aeb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e76c2aeb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e76c2aeb

Branch: refs/heads/HDFS-7240
Commit: e76c2aeb288710ebee39680528dec44e454bbe9e
Parents: f89594f
Author: Xiao Chen 
Authored: Tue Apr 10 11:19:23 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 11:19:48 2018 -0700

--
 .../org/apache/hadoop/hdfs/protocol/AclException.java   | 10 ++
 .../apache/hadoop/hdfs/server/namenode/FSDirAclOp.java  | 12 
 2 files changed, 22 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e76c2aeb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
index 1210999..9948b99 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
@@ -36,4 +36,14 @@ public class AclException extends IOException {
   public AclException(String message) {
 super(message);
   }
+
+  /**
+   * Creates a new AclException.
+   *
+   * @param message String message
+   * @param cause The cause of the exception
+   */
+  public AclException(String message, Throwable cause) {
+super(message, cause);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e76c2aeb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 7b3471d..8d77f89 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -53,6 +53,8 @@ class FSDirAclOp {
   existingAcl, aclSpec);
   AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
   fsd.getEditLog().logSetAcl(src, newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -77,6 +79,8 @@ class FSDirAclOp {
 existingAcl, aclSpec);
   AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
   fsd.getEditLog().logSetAcl(src, newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -100,6 +104,8 @@ class FSDirAclOp {
 existingAcl);
   AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
   fsd.getEditLog().logSetAcl(src, newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -117,6 +123,8 @@ class FSDirAclOp {
   src = iip.getPath();
   fsd.checkOwner(pc, iip);
   unprotectedRemoveAcl(fsd, iip);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -136,6 +144,8 @@ class FSDirAclOp {
   fsd.checkOwner(pc, iip);
+  List<AclEntry> newAcl = unprotectedSetAcl(fsd, iip, aclSpec, false);
   fsd.getEditLog().logSetAcl(iip.getPath(), newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -162,6 +172,8 @@ class FSDirAclOp {
   .stickyBit(fsPermission.getStickyBit())
   .setPermission(fsPermission)
   .addEntries(acl).build();
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.readUnlock();
 }

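The effect of the repeated catch-and-rethrow blocks, sketched with a hypothetical path and failure message:

    // Illustrative only: the rethrown exception now names the file.
    static void wrapDemo(String src) throws AclException {
      try {
        throw new AclException("Invalid ACL: too many entries.");  // stand-in failure
      } catch (AclException e) {
        // the same wrapping each FSDirAclOp method now applies:
        throw new AclException(e.getMessage() + " Path: " + src, e);
        // -> "Invalid ACL: too many entries. Path: /user/alice/file"
      }
    }
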




[38/50] [abbrv] hadoop git commit: HDFS-13376. Specify minimum GCC version to avoid TLS support error in Build of hadoop-hdfs-native-client. Contributed by LiXin Ge.

2018-04-10 Thread xyao
HDFS-13376. Specify minimum GCC version to avoid TLS support error in Build of 
hadoop-hdfs-native-client.  Contributed by LiXin Ge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90593767
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90593767
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90593767

Branch: refs/heads/HDFS-7240
Commit: 905937678577fc0deb57489590863464562088ad
Parents: e9b9f48
Author: James Clampffer 
Authored: Mon Apr 9 13:48:42 2018 -0400
Committer: James Clampffer 
Committed: Mon Apr 9 13:48:42 2018 -0400

--
 BUILDING.txt | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90593767/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 3b9a2ef..9727004 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -10,6 +10,8 @@ Requirements:
 * CMake 3.1 or newer (if compiling native code)
 * Zlib devel (if compiling native code)
 * Cyrus SASL devel (if compiling native code)
+* One of the compilers that support thread_local storage: GCC 4.8.1 or later, 
Visual Studio,
+  Clang (community version), Clang (version for iOS 9 and later) (if compiling 
native code)
 * openssl devel (if compiling native hadoop-pipes and to get the best HDFS 
encryption performance)
 * Linux FUSE (Filesystem in Userspace) version 2.6 or above (if compiling 
fuse_dfs)
 * Jansson C XML parsing library ( if compiling libwebhdfs )





[14/50] [abbrv] hadoop git commit: YARN-8119. [UI2] Timeline Server address' url scheme should be removed while accessing via KNOX. Contributed by Sunil G.

2018-04-10 Thread xyao
YARN-8119. [UI2] Timeline Server address' url scheme should be removed while 
accessing via KNOX. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f32d6275
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f32d6275
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f32d6275

Branch: refs/heads/HDFS-7240
Commit: f32d6275ba9e377fb722e2440986033d7ce8b602
Parents: f8b8bd5
Author: Rohith Sharma K S 
Authored: Thu Apr 5 23:32:35 2018 +0530
Committer: Rohith Sharma K S 
Committed: Thu Apr 5 23:32:35 2018 +0530

--
 .../hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32d6275/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
index 1f9c7c1..83df971 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -61,6 +61,7 @@ function updateConfigs(application) {
   url: getTimeLineURL(rmhost),
   success: function(data) {
 timelinehost = data.property.value;
+timelinehost = timelinehost.replace(/(^\w+:|^)\/\//, '');
 ENV.hosts.timelineWebAddress = timelinehost;
 
 var address = timelinehost.split(":")[0];
@@ -94,6 +95,7 @@ function updateConfigs(application) {
   url: getTimeLineV1URL(rmhost),
   success: function(data) {
 timelinehost = data.property.value;
+timelinehost = timelinehost.replace(/(^\w+:|^)\/\//, '');
 ENV.hosts.timelineV1WebAddress = timelinehost;
 
 var address = timelinehost.split(":")[0];

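The added line only strips a leading scheme so that splitting on ':' reliably yields the host. An equivalent one-liner in Java, with a hypothetical value:

    // Java equivalent of the JS replace(/(^\w+:|^)\/\//, '').
    String fromConfig = "https://timeline.example.com:8190";  // hypothetical
    String hostPort = fromConfig.replaceFirst("^(?:\\w+:)?//", "");
    // hostPort == "timeline.example.com:8190"; a bare host:port passes through
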




[16/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
new file mode 100644
index 000..1e826f3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
@@ -0,0 +1,1331 @@
[1,331 lines of generated JDiff XML for Apache_Hadoop_YARN_Server_Common_3.1.0.xml elided; the list archive stripped the XML markup, leaving nothing recoverable]





[08/50] [abbrv] hadoop git commit: YARN-8013. Support application tags when defining application namespaces for placement constraints. Contributed by Weiwei Yang.

2018-04-10 Thread xyao
YARN-8013. Support application tags when defining application namespaces for 
placement constraints. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7853ec8d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7853ec8d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7853ec8d

Branch: refs/heads/HDFS-7240
Commit: 7853ec8d2fb8731b7f7c28fd87491a0a2d47967e
Parents: 42cd367
Author: Konstantinos Karanasos 
Authored: Wed Apr 4 10:51:58 2018 -0700
Committer: Konstantinos Karanasos 
Committed: Wed Apr 4 10:51:58 2018 -0700

--
 .../api/records/AllocationTagNamespaceType.java |   2 +-
 .../constraint/AllocationTagNamespace.java  | 312 --
 .../scheduler/constraint/AllocationTags.java|  44 ++-
 .../constraint/AllocationTagsManager.java   |  47 ++-
 .../constraint/PlacementConstraintsUtil.java|  41 +--
 .../constraint/TargetApplications.java  |  53 ++-
 .../constraint/TargetApplicationsNamespace.java | 326 +++
 .../SingleConstraintAppPlacementAllocator.java  |  21 --
 .../server/resourcemanager/rmapp/MockRMApp.java |   9 +-
 ...estSchedulingRequestContainerAllocation.java |   5 +-
 .../constraint/TestAllocationTagsManager.java   |  22 +-
 .../constraint/TestAllocationTagsNamespace.java |  89 -
 .../TestPlacementConstraintsUtil.java   | 125 ++-
 13 files changed, 654 insertions(+), 442 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7853ec8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
index de5492e..f304600 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
@@ -26,7 +26,7 @@ public enum AllocationTagNamespaceType {
   SELF("self"),
   NOT_SELF("not-self"),
   APP_ID("app-id"),
-  APP_LABEL("app-label"),
+  APP_TAG("app-tag"),
   ALL("all");
 
   private String typeKeyword;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7853ec8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagNamespace.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagNamespace.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagNamespace.java
deleted file mode 100644
index 7b9f3be..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagNamespace.java
+++ /dev/null
@@ -1,312 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
-
-import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.yarn.api.records.AllocationTagNamespaceType;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Set;
-import 

[07/50] [abbrv] hadoop git commit: YARN-8115. [UI2] URL data like nodeHTTPAddress must be encoded in UI before using to access NM. Contributed by Sreenath Somarajapuram.

2018-04-10 Thread xyao
YARN-8115. [UI2] URL data like nodeHTTPAddress must be encoded in UI before 
using to access NM. Contributed by Sreenath Somarajapuram.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42cd367c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42cd367c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42cd367c

Branch: refs/heads/HDFS-7240
Commit: 42cd367c9308b944bc71de6c07b6c3f028a0d874
Parents: b779f4f
Author: Sunil G 
Authored: Wed Apr 4 22:13:14 2018 +0530
Committer: Sunil G 
Committed: Wed Apr 4 22:13:14 2018 +0530

--
 .../webapp/app/components/node-menu-panel.js| 25 
 .../webapp/app/controllers/yarn-node-app.js |  3 ++-
 .../webapp/app/controllers/yarn-node-apps.js|  3 ++-
 .../app/controllers/yarn-node-container.js  |  3 ++-
 .../app/controllers/yarn-node-containers.js |  3 ++-
 .../main/webapp/app/controllers/yarn-node.js|  3 ++-
 .../webapp/app/controllers/yarn-nodes/table.js  |  2 +-
 .../src/main/webapp/app/helpers/node-link.js|  2 +-
 .../src/main/webapp/app/initializers/loader.js  |  1 +
 .../main/webapp/app/routes/yarn-node-apps.js|  8 ---
 .../webapp/app/routes/yarn-node-containers.js   |  8 ---
 .../src/main/webapp/app/routes/yarn-node.js |  8 ---
 .../templates/components/node-menu-panel.hbs|  8 +++
 13 files changed, 57 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42cd367c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/node-menu-panel.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/node-menu-panel.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/node-menu-panel.js
new file mode 100644
index 000..31457be
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/node-menu-panel.js
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Component.extend({
+  encodedAddr : Ember.computed("nodeAddr", function(){
+return encodeURIComponent(this.get('nodeAddr'));
+  })
+});

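For readers cross-referencing from Java: encodeURIComponent percent-encodes the ':' in a node address so it survives as a URL path segment. A rough Java analogue (URLEncoder is form-encoding, which is close enough for host:port values; the address is hypothetical):

    String addr = "node1.example.com:8042";
    // throws UnsupportedEncodingException, which cannot happen for "UTF-8"
    String encoded = java.net.URLEncoder.encode(addr, "UTF-8");
    // encoded == "node1.example.com%3A8042"
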
http://git-wip-us.apache.org/repos/asf/hadoop/blob/42cd367c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js
index 3dc09fc..e0d58ec 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js
@@ -22,6 +22,7 @@ export default Ember.Controller.extend({
 
   breadcrumbs: Ember.computed('model.nodeInfo', function () {
 var nodeInfo = this.get('model.nodeInfo');
+var addr = encodeURIComponent(nodeInfo.addr);
 return [{
   text: "Home",
   routeName: 'application'
@@ -30,7 +31,7 @@ export default Ember.Controller.extend({
   routeName: 'yarn-nodes.table'
 }, {
   text: `Node [ ${nodeInfo.id} ]`,
-  href: `#/yarn-node/${nodeInfo.id}/${nodeInfo.addr}`,
+  href: `#/yarn-node/${nodeInfo.id}/${addr}/info`,
 }, {
   text: `Application [ ${nodeInfo.appId} ]`,
 }];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42cd367c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-apps.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-apps.js
 

[10/50] [abbrv] hadoop git commit: YARN-8073 TimelineClientImpl doesn't honor yarn.timeline-service.versions configuration. Contributed by Rohith Sharma K S

2018-04-10 Thread xyao
YARN-8073 TimelineClientImpl doesn't honor yarn.timeline-service.versions 
configuration. Contributed by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/345e7624
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/345e7624
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/345e7624

Branch: refs/heads/HDFS-7240
Commit: 345e7624d58a058a1bad666bd1e5ce4b346a9056
Parents: 3087e89
Author: Vrushali C 
Authored: Wed Apr 4 15:08:03 2018 -0700
Committer: Vrushali C 
Committed: Wed Apr 4 15:08:03 2018 -0700

--
 .../jobhistory/JobHistoryEventHandler.java  |  2 +-
 .../hadoop/yarn/conf/YarnConfiguration.java | 21 
 .../client/api/impl/TimelineClientImpl.java | 23 +
 .../yarn/util/timeline/TimelineUtils.java   |  3 +--
 .../TestCombinedSystemMetricsPublisher.java | 26 
 .../reader/TimelineReaderServer.java|  2 +-
 6 files changed, 61 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/345e7624/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index fd93d07..52c13f1 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -269,7 +269,7 @@ public class JobHistoryEventHandler extends AbstractService
   LOG.info("Emitting job history data to the timeline service is enabled");
   if (YarnConfiguration.timelineServiceEnabled(conf)) {
 boolean timelineServiceV2Enabled =
-((int) YarnConfiguration.getTimelineServiceVersion(conf) == 2);
+YarnConfiguration.timelineServiceV2Enabled(conf);
 if(timelineServiceV2Enabled) {
   timelineV2Client =
   ((MRAppMaster.RunningAppContext)context).getTimelineV2Client();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/345e7624/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 42f2cae..41755e2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3797,6 +3797,27 @@ public class YarnConfiguration extends Configuration {
   }
 
   /**
+   * Returns whether the timeline service v.1.5 is enabled via configuration.
+   *
+   * @param conf the configuration
+   * @return whether the timeline service v.1.5 is enabled. V.1.5 refers to a
+   * version equal to 1.5.
+   */
+  public static boolean timelineServiceV15Enabled(Configuration conf) {
+boolean enabled = false;
+if (timelineServiceEnabled(conf)) {
+  Collection<Float> versions = getTimelineServiceVersions(conf);
+  for (Float version : versions) {
+if (Float.compare(version, 1.5f) == 0) {
+  enabled = true;
+  break;
+}
+  }
+}
+return enabled;
+  }
+
+  /**
* Returns all the active timeline service versions. It does not check
* whether the timeline service itself is enabled.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/345e7624/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index 44d6d48..88fccd9 100644
--- 

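A hedged configuration sketch of what the new helper enables; the property value is hypothetical:

    // Both v.1.5 and v.2 active at once.
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    conf.set(YarnConfiguration.TIMELINE_SERVICE_VERSIONS, "1.5,2.0");
    boolean v15 = YarnConfiguration.timelineServiceV15Enabled(conf);  // true
    boolean v2 = YarnConfiguration.timelineServiceV2Enabled(conf);    // true
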
[05/50] [abbrv] hadoop git commit: YARN-7764. Findbugs warning: Resource#getResources may expose internal representation. Contributed by Weiwei Yang.

2018-04-10 Thread xyao
YARN-7764. Findbugs warning: Resource#getResources may expose internal 
representation. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7a17b02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7a17b02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7a17b02

Branch: refs/heads/HDFS-7240
Commit: f7a17b029ddd61ca73c2c2c88f5451dbf05fc501
Parents: 2d06d88
Author: Sunil G 
Authored: Wed Apr 4 09:22:35 2018 +0530
Committer: Sunil G 
Committed: Wed Apr 4 09:22:35 2018 +0530

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml  | 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7a17b02/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 81b8825..5841361 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -658,4 +658,11 @@
 
   
 
+  <Match>
+    <Class name="org.apache.hadoop.yarn.api.records.Resource" />
+    <Method name="getResources" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+
 





[02/50] [abbrv] hadoop git commit: YARN-8051. TestRMEmbeddedElector#testCallbackSynchronization is flaky. (Robert Kanter via Haibo Chen)

2018-04-10 Thread xyao
YARN-8051. TestRMEmbeddedElector#testCallbackSynchronization is flaky. (Robert 
Kanter via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93d47a0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93d47a0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93d47a0e

Branch: refs/heads/HDFS-7240
Commit: 93d47a0ed504ee81d4b74d340c1815bdbb3c9b14
Parents: 2be64eb
Author: Haibo Chen 
Authored: Tue Apr 3 07:58:21 2018 -0700
Committer: Haibo Chen 
Committed: Tue Apr 3 07:59:20 2018 -0700

--
 .../resourcemanager/TestRMEmbeddedElector.java  | 72 +---
 1 file changed, 49 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93d47a0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
index 140483a..9d38149 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
@@ -22,18 +22,22 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.ClientBaseWithFixes;
 import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.atLeast;
-import static org.mockito.Mockito.atMost;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -48,6 +52,8 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
 
   private Configuration conf;
   private AtomicBoolean callbackCalled;
+  private AtomicInteger transitionToActiveCounter;
+  private AtomicInteger transitionToStandbyCounter;
 
   private enum SyncTestType {
 ACTIVE,
@@ -75,6 +81,8 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
 conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
 
 callbackCalled = new AtomicBoolean(false);
+transitionToActiveCounter = new AtomicInteger(0);
+transitionToStandbyCounter = new AtomicInteger(0);
   }
 
   /**
@@ -103,7 +111,7 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
*/
   @Test
   public void testCallbackSynchronization()
-  throws IOException, InterruptedException {
+  throws IOException, InterruptedException, TimeoutException {
 testCallbackSynchronization(SyncTestType.ACTIVE);
 testCallbackSynchronization(SyncTestType.STANDBY);
 testCallbackSynchronization(SyncTestType.NEUTRAL);
@@ -117,9 +125,10 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
* @param type the type of test to run
* @throws IOException if there's an issue transitioning
* @throws InterruptedException if interrupted
+   * @throws TimeoutException if waitFor timeout reached
*/
   private void testCallbackSynchronization(SyncTestType type)
-  throws IOException, InterruptedException {
+  throws IOException, InterruptedException, TimeoutException {
 AdminService as = mock(AdminService.class);
 RMContext rc = mock(RMContext.class);
 ResourceManager rm = mock(ResourceManager.class);
@@ -129,6 +138,17 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
 when(rm.getRMContext()).thenReturn(rc);
 when(rc.getRMAdminService()).thenReturn(as);
 
+doAnswer(invocation -> {
+  transitionToActiveCounter.incrementAndGet();
+  return null;
+}).when(as).transitionToActive(any());
+

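The counters feed GenericTestUtils.waitFor, replacing brittle exact call-count assertions with polling; a sketch, assuming it runs inside a test method that declares the checked exceptions (the intervals are hypothetical):

    // Poll until the mocked transition has been observed at least once.
    GenericTestUtils.waitFor(
        () -> transitionToActiveCounter.get() >= 1,
        500,      // check every 500 ms
        10000);   // give up after 10 s
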
[09/50] [abbrv] hadoop git commit: YARN-7946. Update TimelineServerV2 doc as per YARN-7919. (Haibo Chen)

2018-04-10 Thread xyao
YARN-7946. Update TimelineServerV2 doc as per YARN-7919. (Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3087e891
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3087e891
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3087e891

Branch: refs/heads/HDFS-7240
Commit: 3087e89135365cad7f28f1bf8c9a1c483e245988
Parents: 7853ec8
Author: Haibo Chen 
Authored: Wed Apr 4 11:59:31 2018 -0700
Committer: Haibo Chen 
Committed: Wed Apr 4 11:59:31 2018 -0700

--
 BUILDING.txt| 12 
 .../src/site/markdown/TimelineServiceV2.md  |  8 
 2 files changed, 16 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3087e891/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index dbf2cb8..3b9a2ef 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -138,6 +138,18 @@ Maven build goals:
   * Use -DskipShade to disable client jar shading to speed up build times (in
 development environments only, not to build release artifacts)
 
+ YARN Application Timeline Service V2 build options:
+
+   YARN Timeline Service v.2 chooses Apache HBase as the primary backing 
storage. The supported
+   versions of Apache HBase are 1.2.6 (default) and 2.0.0-beta1.
+
+  * HBase 1.2.6 is used by default to build Hadoop. The official releases are 
ready to use if you
+plan on running Timeline Service v2 with HBase 1.2.6.
+
+  * Use -Dhbase.profile=2.0 to build Hadoop with HBase 2.0.0-beta1. Provide 
this option if you plan
+on running Timeline Service v2 with HBase 2.0.
+
+
  Snappy build options:
 
Snappy is a compression library that can be utilized by the native code.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3087e891/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index f097b60..312c10b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -190,9 +190,9 @@ Each step is explained in more detail below.
 
 #  Step 1) Set up the HBase cluster
 The first part is to set up or pick an Apache HBase cluster to use as the 
storage cluster. The
-version of Apache HBase that is supported with Timeline Service v.2 is 1.2.6. 
The 1.0.x versions
-do not work with Timeline Service v.2. Later versions of HBase have not been 
tested with
-Timeline Service.
+supported versions of Apache HBase are 1.2.6 (default) and 2.0.0-beta1.
+The 1.0.x versions do not work with Timeline Service v.2. By default, Hadoop 
releases are built
+with HBase 1.2.6. To use HBase 2.0.0-beta1, build from source with option 
-Dhbase.profile=2.0
 
 HBase has different deployment modes. Refer to the HBase book for 
understanding them and pick a
 mode that is suitable for your setup.
@@ -236,7 +236,7 @@ is needed for the `flowrun` table creation in the schema 
creator. The default HD
 For example,
 
 hadoop fs -mkdir /hbase/coprocessor
-hadoop fs -put 
hadoop-yarn-server-timelineservice-hbase-3.0.0-alpha1-SNAPSHOT.jar
+hadoop fs -put 
hadoop-yarn-server-timelineservice-hbase-coprocessor-3.2.0-SNAPSHOT.jar
/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar
 
 





hadoop git commit: HADOOP-15340. Provide meaningful RPC server name for RpcMetrics. Contributed by Elek Marton.

2018-04-10 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk e76c2aeb2 -> 8ab776d61


HADOOP-15340. Provide meaningful RPC server name for RpcMetrics. Contributed by 
Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ab776d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ab776d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ab776d6

Branch: refs/heads/trunk
Commit: 8ab776d61e569c12ec62024415ff68e5d3b10141
Parents: e76c2ae
Author: Xiaoyu Yao 
Authored: Tue Apr 10 11:42:54 2018 -0700
Committer: Xiaoyu Yao 
Committed: Tue Apr 10 11:42:54 2018 -0700

--
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  5 +-
 .../main/java/org/apache/hadoop/ipc/RPC.java| 46 +---
 .../main/java/org/apache/hadoop/ipc/Server.java |  9 
 .../apache/hadoop/ipc/WritableRpcEngine.java|  2 +-
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   | 11 +++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 56 +++-
 6 files changed, 117 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ab776d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 639bbad..70fde60 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -419,8 +419,9 @@ public class ProtobufRpcEngine implements RpcEngine {
 String portRangeConfig)
 throws IOException {
   super(bindAddress, port, null, numHandlers,
-  numReaders, queueSizePerHandler, conf, classNameBase(protocolImpl
-  .getClass().getName()), secretManager, portRangeConfig);
+  numReaders, queueSizePerHandler, conf,
+  serverNameFromClass(protocolImpl.getClass()), secretManager,
+  portRangeConfig);
   this.verbose = verbose;  
   registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass,
   protocolImpl);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ab776d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 8f8eda6..9cfadc7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -35,6 +35,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.HashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import javax.net.SocketFactory;
 
@@ -808,13 +810,45 @@ public class RPC {
   
   /** An RPC Server. */
   public abstract static class Server extends org.apache.hadoop.ipc.Server {
-   boolean verbose;
-   static String classNameBase(String className) {
-  String[] names = className.split("\\.", -1);
-  if (names == null || names.length == 0) {
-return className;
+
+boolean verbose;
+
+private static final Pattern COMPLEX_SERVER_NAME_PATTERN =
+Pattern.compile("(?:[^\\$]*\\$)*([A-Za-z][^\\$]+)(?:\\$\\d+)?");
+
+/**
+ * Get a meaningful and short name for a server based on a Java class.
+ *
+ * The rules are defined to support the current naming schema of the
+ * generated protobuf classes, where the final class is usually an
+ * anonymous inner class of an inner class.
+ *
+ * 1. For simple classes it returns the simple name of the class
+ * (the name without the package prefix).
+ *
+ * 2. For inner classes, this is the simple name of the inner class.
+ *
+ * 3. If it is an object created from a class factory,
+ *e.g. org.apache.hadoop.ipc.TestRPC$TestClass$2,
+ * this method returns the parent class TestClass.
+ *
+ * 4. If it is an anonymous class, e.g. 'org.apache.hadoop.ipc.TestRPC$10',
+ * serverNameFromClass returns the parent class TestRPC.
+ */
+static String serverNameFromClass(Class clazz) {
+  String name = clazz.getName();
+  String[] names = clazz.getName().split("\\.", -1);
+  if (names != null && names.length > 0) {
+name = names[names.length - 1];
+  }
+  Matcher matcher = COMPLEX_SERVER_NAME_PATTERN.matcher(name);
+  return matcher.matches() ? matcher.group(1) : name;
+}
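
For illustration, the naming rules quoted above should map generated class
names as follows. This is a minimal, self-contained sketch that applies the
same pattern outside the RPC class (serverNameFromClass itself is
package-private); the demo class and method names are invented for the example:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class ServerNameDemo {
      // Same pattern the patch introduces as COMPLEX_SERVER_NAME_PATTERN.
      private static final Pattern P =
          Pattern.compile("(?:[^\\$]*\\$)*([A-Za-z][^\\$]+)(?:\\$\\d+)?");

      static String shortName(String className) {
        String[] parts = className.split("\\.", -1);
        String name = parts[parts.length - 1];   // drop the package prefix
        Matcher m = P.matcher(name);
        return m.matches() ? m.group(1) : name;  // unwrap inner/anonymous names
      }

      public static void main(String[] args) {
        System.out.println(shortName("org.apache.hadoop.ipc.TestRPC$TestClass$2")); // TestClass
        System.out.println(shortName("org.apache.hadoop.ipc.TestRPC$10"));          // TestRPC
        System.out.println(shortName("org.apache.hadoop.ipc.RPC"));                 // RPC
      }
    }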

hadoop git commit: HDFS-13395. Ozone: Plugins support in HDSL Datanode Service. Contributed by Nanda Kumar.

2018-04-10 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 8475d6bb5 -> bb3c07fa3


HDFS-13395. Ozone: Plugins support in HDSL Datanode Service. Contributed by 
Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb3c07fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb3c07fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb3c07fa

Branch: refs/heads/HDFS-7240
Commit: bb3c07fa3e4f5b5c38c251e882a357eddab0957f
Parents: 8475d6b
Author: Xiaoyu Yao 
Authored: Tue Apr 10 11:28:52 2018 -0700
Committer: Xiaoyu Yao 
Committed: Tue Apr 10 11:28:52 2018 -0700

--
 .../src/main/compose/cblock/docker-config   |   3 +-
 .../src/main/compose/ozone/docker-config|   3 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|   3 +
 .../common/src/main/resources/ozone-default.xml |   8 ++
 .../hadoop/ozone/HddsDatanodeService.java   | 118 ++-
 .../statemachine/DatanodeStateMachine.java  |  10 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   |   5 -
 .../server/datanode/DataNodeServicePlugin.java  |  48 
 .../src/test/compose/docker-config  |   3 +-
 .../hadoop/ozone/MiniOzoneClassicCluster.java   |   4 +-
 .../hadoop/ozone/MiniOzoneTestHelper.java   |   5 +
 .../hadoop/ozone/web/ObjectStoreRestPlugin.java | 108 -
 .../ozone/web/OzoneHddsDatanodeService.java |  84 +
 13 files changed, 208 insertions(+), 194 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb3c07fa/hadoop-dist/src/main/compose/cblock/docker-config
--
diff --git a/hadoop-dist/src/main/compose/cblock/docker-config 
b/hadoop-dist/src/main/compose/cblock/docker-config
index 4690de0..f69bef0 100644
--- a/hadoop-dist/src/main/compose/cblock/docker-config
+++ b/hadoop-dist/src/main/compose/cblock/docker-config
@@ -27,7 +27,8 @@ OZONE-SITE.XML_ozone.scm.client.address=scm
 OZONE-SITE.XML_dfs.cblock.jscsi.cblock.server.address=cblock
 OZONE-SITE.XML_dfs.cblock.scm.ipaddress=scm
 OZONE-SITE.XML_dfs.cblock.service.leveldb.path=/tmp
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
 HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb3c07fa/hadoop-dist/src/main/compose/ozone/docker-config
--
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config 
b/hadoop-dist/src/main/compose/ozone/docker-config
index 8e5efa9..c693db0 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -23,11 +23,12 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
 HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
 LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
 LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb3c07fa/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index ef96f379..72531a2 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -230,6 +230,9 @@ public final class OzoneConfigKeys {
   public static final String OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
   "ozone.web.authentication.kerberos.principal";
 
+  public static final String HDDS_DATANODE_PLUGINS_KEY =
+  "hdds.datanode.plugins";
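
The hdds.datanode.plugins and dfs.datanode.plugins keys above name classes
that are loaded through Hadoop's generic ServicePlugin interface. A minimal
sketch of such a plugin, assuming only the start/stop/close contract matters
here (the class name below is hypothetical; the real OzoneHddsDatanodeService
is part of this patch):

    import org.apache.hadoop.util.ServicePlugin;

    public class LoggingDatanodePlugin implements ServicePlugin {
      @Override
      public void start(Object service) {
        // 'service' is the object hosting the plugin (e.g. the datanode service).
        System.out.println("started inside " + service.getClass().getName());
      }

      @Override
      public void stop() {
        System.out.println("stopped");
      }

      @Override
      public void close() {
        // release any resources held by the plugin
      }
    }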

hadoop git commit: HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. Contributed by Surendra Singh Lilhore.

2018-04-10 Thread rakeshr
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 fe7a70e58 -> afbdd8fdc


HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. 
Contributed by Surendra Singh Lilhore.

(cherry picked from commit f89594f0b80e8efffdcb887daa4a18a2b0a228b3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afbdd8fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afbdd8fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afbdd8fd

Branch: refs/heads/branch-3.0
Commit: afbdd8fdcfda16ab0b3b317d04fb296418a23290
Parents: fe7a70e
Author: Rakesh Radhakrishnan 
Authored: Tue Apr 10 23:35:00 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Tue Apr 10 23:43:28 2018 +0530

--
 .../hdfs/server/namenode/FSTreeTraverser.java   | 339 ++
 .../server/namenode/ReencryptionHandler.java| 615 ---
 .../server/namenode/ReencryptionUpdater.java|   2 +-
 .../hdfs/server/namenode/TestReencryption.java  |   3 -
 .../namenode/TestReencryptionHandler.java   |  10 +-
 5 files changed, 595 insertions(+), 374 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afbdd8fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
new file mode 100644
index 000..ff77029
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
@@ -0,0 +1,339 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.util.Timer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * FSTreeTraverser traverses directories recursively and processes files
+ * in batches.
+ */
+@InterfaceAudience.Private
+public abstract class FSTreeTraverser {
+
+
+  public static final Logger LOG = LoggerFactory
+  .getLogger(FSTreeTraverser.class);
+
+  private final FSDirectory dir;
+
+  private long readLockReportingThresholdMs;
+
+  private Timer timer;
+
+  public FSTreeTraverser(FSDirectory dir, Configuration conf) {
+this.dir = dir;
+this.readLockReportingThresholdMs = conf.getLong(
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
+timer = new Timer();
+  }
+
+  public FSDirectory getFSDirectory() {
+return dir;
+  }
+
+  /**
+   * Iterate through all files directly inside parent, and recurse down
+   * directories. The listing is done in batches, and can optionally start
+   * after a position. The iteration of the inode tree is done in a
+   * depth-first fashion, but instead of holding all {@link INodeDirectory}'s
+   * in memory, only the path components to the current inode are held. This
+   * is to reduce memory consumption.
+   *
+   * @param parent
+   *  The inode id of the parent directory.
+   * @param startId
+   *  Id of the start inode.
+   * @param startAfter
+   *  Full path of a file the traverse should start after.

hadoop git commit: HDFS-13363. Record file path when FSDirAclOp throws AclException. Contributed by Gabor Bota.

2018-04-10 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk f89594f0b -> e76c2aeb2


HDFS-13363. Record file path when FSDirAclOp throws AclException. Contributed 
by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e76c2aeb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e76c2aeb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e76c2aeb

Branch: refs/heads/trunk
Commit: e76c2aeb288710ebee39680528dec44e454bbe9e
Parents: f89594f
Author: Xiao Chen 
Authored: Tue Apr 10 11:19:23 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 11:19:48 2018 -0700

--
 .../org/apache/hadoop/hdfs/protocol/AclException.java   | 10 ++
 .../apache/hadoop/hdfs/server/namenode/FSDirAclOp.java  | 12 
 2 files changed, 22 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e76c2aeb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
index 1210999..9948b99 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
@@ -36,4 +36,14 @@ public class AclException extends IOException {
   public AclException(String message) {
 super(message);
   }
+
+  /**
+   * Creates a new AclException.
+   *
+   * @param message String message
+   * @param cause The cause of the exception
+   */
+  public AclException(String message, Throwable cause) {
+super(message, cause);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e76c2aeb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 7b3471d..8d77f89 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -53,6 +53,8 @@ class FSDirAclOp {
   existingAcl, aclSpec);
   AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
   fsd.getEditLog().logSetAcl(src, newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -77,6 +79,8 @@ class FSDirAclOp {
 existingAcl, aclSpec);
   AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
   fsd.getEditLog().logSetAcl(src, newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -100,6 +104,8 @@ class FSDirAclOp {
 existingAcl);
   AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
   fsd.getEditLog().logSetAcl(src, newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -117,6 +123,8 @@ class FSDirAclOp {
   src = iip.getPath();
   fsd.checkOwner(pc, iip);
   unprotectedRemoveAcl(fsd, iip);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -136,6 +144,8 @@ class FSDirAclOp {
   fsd.checkOwner(pc, iip);
   List newAcl = unprotectedSetAcl(fsd, iip, aclSpec, false);
   fsd.getEditLog().logSetAcl(iip.getPath(), newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -162,6 +172,8 @@ class FSDirAclOp {
   .stickyBit(fsPermission.getStickyBit())
   .setPermission(fsPermission)
   .addEntries(acl).build();
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.readUnlock();
 }
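
The catch blocks above all apply the same wrap-and-rethrow pattern: append the
path to the message while keeping the original exception as the cause. A
stand-alone sketch of the pattern (the local AclException below merely mimics
the HDFS class, and the message text is made up for the example):

    import java.io.IOException;

    public class WrapDemo {
      static class AclException extends IOException {
        AclException(String message) { super(message); }
        AclException(String message, Throwable cause) { super(message, cause); }
      }

      static void setAcl(String src) throws AclException {
        try {
          throw new AclException("some ACL error");  // stand-in failure
        } catch (AclException e) {
          // Append the path, keep the original as the cause.
          throw new AclException(e.getMessage() + " Path: " + src, e);
        }
      }

      public static void main(String[] args) {
        try {
          setAcl("/user/alice");
        } catch (AclException e) {
          System.out.println(e.getMessage());  // "some ACL error Path: /user/alice"
        }
      }
    }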





hadoop git commit: HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. Contributed by Surendra Singh Lilhore.

2018-04-10 Thread rakeshr
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 c9da1e97f -> 3414bf6db


HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. 
Contributed by Surendra Singh Lilhore.

(cherry picked from commit f89594f0b80e8efffdcb887daa4a18a2b0a228b3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3414bf6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3414bf6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3414bf6d

Branch: refs/heads/branch-3.1
Commit: 3414bf6dbbe31929121142e00a9468e2ce09d98d
Parents: c9da1e9
Author: Rakesh Radhakrishnan 
Authored: Tue Apr 10 23:35:00 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Tue Apr 10 23:40:26 2018 +0530

--
 .../hdfs/server/namenode/FSTreeTraverser.java   | 339 ++
 .../server/namenode/ReencryptionHandler.java| 615 ---
 .../server/namenode/ReencryptionUpdater.java|   2 +-
 .../hdfs/server/namenode/TestReencryption.java  |   3 -
 .../namenode/TestReencryptionHandler.java   |  10 +-
 5 files changed, 595 insertions(+), 374 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3414bf6d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
new file mode 100644
index 000..ff77029
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
@@ -0,0 +1,339 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.util.Timer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * FSTreeTraverser traverses directories recursively and processes files
+ * in batches.
+ */
+@InterfaceAudience.Private
+public abstract class FSTreeTraverser {
+
+
+  public static final Logger LOG = LoggerFactory
+  .getLogger(FSTreeTraverser.class);
+
+  private final FSDirectory dir;
+
+  private long readLockReportingThresholdMs;
+
+  private Timer timer;
+
+  public FSTreeTraverser(FSDirectory dir, Configuration conf) {
+this.dir = dir;
+this.readLockReportingThresholdMs = conf.getLong(
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
+timer = new Timer();
+  }
+
+  public FSDirectory getFSDirectory() {
+return dir;
+  }
+
+  /**
+   * Iterate through all files directly inside parent, and recurse down
+   * directories. The listing is done in batches, and can optionally start
+   * after a position. The iteration of the inode tree is done in a
+   * depth-first fashion, but instead of holding all {@link INodeDirectory}'s
+   * in memory, only the path components to the current inode are held. This
+   * is to reduce memory consumption.
+   *
+   * @param parent
+   *  The inode id of the parent directory.
+   * @param startId
+   *  Id of the start inode.
+   * @param startAfter
+   *  Full path of a file the traverse should start after.

hadoop git commit: HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. Contributed by Surendra Singh Lilhore.

2018-04-10 Thread rakeshr
Repository: hadoop
Updated Branches:
  refs/heads/trunk cef8eb798 -> f89594f0b


HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f89594f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f89594f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f89594f0

Branch: refs/heads/trunk
Commit: f89594f0b80e8efffdcb887daa4a18a2b0a228b3
Parents: cef8eb7
Author: Rakesh Radhakrishnan 
Authored: Tue Apr 10 23:35:00 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Tue Apr 10 23:35:00 2018 +0530

--
 .../hdfs/server/namenode/FSTreeTraverser.java   | 339 ++
 .../server/namenode/ReencryptionHandler.java| 615 ---
 .../server/namenode/ReencryptionUpdater.java|   2 +-
 .../hdfs/server/namenode/TestReencryption.java  |   3 -
 .../namenode/TestReencryptionHandler.java   |  10 +-
 5 files changed, 595 insertions(+), 374 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f89594f0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
new file mode 100644
index 000..ff77029
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
@@ -0,0 +1,339 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.util.Timer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * FSTreeTraverser traverses directories recursively and processes files
+ * in batches.
+ */
+@InterfaceAudience.Private
+public abstract class FSTreeTraverser {
+
+
+  public static final Logger LOG = LoggerFactory
+  .getLogger(FSTreeTraverser.class);
+
+  private final FSDirectory dir;
+
+  private long readLockReportingThresholdMs;
+
+  private Timer timer;
+
+  public FSTreeTraverser(FSDirectory dir, Configuration conf) {
+this.dir = dir;
+this.readLockReportingThresholdMs = conf.getLong(
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
+timer = new Timer();
+  }
+
+  public FSDirectory getFSDirectory() {
+return dir;
+  }
+
+  /**
+   * Iterate through all files directly inside parent, and recurse down
+   * directories. The listing is done in batches, and can optionally start
+   * after a position. The iteration of the inode tree is done in a
+   * depth-first fashion, but instead of holding all {@link INodeDirectory}'s
+   * in memory, only the path components to the current inode are held. This
+   * is to reduce memory consumption.
+   *
+   * @param parent
+   *  The inode id of parent directory
+   * @param startId
+   *  Id of the start inode.
+   * @param startAfter
+   *  Full path of a file the traverse should start after.
+   * @param traverseInfo
+   *  info which may 
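
The idea described in the javadoc, a depth-first walk that keeps only a small
frontier in memory and hands files out in fixed-size batches, can be pictured
with a plain-filesystem sketch. This is not the FSTreeTraverser API itself
(which works on inodes under the namesystem lock); all names below are
invented for the illustration:

    import java.io.File;
    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.List;
    import java.util.function.Consumer;

    public class BatchedTraversal {
      public static void traverse(File root, int batchSize,
          Consumer<List<File>> onBatch) {
        Deque<File> pending = new ArrayDeque<>();
        pending.push(root);
        List<File> batch = new ArrayList<>(batchSize);
        while (!pending.isEmpty()) {
          File dir = pending.pop();
          File[] children = dir.listFiles();
          if (children == null) continue;           // unreadable or not a dir
          for (File child : children) {
            if (child.isDirectory()) {
              pending.push(child);                  // visit later, depth-first
            } else {
              batch.add(child);
              if (batch.size() == batchSize) {      // flush a full batch
                onBatch.accept(batch);
                batch = new ArrayList<>(batchSize);
              }
            }
          }
        }
        if (!batch.isEmpty()) onBatch.accept(batch); // flush the tail
      }

      public static void main(String[] args) {
        traverse(new File("."), 100,
            b -> System.out.println("batch of " + b.size()));
      }
    }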

[35/50] [abbrv] hadoop git commit: YARN-4511. Common scheduler changes to support scheduler-specific oversubscription implementations.

2018-04-10 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b237095d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
index 1836919..02c0cc5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
@@ -563,7 +563,7 @@ public class TestNodeLabelContainerAllocation {
   int numContainers) {
 CapacityScheduler cs = (CapacityScheduler) 
rm.getRMContext().getScheduler();
 SchedulerNode node = cs.getSchedulerNode(nodeId);
-Assert.assertEquals(numContainers, node.getNumContainers());
+Assert.assertEquals(numContainers, node.getNumGuaranteedContainers());
   }
 
   /**
@@ -1065,7 +1065,7 @@ public class TestNodeLabelContainerAllocation {
 for (int i = 0; i < 50; i++) {
   cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
   cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
-  if (schedulerNode1.getNumContainers() == 0) {
+  if (schedulerNode1.getNumGuaranteedContainers() == 0) {
 cycleWaited++;
   }
 }
@@ -1131,7 +1131,7 @@ public class TestNodeLabelContainerAllocation {
 CSAMContainerLaunchDiagnosticsConstants.LAST_NODE_PROCESSED_MSG
 + nodeIdStr + " ( Partition : [x]"));
 Assert.assertEquals(0, cs.getSchedulerNode(nm1.getNodeId())
-.getNumContainers());
+.getNumGuaranteedContainers());
 
 rm1.close();
   }
@@ -1215,7 +1215,7 @@ public class TestNodeLabelContainerAllocation {
 }
 
 // app1 gets all resource in partition=x
-Assert.assertEquals(10, schedulerNode1.getNumContainers());
+Assert.assertEquals(10, schedulerNode1.getNumGuaranteedContainers());
 
 // check non-exclusive containers of LeafQueue is correctly updated
 LeafQueue leafQueue = (LeafQueue) cs.getQueue("a");
@@ -1943,7 +1943,7 @@ public class TestNodeLabelContainerAllocation {
 }
 
 // app1 gets all resource in partition=x
-Assert.assertEquals(5, schedulerNode1.getNumContainers());
+Assert.assertEquals(5, schedulerNode1.getNumGuaranteedContainers());
 
 SchedulerNodeReport reportNm1 = rm1.getResourceScheduler()
 .getNodeReport(nm1.getNodeId());
@@ -2043,7 +2043,7 @@ public class TestNodeLabelContainerAllocation {
 }
 
 // app1 gets all resource in partition=x (non-exclusive)
-Assert.assertEquals(3, schedulerNode1.getNumContainers());
+Assert.assertEquals(3, schedulerNode1.getNumGuaranteedContainers());
 
 SchedulerNodeReport reportNm1 = rm1.getResourceScheduler()
 .getNodeReport(nm1.getNodeId());
@@ -2074,7 +2074,7 @@ public class TestNodeLabelContainerAllocation {
 cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
 
 // app1 gets all resource in default partition
-Assert.assertEquals(2, schedulerNode2.getNumContainers());
+Assert.assertEquals(2, schedulerNode2.getNumGuaranteedContainers());
 
 // 3GB is used from label x quota. 2GB used from default label.
 // So total 2.5 GB is remaining.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b237095d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
index 2512787..6390297 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
+++ 

[09/50] [abbrv] hadoop git commit: YARN-6936. [Atsv2] Retrospect storing entities into sub application table from client perspective. (Rohith Sharma K S via Haibo Chen)

2018-04-10 Thread haibochen
YARN-6936. [Atsv2] Retrospect storing entities into sub application table from 
client perspective. (Rohith Sharma K S via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8b8bd53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8b8bd53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8b8bd53

Branch: refs/heads/YARN-1011
Commit: f8b8bd53c4797d406bea5b1b0cdb179e209169cc
Parents: d737bf99
Author: Haibo Chen 
Authored: Thu Apr 5 10:22:50 2018 -0700
Committer: Haibo Chen 
Committed: Thu Apr 5 10:23:42 2018 -0700

--
 .../timelineservice/SubApplicationEntity.java   | 50 
 .../yarn/client/api/TimelineV2Client.java   | 47 +++---
 .../client/api/impl/TimelineV2ClientImpl.java   | 30 ++--
 ...stTimelineReaderWebServicesHBaseStorage.java |  7 +--
 .../TestHBaseTimelineStorageEntities.java   |  3 +-
 .../storage/HBaseTimelineWriterImpl.java|  3 +-
 .../collector/TimelineCollectorWebService.java  | 19 ++--
 7 files changed, 138 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8b8bd53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
new file mode 100644
index 000..a83ef3d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records.timelineservice;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This entity represents a user-defined entity to be stored in the sub
+ * application table.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class SubApplicationEntity extends HierarchicalTimelineEntity {
+
+  public static final String YARN_APPLICATION_ID = "YARN_APPLICATION_ID";
+
+  public SubApplicationEntity(TimelineEntity entity) {
+super(entity);
+  }
+
+  /**
+   * Checks if the input TimelineEntity object is a SubApplicationEntity.
+   *
+   * @param te TimelineEntity object.
+   * @return true if the input is a SubApplicationEntity, false otherwise
+   */
+  public static boolean isSubApplicationEntity(TimelineEntity te) {
+return (te != null && te instanceof SubApplicationEntity);
+  }
+
+  public void setApplicationId(String appId) {
+addInfo(YARN_APPLICATION_ID, appId);
+  }
+}
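
A short usage sketch for the new class, based only on the API shown above;
the entity type, id, and application id strings are made up for the example:

    import org.apache.hadoop.yarn.api.records.timelineservice.SubApplicationEntity;
    import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;

    public class SubAppDemo {
      public static void main(String[] args) {
        TimelineEntity raw = new TimelineEntity();
        raw.setType("MY_FRAMEWORK_TASK");   // hypothetical entity type
        raw.setId("task-00042");            // hypothetical entity id
        SubApplicationEntity subApp = new SubApplicationEntity(raw);
        subApp.setApplicationId("application_1523399999999_0001");
        System.out.println(SubApplicationEntity.isSubApplicationEntity(subApp)); // true
      }
    }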

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8b8bd53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
index 423c059..e987b46 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
@@ -54,9 +54,10 @@ public abstract class TimelineV2Client extends 
CompositeService {
 
   /**
* 
-   * Send the information of a number of conceptual entities to the timeline
-   * service v.2 collector. It is a blocking API. The method will 

[13/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
new file mode 100644
index 000..ab7c120
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
@@ -0,0 +1,3034 @@
+  [3,034 lines of generated JDiff XML for the Apache Hadoop YARN Common 3.1.0
+  API report; the XML markup was stripped by the mail archive and nothing
+  recoverable remains.]

[41/50] [abbrv] hadoop git commit: YARN-1015. FS should watch node resource utilization and allocate opportunistic containers if appropriate.

2018-04-10 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/82ef338d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 1227de2..ac925c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -55,14 +55,20 @@ import org.apache.hadoop.yarn.MockApps;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import 
org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
@@ -72,6 +78,8 @@ import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
+import org.apache.hadoop.yarn.server.api.records.OverAllocationInfo;
+import org.apache.hadoop.yarn.server.api.records.ResourceThresholds;
 import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
@@ -93,6 +101,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 
 
@@ -1056,15 +1065,15 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 assertEquals(
 YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
 scheduler.getQueueManager().getQueue("queue1").
-getResourceUsage().getMemorySize());
+getGuaranteedResourceUsage().getMemorySize());
 
 NodeUpdateSchedulerEvent updateEvent2 = new 
NodeUpdateSchedulerEvent(node2);
 scheduler.handle(updateEvent2);
 
 assertEquals(1024, scheduler.getQueueManager().getQueue("queue1").
-  getResourceUsage().getMemorySize());
+getGuaranteedResourceUsage().getMemorySize());
 assertEquals(2, scheduler.getQueueManager().getQueue("queue1").
-  getResourceUsage().getVirtualCores());
+getGuaranteedResourceUsage().getVirtualCores());
 
 // verify metrics
 QueueMetrics queue1Metrics = scheduler.getQueueManager().getQueue("queue1")
@@ -1099,7 +1108,7 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 
 // Make sure queue 1 is allocated app capacity
 assertEquals(1024, scheduler.getQueueManager().getQueue("queue1").
-getResourceUsage().getMemorySize());
+getGuaranteedResourceUsage().getMemorySize());
 
 // Now queue 2 requests likewise
 ApplicationAttemptId attId = createSchedulingRequest(1024, "queue2", 
"user1", 1);
@@ -1109,7 +1118,7 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 
 // Make sure queue 2 is waiting 

[37/50] [abbrv] hadoop git commit: YARN-4512 [YARN-1011]. Provide a knob to turn on over-allocation. (kasha)

2018-04-10 Thread haibochen
YARN-4512 [YARN-1011]. Provide a knob to turn on over-allocation. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6278cc71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6278cc71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6278cc71

Branch: refs/heads/YARN-1011
Commit: 6278cc716cf0e63d1a86a74b3519d52e0924d7aa
Parents: e9b9f48
Author: Karthik Kambatla 
Authored: Fri Jan 29 14:31:45 2016 -0800
Committer: Haibo Chen 
Committed: Mon Apr 9 17:07:06 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 ++-
 .../src/main/resources/yarn-default.xml |  21 
 .../RegisterNodeManagerRequest.java |  14 ++-
 .../pb/RegisterNodeManagerRequestPBImpl.java|  48 -
 .../server/api/records/OverAllocationInfo.java  |  45 
 .../server/api/records/ResourceThresholds.java  |  45 
 .../impl/pb/OverAllocationInfoPBImpl.java   | 106 +++
 .../impl/pb/ResourceThresholdsPBImpl.java   |  93 
 .../yarn_server_common_service_protos.proto |  10 ++
 .../hadoop/yarn/server/nodemanager/Context.java |   5 +
 .../yarn/server/nodemanager/NodeManager.java|  17 +++
 .../nodemanager/NodeStatusUpdaterImpl.java  |   7 +-
 .../monitor/ContainersMonitorImpl.java  |  34 ++
 .../amrmproxy/BaseAMRMProxyTest.java|  11 ++
 14 files changed, 457 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6278cc71/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 2590b6f..2d69fa9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2042,7 +2042,6 @@ public class YarnConfiguration extends Configuration {
   public static final boolean 
DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE =
   false;
 
-
   // Configurations for application lifetime monitor feature
   public static final String RM_APPLICATION_MONITOR_INTERVAL_MS =
   RM_PREFIX + "application-timeouts.monitor.interval-ms";
@@ -2050,6 +2049,18 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS =
   3000;
 
+  /** Overallocation (= allocation based on utilization) configs. */
+  public static final String NM_OVERALLOCATION_ALLOCATION_THRESHOLD =
+  NM_PREFIX + "overallocation.allocation-threshold";
+  public static final float DEFAULT_NM_OVERALLOCATION_ALLOCATION_THRESHOLD
+  = 0f;
+  @Private
+  public static final float MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD = 0.95f;
+  public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold";
+  public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
+  = 0f;
+
   /**
* Interval of time the linux container executor should try cleaning up
* cgroups entry when cleaning up a container. This is required due to what 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6278cc71/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 81b6658..4a7548a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1710,6 +1710,27 @@
   
 
   
+<property>
+  <description>The extent of over-allocation (container-allocation based on
+  current utilization instead of prior allocation) allowed on this node,
+  expressed as a float between 0 and 0.95. By default, over-allocation is
+  turned off (value = 0). When turned on, the node allows running
+  OPPORTUNISTIC containers when the aggregate utilization is under the
+  value specified here multiplied by the node's advertised capacity.
+  </description>
+  <name>yarn.nodemanager.overallocation.allocation-threshold</name>
+  <value>0f</value>
+</property>
+
+<property>
+  <description>When a node is over-allocated to improve utilization by
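
On the NodeManager side, code can read the new knobs through the constants
added to YarnConfiguration in this patch. A small sketch (only the constants
and Configuration.getFloat are taken from the patch; the rest is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class OverAllocationConfigDemo {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        float threshold = conf.getFloat(
            YarnConfiguration.NM_OVERALLOCATION_ALLOCATION_THRESHOLD,
            YarnConfiguration.DEFAULT_NM_OVERALLOCATION_ALLOCATION_THRESHOLD);
        // Over-allocation stays off at the default of 0, and the effective
        // threshold is capped at 0.95 of the node's advertised capacity.
        boolean enabled = threshold > 0f
            && threshold <= YarnConfiguration.MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD;
        System.out.println("over-allocation enabled: " + enabled);
      }
    }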

[08/50] [abbrv] hadoop git commit: HDFS-13350. Negative legacy block ID will confuse Erasure Coding to be considered as striped block. (Contributed by Lei (Eddy) Xu).

2018-04-10 Thread haibochen
HDFS-13350. Negative legacy block ID will confuse Erasure Coding to be 
considered as striped block. (Contributed by Lei (Eddy) Xu).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d737bf99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d737bf99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d737bf99

Branch: refs/heads/YARN-1011
Commit: d737bf99d44ce34cd01baad716d23df269267c95
Parents: e52539b
Author: Lei Xu 
Authored: Wed Apr 4 15:56:17 2018 -0700
Committer: Lei Xu 
Committed: Thu Apr 5 09:59:10 2018 -0700

--
 .../server/blockmanagement/BlockIdManager.java  | 17 ++
 .../server/blockmanagement/BlockManager.java|  5 +-
 .../blockmanagement/BlockManagerSafeMode.java   |  2 +-
 .../hdfs/server/blockmanagement/BlocksMap.java  | 12 ++--
 .../blockmanagement/CorruptReplicasMap.java | 35 +--
 .../blockmanagement/InvalidateBlocks.java   | 13 +++--
 .../blockmanagement/TestBlockManager.java   | 61 
 .../blockmanagement/TestCorruptReplicaInfo.java | 48 ++-
 8 files changed, 136 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 321155b..5eebe8e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -239,6 +239,23 @@ public class BlockIdManager {
 legacyGenerationStampLimit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
   }
 
+  /**
+   * Return true if the block is a striped block.
+   *
+   * Before HDFS-4645, block IDs were randomly generated (legacy), so it is
+   * possible for a legacy block ID to be negative, which should not be
+   * considered a striped block ID.
+   *
+   * @see #isLegacyBlock(Block) detecting legacy block IDs.
+   */
+  public boolean isStripedBlock(Block block) {
+return isStripedBlockID(block.getBlockId()) && !isLegacyBlock(block);
+  }
+
+  /**
+   * See {@link #isStripedBlock(Block)}; this function should not be used alone
+   * to determine whether a block is a striped block.
+   */
   public static boolean isStripedBlockID(long id) {
 return BlockType.fromBlockId(id) == STRIPED;
   }
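
The disambiguation can be pictured with plain longs: a negative ID marks the
striped ID space, but only for blocks created after randomized (legacy) IDs
were retired, which the generation stamp reveals. A self-contained sketch with
made-up numbers (the real check lives in BlockIdManager and consults the
namesystem's legacy generation-stamp limit):

    public class StripedIdDemo {
      // Hypothetical cutover: generation stamps below this came from the
      // legacy (randomized block ID) era.
      static final long LEGACY_GENERATION_STAMP_LIMIT = 1_000_000L;

      static boolean isStripedBlockId(long id) {
        return id < 0;  // striped blocks draw IDs from a negative ID space
      }

      static boolean isLegacy(long generationStamp) {
        return generationStamp < LEGACY_GENERATION_STAMP_LIMIT;
      }

      static boolean isStripedBlock(long id, long generationStamp) {
        // A negative ID alone is not enough; the block must also postdate
        // the randomized-ID era.
        return isStripedBlockId(id) && !isLegacy(generationStamp);
      }

      public static void main(String[] args) {
        System.out.println(isStripedBlock(-42L, 7L));          // false: legacy
        System.out.println(isStripedBlock(-42L, 2_000_000L));  // true: striped
      }
    }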

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f49e1d8..76a7781 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -448,7 +448,8 @@ public class BlockManager implements BlockStatsMXBean {
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 
1000L;
 invalidateBlocks = new InvalidateBlocks(
 datanodeManager.getBlockInvalidateLimit(),
-startupDelayBlockDeletionInMs);
+startupDelayBlockDeletionInMs,
+blockIdManager);
 
 // Compute the map capacity by allocating 2% of total memory
 blocksMap = new BlocksMap(
@@ -1677,7 +1678,7 @@ public class BlockManager implements BlockStatsMXBean {
   corrupted.setBlockId(b.getStored().getBlockId());
 }
 corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.getReason(),
-b.getReasonCode());
+b.getReasonCode(), b.getStored().isStriped());
 
 NumberReplicas numberOfReplicas = countNodes(b.getStored());
 boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
 

[12/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
new file mode 100644
index 000..1e826f3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
@@ -0,0 +1,1331 @@
+  [1,331 lines of generated JDiff XML for the Apache Hadoop YARN Server Common
+  3.1.0 API report; the XML markup was stripped by the mail archive and nothing
+  recoverable remains.]



