[3/4] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-02-16 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/42b69405/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
new file mode 100644
index 0000000..b5b5f77
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
@@ -0,0 +1,440 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.api.impl;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.ConnectException;
+import java.net.HttpURLConnection;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.net.URL;
+import java.net.URLConnection;
+import java.security.GeneralSecurityException;
+import java.security.PrivilegedExceptionAction;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLSocketFactory;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import 
org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
+import 
org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticator;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientRequest;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.api.client.filter.ClientFilter;
+import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
+import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
+
+/**
+ * Utility Connector class which is used by timeline clients to securely get
+ * connected to the timeline server.
+ *
+ */
+public class TimelineConnector extends AbstractService {
+
+  private static final Joiner JOINER = Joiner.on("");
+  private static final Log LOG = LogFactory.getLog(TimelineConnector.class);
+  public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
+
+  private SSLFactory sslFactory;
+  private Client client;
+  private ConnectionConfigurator connConfigurator;
+  private DelegationTokenAuthenticator authenticator;
+  private DelegationTokenAuthenticatedURL.Token token;
+  private UserGroupInformation authUgi;
+  private String doAsUser;
+  @VisibleForTesting
+  TimelineClientConnectionRetry connectionRetry;
+  private boolean requireConnectionRetry;
+
+  public TimelineConnector(boolean requireConnectionRetry,
+  UserGroupInformation authUgi, String doAsUser,
+  DelegationTokenAuthenticatedURL.Token token) {
+super("TimelineConnector");
+this.requireConnectionRetry = 

[4/4] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-02-16 Thread sjlee
YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate 
classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

(cherry picked from commit 73235ab30361b41293846189f3c5fef321ae7cac)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42b69405
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42b69405
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42b69405

Branch: refs/heads/YARN-5355-branch-2
Commit: 42b69405fa24abf3949a680524fd3c52f284fa60
Parents: 47ec7f9
Author: Sangjin Lee 
Authored: Thu Feb 16 18:43:31 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Feb 16 18:58:19 2017 -0800

--
 .../jobhistory/JobHistoryEventHandler.java  |  57 +-
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  14 +-
 .../v2/app/rm/RMContainerAllocator.java |   4 +-
 .../jobhistory/TestJobHistoryEventHandler.java  |   8 +-
 .../distributedshell/ApplicationMaster.java | 113 +--
 .../hadoop/yarn/client/api/AMRMClient.java  |  39 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |  19 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |   5 +-
 .../yarn/client/api/impl/YarnClientImpl.java|  19 +-
 .../hadoop/yarn/client/api/TimelineClient.java  |  94 +--
 .../yarn/client/api/TimelineV2Client.java   |  92 +++
 .../client/api/impl/TimelineClientImpl.java | 822 ++-
 .../yarn/client/api/impl/TimelineConnector.java | 440 ++
 .../client/api/impl/TimelineV2ClientImpl.java   | 459 +++
 .../client/api/impl/TestTimelineClient.java |  39 +-
 .../api/impl/TestTimelineClientV2Impl.java  |   4 +-
 .../timelineservice/NMTimelinePublisher.java|  22 +-
 .../TestNMTimelinePublisher.java|  10 +-
 .../TestTimelineServiceClientIntegration.java   |  10 +-
 19 files changed, 1278 insertions(+), 992 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42b69405/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index ee5f8bd..4cf42f1 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -72,13 +72,12 @@ import 
org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
+import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.util.TimelineServiceHelper;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.node.JsonNodeFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.ClientHandlerException;
@@ -91,8 +90,6 @@ import com.sun.jersey.api.client.ClientHandlerException;
  */
 public class JobHistoryEventHandler extends AbstractService
 implements EventHandler {
-  private static final JsonNodeFactory FACTORY =
-  new ObjectMapper().getNodeFactory();
 
   private final AppContext context;
   private final int startCount;
@@ -134,9 +131,10 @@ public class JobHistoryEventHandler extends AbstractService
   // should job completion be force when the AM shuts down?
   protected volatile boolean forceJobCompletion = false;
 
+  @VisibleForTesting
   protected TimelineClient timelineClient;
-
-  private boolean timelineServiceV2Enabled = false;
+  @VisibleForTesting
+  protected TimelineV2Client timelineV2Client;
 
   private static String MAPREDUCE_JOB_ENTITY_TYPE = "MAPREDUCE_JOB";
   private static String MAPREDUCE_TASK_ENTITY_TYPE = "MAPREDUCE_TASK";
@@ -269,12 +267,17 @@ public class JobHistoryEventHandler extends 
AbstractService
 MRJobConfig.DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA)) {
   LOG.info("Emitting job history data 

[1/4] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-02-16 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 b92089c0e -> 73235ab30
  refs/heads/YARN-5355-branch-2 47ec7f927 -> 42b69405f


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73235ab3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
new file mode 100644
index 0000000..b5b5f77
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
@@ -0,0 +1,440 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.api.impl;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.ConnectException;
+import java.net.HttpURLConnection;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.net.URL;
+import java.net.URLConnection;
+import java.security.GeneralSecurityException;
+import java.security.PrivilegedExceptionAction;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLSocketFactory;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import 
org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
+import 
org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticator;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientRequest;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.api.client.filter.ClientFilter;
+import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
+import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
+
+/**
+ * Utility Connector class which is used by timeline clients to securely get
+ * connected to the timeline server.
+ *
+ */
+public class TimelineConnector extends AbstractService {
+
+  private static final Joiner JOINER = Joiner.on("");
+  private static final Log LOG = LogFactory.getLog(TimelineConnector.class);
+  public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
+
+  private SSLFactory sslFactory;
+  private Client client;
+  private ConnectionConfigurator connConfigurator;
+  private DelegationTokenAuthenticator authenticator;
+  private DelegationTokenAuthenticatedURL.Token token;
+  private UserGroupInformation authUgi;
+  private String doAsUser;
+  @VisibleForTesting
+  TimelineClientConnectionRetry connectionRetry;
+  private boolean requireConnectionRetry;
+
+  public TimelineConnector(boolean requireConnectionRetry,
+  UserGroupInformation authUgi, String 

[2/4] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-02-16 Thread sjlee
YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate 
classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73235ab3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73235ab3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73235ab3

Branch: refs/heads/YARN-5355
Commit: 73235ab30361b41293846189f3c5fef321ae7cac
Parents: b92089c
Author: Sangjin Lee 
Authored: Thu Feb 16 18:43:31 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Feb 16 18:43:31 2017 -0800

--
 .../jobhistory/JobHistoryEventHandler.java  |  57 +-
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  14 +-
 .../v2/app/rm/RMContainerAllocator.java |   4 +-
 .../jobhistory/TestJobHistoryEventHandler.java  |   8 +-
 .../distributedshell/ApplicationMaster.java | 113 +--
 .../hadoop/yarn/client/api/AMRMClient.java  |  39 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |  19 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |   5 +-
 .../yarn/client/api/impl/YarnClientImpl.java|  19 +-
 .../hadoop/yarn/client/api/TimelineClient.java  |  94 +--
 .../yarn/client/api/TimelineV2Client.java   |  92 +++
 .../client/api/impl/TimelineClientImpl.java | 823 ++-
 .../yarn/client/api/impl/TimelineConnector.java | 440 ++
 .../client/api/impl/TimelineV2ClientImpl.java   | 459 +++
 .../client/api/impl/TestTimelineClient.java |  39 +-
 .../api/impl/TestTimelineClientV2Impl.java  |   4 +-
 .../timelineservice/NMTimelinePublisher.java|  22 +-
 .../TestNMTimelinePublisher.java|  10 +-
 .../TestTimelineServiceClientIntegration.java   |  10 +-
 19 files changed, 1278 insertions(+), 993 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73235ab3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index dc3a29a..bbc7f6e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -72,13 +72,12 @@ import 
org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
+import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.util.TimelineServiceHelper;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.node.JsonNodeFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.ClientHandlerException;
@@ -91,8 +90,6 @@ import com.sun.jersey.api.client.ClientHandlerException;
  */
 public class JobHistoryEventHandler extends AbstractService
 implements EventHandler {
-  private static final JsonNodeFactory FACTORY =
-  new ObjectMapper().getNodeFactory();
 
   private final AppContext context;
   private final int startCount;
@@ -134,9 +131,10 @@ public class JobHistoryEventHandler extends AbstractService
   // should job completion be force when the AM shuts down?
   protected volatile boolean forceJobCompletion = false;
 
+  @VisibleForTesting
   protected TimelineClient timelineClient;
-
-  private boolean timelineServiceV2Enabled = false;
+  @VisibleForTesting
+  protected TimelineV2Client timelineV2Client;
 
   private static String MAPREDUCE_JOB_ENTITY_TYPE = "MAPREDUCE_JOB";
   private static String MAPREDUCE_TASK_ENTITY_TYPE = "MAPREDUCE_TASK";
@@ -269,12 +267,17 @@ public class JobHistoryEventHandler extends 
AbstractService
 MRJobConfig.DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA)) {
   LOG.info("Emitting job history data to the timeline service is enabled");
   if 

hadoop git commit: HDFS-11410. Use the cached instance when edit logging SetAclOp, SetXAttrOp and RemoveXAttrOp.

2017-02-16 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b23e93d46 -> 06386b7e7


HDFS-11410. Use the cached instance when edit logging SetAclOp, SetXAttrOp and 
RemoveXAttrOp.

(cherry picked from commit 02c549484a4fe6215c7f1a18d89389dbba6ea723)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06386b7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06386b7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06386b7e

Branch: refs/heads/branch-2
Commit: 06386b7e78dd2123b841616ce25146407a69ec2f
Parents: b23e93d
Author: Xiao Chen 
Authored: Thu Feb 16 18:07:55 2017 -0800
Committer: Xiao Chen 
Committed: Thu Feb 16 18:08:16 2017 -0800

--
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  6 +++---
 .../hdfs/server/namenode/FSEditLogOp.java   | 21 +---
 2 files changed, 12 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06386b7e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 29492b4..2ab15fd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -1207,14 +1207,14 @@ public class FSEditLog implements LogsPurgeable {
   }
 
   void logSetAcl(String src, List entries) {
-SetAclOp op = SetAclOp.getInstance();
+final SetAclOp op = SetAclOp.getInstance(cache.get());
 op.src = src;
 op.aclEntries = entries;
 logEdit(op);
   }
   
   void logSetXAttrs(String src, List xAttrs, boolean toLogRpcIds) {
-final SetXAttrOp op = SetXAttrOp.getInstance();
+final SetXAttrOp op = SetXAttrOp.getInstance(cache.get());
 op.src = src;
 op.xAttrs = xAttrs;
 logRpcIds(op, toLogRpcIds);
@@ -1222,7 +1222,7 @@ public class FSEditLog implements LogsPurgeable {
   }
   
   void logRemoveXAttrs(String src, List xAttrs, boolean toLogRpcIds) {
-final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
+final RemoveXAttrOp op = RemoveXAttrOp.getInstance(cache.get());
 op.src = src;
 op.xAttrs = xAttrs;
 logRpcIds(op, toLogRpcIds);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06386b7e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index ae9bfe1..bee71c0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -3745,8 +3745,7 @@ public abstract class FSEditLogOp {
 }
 
 static AddCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
-  return (AddCacheDirectiveInfoOp) cache
-  .get(OP_ADD_CACHE_DIRECTIVE);
+  return (AddCacheDirectiveInfoOp) cache.get(OP_ADD_CACHE_DIRECTIVE);
 }
 
 @Override
@@ -3816,8 +3815,7 @@ public abstract class FSEditLogOp {
 }
 
 static ModifyCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
-  return (ModifyCacheDirectiveInfoOp) cache
-  .get(OP_MODIFY_CACHE_DIRECTIVE);
+  return (ModifyCacheDirectiveInfoOp) cache.get(OP_MODIFY_CACHE_DIRECTIVE);
 }
 
 @Override
@@ -3893,8 +3891,7 @@ public abstract class FSEditLogOp {
 }
 
 static RemoveCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
-  return (RemoveCacheDirectiveInfoOp) cache
-  .get(OP_REMOVE_CACHE_DIRECTIVE);
+  return (RemoveCacheDirectiveInfoOp) cache.get(OP_REMOVE_CACHE_DIRECTIVE);
 }
 
 @Override
@@ -4146,8 +4143,8 @@ public abstract class FSEditLogOp {
   super(OP_REMOVE_XATTR);
 }
 
-static RemoveXAttrOp getInstance() {
-  return new RemoveXAttrOp();
+static RemoveXAttrOp getInstance(OpInstanceCache cache) {
+  return (RemoveXAttrOp) cache.get(OP_REMOVE_XATTR);
 }
 
 @Override
@@ -4199,8 +4196,8 @@ public abstract class FSEditLogOp {
   super(OP_SET_XATTR);
 }
 
-static SetXAttrOp getInstance() {
-  return new SetXAttrOp();
+static SetXAttrOp getInstance(OpInstanceCache cache) {
+  

hadoop git commit: HDFS-11410. Use the cached instance when edit logging SetAclOp, SetXAttrOp and RemoveXAttrOp.

2017-02-16 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 658702eff -> 02c549484


HDFS-11410. Use the cached instance when edit logging SetAclOp, SetXAttrOp and 
RemoveXAttrOp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02c54948
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02c54948
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02c54948

Branch: refs/heads/trunk
Commit: 02c549484a4fe6215c7f1a18d89389dbba6ea723
Parents: 658702e
Author: Xiao Chen 
Authored: Thu Feb 16 18:07:55 2017 -0800
Committer: Xiao Chen 
Committed: Thu Feb 16 18:07:55 2017 -0800

--
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  6 +++---
 .../hdfs/server/namenode/FSEditLogOp.java   | 21 +---
 2 files changed, 12 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02c54948/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 8454a46..d3f4447 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -1206,14 +1206,14 @@ public class FSEditLog implements LogsPurgeable {
   }
 
   void logSetAcl(String src, List entries) {
-SetAclOp op = SetAclOp.getInstance();
+final SetAclOp op = SetAclOp.getInstance(cache.get());
 op.src = src;
 op.aclEntries = entries;
 logEdit(op);
   }
   
   void logSetXAttrs(String src, List xAttrs, boolean toLogRpcIds) {
-final SetXAttrOp op = SetXAttrOp.getInstance();
+final SetXAttrOp op = SetXAttrOp.getInstance(cache.get());
 op.src = src;
 op.xAttrs = xAttrs;
 logRpcIds(op, toLogRpcIds);
@@ -1221,7 +1221,7 @@ public class FSEditLog implements LogsPurgeable {
   }
   
   void logRemoveXAttrs(String src, List xAttrs, boolean toLogRpcIds) {
-final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
+final RemoveXAttrOp op = RemoveXAttrOp.getInstance(cache.get());
 op.src = src;
 op.xAttrs = xAttrs;
 logRpcIds(op, toLogRpcIds);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02c54948/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index a3285a9..6293557 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -3745,8 +3745,7 @@ public abstract class FSEditLogOp {
 }
 
 static AddCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
-  return (AddCacheDirectiveInfoOp) cache
-  .get(OP_ADD_CACHE_DIRECTIVE);
+  return (AddCacheDirectiveInfoOp) cache.get(OP_ADD_CACHE_DIRECTIVE);
 }
 
 @Override
@@ -3816,8 +3815,7 @@ public abstract class FSEditLogOp {
 }
 
 static ModifyCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
-  return (ModifyCacheDirectiveInfoOp) cache
-  .get(OP_MODIFY_CACHE_DIRECTIVE);
+  return (ModifyCacheDirectiveInfoOp) cache.get(OP_MODIFY_CACHE_DIRECTIVE);
 }
 
 @Override
@@ -3893,8 +3891,7 @@ public abstract class FSEditLogOp {
 }
 
 static RemoveCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
-  return (RemoveCacheDirectiveInfoOp) cache
-  .get(OP_REMOVE_CACHE_DIRECTIVE);
+  return (RemoveCacheDirectiveInfoOp) cache.get(OP_REMOVE_CACHE_DIRECTIVE);
 }
 
 @Override
@@ -4146,8 +4143,8 @@ public abstract class FSEditLogOp {
   super(OP_REMOVE_XATTR);
 }
 
-static RemoveXAttrOp getInstance() {
-  return new RemoveXAttrOp();
+static RemoveXAttrOp getInstance(OpInstanceCache cache) {
+  return (RemoveXAttrOp) cache.get(OP_REMOVE_XATTR);
 }
 
 @Override
@@ -4199,8 +4196,8 @@ public abstract class FSEditLogOp {
   super(OP_SET_XATTR);
 }
 
-static SetXAttrOp getInstance() {
-  return new SetXAttrOp();
+static SetXAttrOp getInstance(OpInstanceCache cache) {
+  return (SetXAttrOp) cache.get(OP_SET_XATTR);
 }
 
 @Override
@@ -4252,8 

hadoop git commit: HADOOP-14040. Use shaded aws-sdk uber-JAR 1.11.86. Contributed by Steve Loughran and Sean Mackrory

2017-02-16 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 29e100717 -> b23e93d46


HADOOP-14040. Use shaded aws-sdk uber-JAR 1.11.86. Contributed by Steve 
Loughran and Sean Mackrory

(cherry picked from commit 658702efffdf52cf5ddf8e92f959f1157c95a348)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b23e93d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b23e93d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b23e93d4

Branch: refs/heads/branch-2
Commit: b23e93d465a16a1604266b010776807d50259a0d
Parents: 29e1007
Author: Mingliang Liu 
Authored: Thu Feb 16 16:51:03 2017 -0800
Committer: Mingliang Liu 
Committed: Thu Feb 16 16:59:31 2017 -0800

--
 hadoop-project/pom.xml  | 9 ++---
 hadoop-tools/hadoop-aws/pom.xml | 7 +--
 2 files changed, 3 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b23e93d4/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index bd96691..e156a73 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -117,7 +117,7 @@
 1.0-beta-1
 1.0-alpha-8
 900
-1.11.45
+1.11.86
   
 
   
@@ -684,12 +684,7 @@
   
   
 com.amazonaws
-aws-java-sdk-s3
-${aws-java-sdk.version}
-  
-  
-com.amazonaws
-aws-java-sdk-sts
+aws-java-sdk-bundle
 ${aws-java-sdk.version}
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b23e93d4/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index bf41864..7e90ac9 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -314,7 +314,7 @@
 
 
   com.amazonaws
-  aws-java-sdk-s3
+  aws-java-sdk-bundle
   compile
 
 
@@ -338,11 +338,6 @@
   joda-time
 
 
-  com.amazonaws
-  aws-java-sdk-sts
-  test
-
-
   junit
   junit
   test


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-14040. Use shaded aws-sdk uber-JAR 1.11.86. Contributed by Steve Loughran and Sean Mackrory

2017-02-16 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/trunk bdad8b7b9 -> 658702eff


HADOOP-14040. Use shaded aws-sdk uber-JAR 1.11.86. Contributed by Steve 
Loughran and Sean Mackrory


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/658702ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/658702ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/658702ef

Branch: refs/heads/trunk
Commit: 658702efffdf52cf5ddf8e92f959f1157c95a348
Parents: bdad8b7
Author: Mingliang Liu 
Authored: Thu Feb 16 16:51:03 2017 -0800
Committer: Mingliang Liu 
Committed: Thu Feb 16 16:51:03 2017 -0800

--
 hadoop-project/pom.xml  | 9 ++---
 hadoop-tools/hadoop-aws/pom.xml | 7 +--
 2 files changed, 3 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/658702ef/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 606f7fc..47e21d8 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -126,7 +126,7 @@
 1.0-beta-1
 1.0-alpha-8
 900
-1.11.45
+1.11.86
 
 ${project.version}
@@ -791,12 +791,7 @@
   
   
 com.amazonaws
-aws-java-sdk-s3
-${aws-java-sdk.version}
-  
-  
-com.amazonaws
-aws-java-sdk-sts
+aws-java-sdk-bundle
 ${aws-java-sdk.version}
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/658702ef/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 1f64b02..0fdbc5d 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -331,7 +331,7 @@
 
 
   com.amazonaws
-  aws-java-sdk-s3
+  aws-java-sdk-bundle
   compile
 
 
@@ -355,11 +355,6 @@
   joda-time
 
 
-  com.amazonaws
-  aws-java-sdk-sts
-  test
-
-
   junit
   junit
   test


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-14019. Fix some typos in the s3a docs. Contributed by Steve Loughran

2017-02-16 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 83a589618 -> 3b4f840cc


HADOOP-14019. Fix some typos in the s3a docs. Contributed by Steve Loughran

(cherry picked from commit bdad8b7b97d7f48119f016d68f32982d680c8796)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b4f840c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b4f840c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b4f840c

Branch: refs/heads/branch-2.8
Commit: 3b4f840ccbe50ccfe07737af24f6ff5b381492a1
Parents: 83a5896
Author: Mingliang Liu 
Authored: Thu Feb 16 16:41:31 2017 -0800
Committer: Mingliang Liu 
Committed: Thu Feb 16 16:43:22 2017 -0800

--
 .../src/site/markdown/tools/hadoop-aws/index.md | 16 +---
 1 file changed, 13 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b4f840c/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index ede7eb4..cc6c5df 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -989,7 +989,7 @@ This is because the property values are kept in these 
files, and cannot be
 dynamically patched.
 
 Instead, callers need to create different configuration files for each
-bucket, setting the base secrets (`fs.s3a.bucket.nightly.access.key`, etc),
+bucket, setting the base secrets (`fs.s3a.access.key`, etc),
 then declare the path to the appropriate credential file in
 a bucket-specific version of the property 
`fs.s3a.security.credential.provider.path`.
 
@@ -1063,7 +1063,7 @@ declaration. For example:
 ### Stabilizing: S3A Fast Upload
 
 
-**New in Hadoop 2.7; significantly enhanced in Hadoop 2.9**
+**New in Hadoop 2.7; significantly enhanced in Hadoop 2.8**
 
 
 Because of the nature of the S3 object store, data written to an S3A 
`OutputStream`
@@ -1223,8 +1223,18 @@ consumed, and so eliminates heap size as the limiting 
factor in queued uploads
   disk
 
 
+
+  fs.s3a.buffer.dir
+  
+  Comma separated list of temporary directories use for
+  storing blocks of data prior to their being uploaded to S3.
+  When unset, the Hadoop temporary directory hadoop.tmp.dir is 
used
+
+
 ```
 
+This is the default buffer mechanism. The amount of data which can
+be buffered is limited by the amount of available disk space.
 
  Fast Upload with ByteBuffers: 
`fs.s3a.fast.upload.buffer=bytebuffer`
 
@@ -1238,7 +1248,7 @@ The amount of data which can be buffered is
 limited by the Java runtime, the operating system, and, for YARN applications,
 the amount of memory requested for each container.
 
-The slower the write bandwidth to S3, the greater the risk of running out
+The slower the upload bandwidth to S3, the greater the risk of running out
 of memory —and so the more care is needed in
 [tuning the upload settings](#s3a_fast_upload_thread_tuning).
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-14019. Fix some typos in the s3a docs. Contributed by Steve Loughran

2017-02-16 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 afc8124ff -> 29e100717


HADOOP-14019. Fix some typos in the s3a docs. Contributed by Steve Loughran

(cherry picked from commit bdad8b7b97d7f48119f016d68f32982d680c8796)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29e10071
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29e10071
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29e10071

Branch: refs/heads/branch-2
Commit: 29e1007172caaeecf14884adce83e748e2cb5e8e
Parents: afc8124
Author: Mingliang Liu 
Authored: Thu Feb 16 16:41:31 2017 -0800
Committer: Mingliang Liu 
Committed: Thu Feb 16 16:42:56 2017 -0800

--
 .../src/site/markdown/tools/hadoop-aws/index.md | 16 +---
 1 file changed, 13 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29e10071/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index f57610f..c4e785e 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -999,7 +999,7 @@ This is because the property values are kept in these 
files, and cannot be
 dynamically patched.
 
 Instead, callers need to create different configuration files for each
-bucket, setting the base secrets (`fs.s3a.bucket.nightly.access.key`, etc),
+bucket, setting the base secrets (`fs.s3a.access.key`, etc),
 then declare the path to the appropriate credential file in
 a bucket-specific version of the property 
`fs.s3a.security.credential.provider.path`.
 
@@ -1073,7 +1073,7 @@ declaration. For example:
 ### Stabilizing: S3A Fast Upload
 
 
-**New in Hadoop 2.7; significantly enhanced in Hadoop 2.9**
+**New in Hadoop 2.7; significantly enhanced in Hadoop 2.8**
 
 
 Because of the nature of the S3 object store, data written to an S3A 
`OutputStream`
@@ -1233,8 +1233,18 @@ consumed, and so eliminates heap size as the limiting 
factor in queued uploads
   disk
 
 
+
+  fs.s3a.buffer.dir
+  
+  Comma separated list of temporary directories use for
+  storing blocks of data prior to their being uploaded to S3.
+  When unset, the Hadoop temporary directory hadoop.tmp.dir is 
used
+
+
 ```
 
+This is the default buffer mechanism. The amount of data which can
+be buffered is limited by the amount of available disk space.
 
  Fast Upload with ByteBuffers: 
`fs.s3a.fast.upload.buffer=bytebuffer`
 
@@ -1248,7 +1258,7 @@ The amount of data which can be buffered is
 limited by the Java runtime, the operating system, and, for YARN applications,
 the amount of memory requested for each container.
 
-The slower the write bandwidth to S3, the greater the risk of running out
+The slower the upload bandwidth to S3, the greater the risk of running out
 of memory —and so the more care is needed in
 [tuning the upload settings](#s3a_fast_upload_thread_tuning).
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-14019. Fix some typos in the s3a docs. Contributed by Steve Loughran

2017-02-16 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/trunk f43299902 -> bdad8b7b9


HADOOP-14019. Fix some typos in the s3a docs. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bdad8b7b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bdad8b7b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bdad8b7b

Branch: refs/heads/trunk
Commit: bdad8b7b97d7f48119f016d68f32982d680c8796
Parents: f432999
Author: Mingliang Liu 
Authored: Thu Feb 16 16:41:31 2017 -0800
Committer: Mingliang Liu 
Committed: Thu Feb 16 16:41:31 2017 -0800

--
 .../src/site/markdown/tools/hadoop-aws/index.md | 16 +---
 1 file changed, 13 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdad8b7b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 2471a52..0ff314c 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -970,7 +970,7 @@ This is because the property values are kept in these 
files, and cannot be
 dynamically patched.
 
 Instead, callers need to create different configuration files for each
-bucket, setting the base secrets (`fs.s3a.bucket.nightly.access.key`, etc),
+bucket, setting the base secrets (`fs.s3a.access.key`, etc),
 then declare the path to the appropriate credential file in
 a bucket-specific version of the property 
`fs.s3a.security.credential.provider.path`.
 
@@ -1044,7 +1044,7 @@ declaration. For example:
 ### Stabilizing: S3A Fast Upload
 
 
-**New in Hadoop 2.7; significantly enhanced in Hadoop 2.9**
+**New in Hadoop 2.7; significantly enhanced in Hadoop 2.8**
 
 
 Because of the nature of the S3 object store, data written to an S3A 
`OutputStream`
@@ -1204,8 +1204,18 @@ consumed, and so eliminates heap size as the limiting 
factor in queued uploads
   disk
 
 
+
+  fs.s3a.buffer.dir
+  
+  Comma separated list of temporary directories use for
+  storing blocks of data prior to their being uploaded to S3.
+  When unset, the Hadoop temporary directory hadoop.tmp.dir is 
used
+
+
 ```
 
+This is the default buffer mechanism. The amount of data which can
+be buffered is limited by the amount of available disk space.
 
  Fast Upload with ByteBuffers: 
`fs.s3a.fast.upload.buffer=bytebuffer`
 
@@ -1219,7 +1229,7 @@ The amount of data which can be buffered is
 limited by the Java runtime, the operating system, and, for YARN applications,
 the amount of memory requested for each container.
 
-The slower the write bandwidth to S3, the greater the risk of running out
+The slower the upload bandwidth to S3, the greater the risk of running out
 of memory —and so the more care is needed in
 [tuning the upload settings](#s3a_fast_upload_thread_tuning).
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-14046 Metastore destruction test creates table w/o version. Contributed by Sean Mackrory

2017-02-16 Thread fabbri
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 94287ce70 -> 8b37b6a96


HADOOP-14046 Metastore destruction test creates table w/o version. Contributed 
by Sean Mackrory


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b37b6a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b37b6a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b37b6a9

Branch: refs/heads/HADOOP-13345
Commit: 8b37b6a96ce3d66d5621ff3b4d8538c98b106907
Parents: 94287ce
Author: Aaron Fabbri 
Authored: Thu Feb 16 15:25:53 2017 -0800
Committer: Aaron Fabbri 
Committed: Thu Feb 16 15:25:53 2017 -0800

--
 .../s3a/s3guard/ITestS3GuardToolDynamoDB.java   | 120 ++-
 1 file changed, 37 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b37b6a9/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
index 4480044..fb0efd1 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
@@ -20,10 +20,6 @@ package org.apache.hadoop.fs.s3a.s3guard;
 
 import com.amazonaws.services.dynamodbv2.document.DynamoDB;
 import com.amazonaws.services.dynamodbv2.document.Table;
-import com.amazonaws.services.dynamodbv2.model.AttributeDefinition;
-import com.amazonaws.services.dynamodbv2.model.CreateTableRequest;
-import com.amazonaws.services.dynamodbv2.model.KeySchemaElement;
-import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughput;
 import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.DestroyMetadata;
@@ -31,7 +27,6 @@ import 
org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.InitMetadata;
 import org.junit.Test;
 
 import java.io.IOException;
-import java.util.Collection;
 import java.util.Random;
 
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.SUCCESS;
@@ -66,91 +61,50 @@ public class ITestS3GuardToolDynamoDB extends 
S3GuardToolTestBase {
   }
 
   @Test
-  public void testInitDynamoDBMetadataStore() throws IOException {
-final String testS3Url = getTestPath("init");
-String testTableName = "initDynamoDBMetadataStore" +
-new Random().nextInt();
-
-InitMetadata cmd = new InitMetadata(getFs().getConf());
-Table table = null;
-
-try {
-  String[] args = new String[]{
-  "init", "-m", "dynamodb://" + testTableName, testS3Url
-  };
-  assertEquals("Init command did not exit successfully - see output",
-  SUCCESS, cmd.run(args));
-  // Verify the existence of the dynamodb table.
-  try {
-MetadataStore ms = getMetadataStore();
-assertTrue("metadata store should be DynamoDBMetadataStore",
-ms instanceof DynamoDBMetadataStore);
-DynamoDBMetadataStore dynamoMs = (DynamoDBMetadataStore) ms;
-DynamoDB db = dynamoMs.getDynamoDB();
-table = db.getTable(testTableName);
-table.describe();
-  } catch (ResourceNotFoundException e) {
-fail(String.format("DynamoDB table %s does not exist",
-testTableName));
-  }
-} finally {
-  // Clean the table.
-  try {
-if (table != null) {
-  table.delete();
-}
-  } catch (ResourceNotFoundException e) {
-// Ignore
-  }
-}
-  }
-
-  @Test
-  public void testDestroyDynamoDBMetadataStore()
-  throws IOException, InterruptedException {
-final String testS3Url = getTestPath("destroy");
-String testTableName = "destroyDynamoDBMetadataStore" +
-new Random().nextInt();
-
+  public void testDynamoDBInitDestroyCycle() throws IOException,
+  InterruptedException {
+String testTableName = "testDynamoDBInitDestroy" + new Random().nextInt();
+String testS3Url = getTestPath(testTableName);
 S3AFileSystem fs = getFs();
-DestroyMetadata cmd = new DestroyMetadata(fs.getConf());
-
-// Pre-alloc DynamoDB table.
-DynamoDB db = DynamoDBMetadataStore.createDynamoDB(fs);
-try {
-  Table table = db.getTable(testTableName);
-  table.delete();
-  table.waitForDelete();
-} catch (ResourceNotFoundException e) {
-  // Ignore.
-}
-Collection elements =
-PathMetadataDynamoDBTranslation.keySchema();
-Collection attrs =
- 

hadoop git commit: HADOOP-14049. Honour AclBit flag associated to file/folder permission for Azure datalake account. Contributed by Vishwajeet Dusane

2017-02-16 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/trunk a77f43244 -> f43299902


HADOOP-14049. Honour AclBit flag associated to file/folder permission for Azure 
datalake account. Contributed by Vishwajeet Dusane


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4329990
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4329990
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4329990

Branch: refs/heads/trunk
Commit: f4329990250bed62efdebe3ce2bc740092cf9573
Parents: a77f432
Author: Mingliang Liu 
Authored: Thu Feb 16 15:14:25 2017 -0800
Committer: Mingliang Liu 
Committed: Thu Feb 16 15:14:25 2017 -0800

--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  6 +++-
 .../hadoop/fs/adl/TestADLResponseData.java  | 21 +
 .../apache/hadoop/fs/adl/TestGetFileStatus.java | 25 +++
 .../apache/hadoop/fs/adl/TestListStatus.java| 32 
 4 files changed, 83 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4329990/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 303b7bc..fb0feda 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -593,7 +593,11 @@ public class AdlFileSystem extends FileSystem {
 boolean isDirectory = entry.type == DirectoryEntryType.DIRECTORY;
 long lastModificationData = entry.lastModifiedTime.getTime();
 long lastAccessTime = entry.lastAccessTime.getTime();
-FsPermission permission = new AdlPermission(aclBitStatus,
+// set aclBit from ADLS backend response if
+// ADL_SUPPORT_ACL_BIT_IN_FSPERMISSION is true.
+final boolean aclBit = aclBitStatus ? entry.aclBit : false;
+
+FsPermission permission = new AdlPermission(aclBit,
 Short.valueOf(entry.permission, 8));
 String user = entry.user;
 String group = entry.group;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4329990/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
index 24eb314..788242e 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestADLResponseData.java
@@ -66,6 +66,15 @@ public final class TestADLResponseData {
 "\"owner\":\"NotSupportYet\",\"group\":\"NotSupportYet\"}}";
   }
 
+  public static String getGetFileStatusJSONResponse(boolean aclBit) {
+return "{\"FileStatus\":{\"length\":1024," +
+"\"pathSuffix\":\"\",\"type\":\"FILE\",\"blockSize\":268435456," +
+"\"accessTime\":1452103827023,\"modificationTime\":1452103827023," +
+"\"replication\":0,\"permission\":\"777\"," +
+"\"owner\":\"NotSupportYet\",\"group\":\"NotSupportYet\",\"aclBit\":\""
++ aclBit + "\"}}";
+  }
+
   public static String getListFileStatusJSONResponse(int dirSize) {
 String list = "";
 for (int i = 0; i < dirSize; ++i) {
@@ -81,6 +90,18 @@ public final class TestADLResponseData {
 return "{\"FileStatuses\":{\"FileStatus\":[" + list + "]}}";
   }
 
+  public static String getListFileStatusJSONResponse(boolean aclBit) {
+return "{\"FileStatuses\":{\"FileStatus\":[{\"length\":0,\"pathSuffix\":\""
++ java.util.UUID.randomUUID()
++ "\",\"type\":\"DIRECTORY\",\"blockSize\":0,"
++ "\"accessTime\":1481184513488,"
++ "\"modificationTime\":1481184513488,\"replication\":0,"
++ "\"permission\":\"770\","
++ "\"owner\":\"4b27fe1a-d9ab-4a04-ad7a-4bba72cd9e6c\","
++ "\"group\":\"4b27fe1a-d9ab-4a04-ad7a-4bba72cd9e6c\",\"aclBit\":\""
++ aclBit + "\"}]}}";
+  }
+
   public static String getJSONResponse(boolean status) {
 return "{\"boolean\":" + status + "}";
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4329990/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java
--
diff --git 

hadoop git commit: YARN-6171. ConcurrentModificationException on FSAppAttempt.containersToPreempt. (Miklos Szegedi via kasha)

2017-02-16 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8fc67e597 -> afc8124ff


YARN-6171. ConcurrentModificationException on FSAppAttempt.containersToPreempt. 
(Miklos Szegedi via kasha)

(cherry picked from commit a77f432449aad67da31bd8bf8644b71def741bde)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afc8124f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afc8124f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afc8124f

Branch: refs/heads/branch-2
Commit: afc8124ff63d16a58b91be2e4bd86b26316b1340
Parents: 8fc67e5
Author: Karthik Kambatla 
Authored: Thu Feb 16 14:54:51 2017 -0800
Committer: Karthik Kambatla 
Committed: Thu Feb 16 15:03:35 2017 -0800

--
 .../scheduler/fair/FSAppAttempt.java| 49 +++-
 .../scheduler/fair/FairScheduler.java   | 15 +++---
 2 files changed, 34 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afc8124f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 28c599e..8ff688e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -82,8 +82,10 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
   private Resource fairShare = Resources.createResource(0, 0);
 
   // Preemption related variables
+  private final Object preemptionVariablesLock = new Object();
   private final Resource preemptedResources = 
Resources.clone(Resources.none());
   private final Set containersToPreempt = new HashSet<>();
+
   private Resource fairshareStarvation = Resources.none();
   private long lastTimeAtFairShare;
   private long nextStarvationCheck;
@@ -551,29 +553,29 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   }
 
   void trackContainerForPreemption(RMContainer container) {
-if (containersToPreempt.add(container)) {
-  synchronized (preemptedResources) {
+synchronized (preemptionVariablesLock) {
+  if (containersToPreempt.add(container)) {
 Resources.addTo(preemptedResources, container.getAllocatedResource());
   }
 }
   }
 
   private void untrackContainerForPreemption(RMContainer container) {
-if (containersToPreempt.remove(container)) {
-  synchronized (preemptedResources) {
+synchronized (preemptionVariablesLock) {
+  if (containersToPreempt.remove(container)) {
 Resources.subtractFrom(preemptedResources,
 container.getAllocatedResource());
   }
 }
   }
 
-  Set getPreemptionContainers() {
-return containersToPreempt;
-  }
-
-  private Resource getPreemptedResources() {
-synchronized (preemptedResources) {
-  return preemptedResources;
+  Set getPreemptionContainerIds() {
+synchronized (preemptionVariablesLock) {
+  Set preemptionContainerIds = new HashSet<>();
+  for (RMContainer container : containersToPreempt) {
+preemptionContainerIds.add(container.getContainerId());
+  }
+  return preemptionContainerIds;
 }
   }
 
@@ -590,9 +592,11 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   return false;
 }
 
-if (containersToPreempt.contains(container)) {
-  // The container is already under consideration for preemption
-  return false;
+synchronized (preemptionVariablesLock) {
+  if (containersToPreempt.contains(container)) {
+// The container is already under consideration for preemption
+return false;
+  }
 }
 
 // Check if the app's allocation will be over its fairshare even
@@ -964,7 +968,8 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 if (LOG.isTraceEnabled()) {
   LOG.trace("Assign container on " + node.getNodeName()
   + " node, assignType: OFF_SWITCH" + ", allowedLocality: "
-  + allowedLocality + ", priority: " + 
schedulerKey.getPriority()
+

hadoop git commit: YARN-6171. ConcurrentModificationException on FSAppAttempt.containersToPreempt. (Miklos Szegedi via kasha)

2017-02-16 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5d339c46f -> a77f43244


YARN-6171. ConcurrentModificationException on FSAppAttempt.containersToPreempt. 
(Miklos Szegedi via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a77f4324
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a77f4324
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a77f4324

Branch: refs/heads/trunk
Commit: a77f432449aad67da31bd8bf8644b71def741bde
Parents: 5d339c4
Author: Karthik Kambatla 
Authored: Thu Feb 16 14:54:51 2017 -0800
Committer: Karthik Kambatla 
Committed: Thu Feb 16 14:54:58 2017 -0800

--
 .../scheduler/fair/FSAppAttempt.java| 49 +++-
 .../scheduler/fair/FairScheduler.java   | 15 +++---
 2 files changed, 34 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a77f4324/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 563b892..b1bb9a0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -83,8 +83,10 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
   private Resource fairShare = Resources.createResource(0, 0);
 
   // Preemption related variables
+  private final Object preemptionVariablesLock = new Object();
   private final Resource preemptedResources = 
Resources.clone(Resources.none());
   private final Set containersToPreempt = new HashSet<>();
+
   private Resource fairshareStarvation = Resources.none();
   private long lastTimeAtFairShare;
   private long nextStarvationCheck;
@@ -552,29 +554,29 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   }
 
   void trackContainerForPreemption(RMContainer container) {
-if (containersToPreempt.add(container)) {
-  synchronized (preemptedResources) {
+synchronized (preemptionVariablesLock) {
+  if (containersToPreempt.add(container)) {
 Resources.addTo(preemptedResources, container.getAllocatedResource());
   }
 }
   }
 
   private void untrackContainerForPreemption(RMContainer container) {
-if (containersToPreempt.remove(container)) {
-  synchronized (preemptedResources) {
+synchronized (preemptionVariablesLock) {
+  if (containersToPreempt.remove(container)) {
 Resources.subtractFrom(preemptedResources,
 container.getAllocatedResource());
   }
 }
   }
 
-  Set getPreemptionContainers() {
-return containersToPreempt;
-  }
-
-  private Resource getPreemptedResources() {
-synchronized (preemptedResources) {
-  return preemptedResources;
+  Set getPreemptionContainerIds() {
+synchronized (preemptionVariablesLock) {
+  Set preemptionContainerIds = new HashSet<>();
+  for (RMContainer container : containersToPreempt) {
+preemptionContainerIds.add(container.getContainerId());
+  }
+  return preemptionContainerIds;
 }
   }
 
@@ -591,9 +593,11 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   return false;
 }
 
-if (containersToPreempt.contains(container)) {
-  // The container is already under consideration for preemption
-  return false;
+synchronized (preemptionVariablesLock) {
+  if (containersToPreempt.contains(container)) {
+// The container is already under consideration for preemption
+return false;
+  }
 }
 
 // Check if the app's allocation will be over its fairshare even
@@ -969,7 +973,8 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 if (LOG.isTraceEnabled()) {
   LOG.trace("Assign container on " + node.getNodeName()
   + " node, assignType: OFF_SWITCH" + ", allowedLocality: "
-  + allowedLocality + ", priority: " + 
schedulerKey.getPriority()
+  + allowedLocality + ", priority: "
+  + 

hadoop git commit: YARN-6177. Yarn client should exit with an informative error message if an incompatible Jersey library is used at client. Contributed by Weiwei Yang.

2017-02-16 Thread gtcarrera9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 da944fe0f -> 83a589618


YARN-6177. Yarn client should exit with an informative error message if an 
incompatible Jersey library is used at client. Contributed by Weiwei Yang.

(cherry picked from commit 5d339c46f5b16b951afd82afd9e907b9aa2ded9a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83a58961
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83a58961
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83a58961

Branch: refs/heads/branch-2.8
Commit: 83a5896180019c6d147ace0ab992a783955aa5f8
Parents: da944fe
Author: Li Lu 
Authored: Thu Feb 16 13:40:26 2017 -0800
Committer: Li Lu 
Committed: Thu Feb 16 13:50:22 2017 -0800

--
 .../yarn/client/api/impl/YarnClientImpl.java|  10 ++
 .../yarn/client/api/impl/TestYarnClient.java| 165 ---
 2 files changed, 155 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83a58961/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 93893f9..07250c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -369,6 +369,16 @@ public class YarnClientImpl extends YarnClient {
 return null;
   }
   throw e;
+} catch (NoClassDefFoundError e) {
+  NoClassDefFoundError wrappedError = new NoClassDefFoundError(
+  e.getMessage() + ". It appears that the timeline client "
+  + "failed to initiate because an incompatible dependency "
+  + "in classpath. If timeline service is optional to this "
+  + "client, try to work around by setting "
+  + YarnConfiguration.TIMELINE_SERVICE_ENABLED
+  + " to false in client configuration.");
+  wrappedError.setStackTrace(e.getStackTrace());
+  throw wrappedError;
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/83a58961/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 8d199d2..c7d6c62 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -156,26 +156,6 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testStartWithTimelineV15Failure() throws Exception{
-Configuration conf = new Configuration();
-conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
-conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.5f);
-conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT,
-true);
-YarnClient client = YarnClient.createYarnClient();
-if(client instanceof YarnClientImpl) {
-  YarnClientImpl impl = (YarnClientImpl) client;
-  YarnClientImpl spyClient = spy(impl);
-  when(spyClient.createTimelineClient()).thenThrow(
-  new IOException("ATS v1.5 client initialization failed. "));
-  spyClient.init(conf);
-  spyClient.start();
-  spyClient.getTimelineDelegationToken();
-  spyClient.stop();
-}
-  }
-
-  @Test
   public void testStartWithTimelineV15() throws Exception {
 Configuration conf = new Configuration();
 conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
@@ -186,6 +166,89 @@ public class TestYarnClient {
 client.stop();
   }
 
+  @Test
+  public void testStartTimelineClientWithErrors()
+  throws Exception {
+// If timeline client failed to init with a NoClassDefFoundError
+// it should be wrapped with an informative error message
+testCreateTimelineClientWithError(
+1.5f,
+true,
+false,
+new NoClassDefFoundError("Mock a 

hadoop git commit: YARN-6177. Yarn client should exit with an informative error message if an incompatible Jersey library is used at client. Contributed by Weiwei Yang.

2017-02-16 Thread gtcarrera9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f07b52dc5 -> 8fc67e597


YARN-6177. Yarn client should exit with an informative error message if an 
incompatible Jersey library is used at client. Contributed by Weiwei Yang.

(cherry picked from commit 5d339c46f5b16b951afd82afd9e907b9aa2ded9a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fc67e59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fc67e59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fc67e59

Branch: refs/heads/branch-2
Commit: 8fc67e5973b0663b93271e4fc378441408aa6b1e
Parents: f07b52d
Author: Li Lu 
Authored: Thu Feb 16 13:40:26 2017 -0800
Committer: Li Lu 
Committed: Thu Feb 16 13:46:30 2017 -0800

--
 .../yarn/client/api/impl/YarnClientImpl.java|  10 ++
 .../yarn/client/api/impl/TestYarnClient.java| 165 ---
 2 files changed, 155 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc67e59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index e406862..af46fbb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -376,6 +376,16 @@ public class YarnClientImpl extends YarnClient {
 return null;
   }
   throw e;
+} catch (NoClassDefFoundError e) {
+  NoClassDefFoundError wrappedError = new NoClassDefFoundError(
+  e.getMessage() + ". It appears that the timeline client "
+  + "failed to initiate because an incompatible dependency "
+  + "in classpath. If timeline service is optional to this "
+  + "client, try to work around by setting "
+  + YarnConfiguration.TIMELINE_SERVICE_ENABLED
+  + " to false in client configuration.");
+  wrappedError.setStackTrace(e.getStackTrace());
+  throw wrappedError;
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fc67e59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 6060a0b..d1c15e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -156,26 +156,6 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testStartWithTimelineV15Failure() throws Exception{
-Configuration conf = new Configuration();
-conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
-conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.5f);
-conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT,
-true);
-YarnClient client = YarnClient.createYarnClient();
-if(client instanceof YarnClientImpl) {
-  YarnClientImpl impl = (YarnClientImpl) client;
-  YarnClientImpl spyClient = spy(impl);
-  when(spyClient.createTimelineClient()).thenThrow(
-  new IOException("ATS v1.5 client initialization failed. "));
-  spyClient.init(conf);
-  spyClient.start();
-  spyClient.getTimelineDelegationToken();
-  spyClient.stop();
-}
-  }
-
-  @Test
   public void testStartWithTimelineV15() throws Exception {
 Configuration conf = new Configuration();
 conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
@@ -186,6 +166,89 @@ public class TestYarnClient {
 client.stop();
   }
 
+  @Test
+  public void testStartTimelineClientWithErrors()
+  throws Exception {
+// If timeline client failed to init with a NoClassDefFoundError
+// it should be wrapped with an informative error message
+testCreateTimelineClientWithError(
+1.5f,
+true,
+false,
+new NoClassDefFoundError("Mock a 

hadoop git commit: YARN-6177. Yarn client should exit with an informative error message if an incompatible Jersey library is used at client. Contributed by Weiwei Yang.

2017-02-16 Thread gtcarrera9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4fa1afdb8 -> 5d339c46f


YARN-6177. Yarn client should exit with an informative error message if an 
incompatible Jersey library is used at client. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d339c46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d339c46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d339c46

Branch: refs/heads/trunk
Commit: 5d339c46f5b16b951afd82afd9e907b9aa2ded9a
Parents: 4fa1afd
Author: Li Lu 
Authored: Thu Feb 16 13:40:26 2017 -0800
Committer: Li Lu 
Committed: Thu Feb 16 13:41:42 2017 -0800

--
 .../yarn/client/api/impl/YarnClientImpl.java|  10 ++
 .../yarn/client/api/impl/TestYarnClient.java| 165 ---
 2 files changed, 155 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d339c46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 4a27fee..23b128c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -381,6 +381,16 @@ public class YarnClientImpl extends YarnClient {
 return null;
   }
   throw e;
+} catch (NoClassDefFoundError e) {
+  NoClassDefFoundError wrappedError = new NoClassDefFoundError(
+  e.getMessage() + ". It appears that the timeline client "
+  + "failed to initiate because an incompatible dependency "
+  + "in classpath. If timeline service is optional to this "
+  + "client, try to work around by setting "
+  + YarnConfiguration.TIMELINE_SERVICE_ENABLED
+  + " to false in client configuration.");
+  wrappedError.setStackTrace(e.getStackTrace());
+  throw wrappedError;
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d339c46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 240f31c..c2c9665 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -156,26 +156,6 @@ public class TestYarnClient {
   }
 
   @Test
-  public void testStartWithTimelineV15Failure() throws Exception{
-Configuration conf = new Configuration();
-conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
-conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.5f);
-conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT,
-true);
-YarnClient client = YarnClient.createYarnClient();
-if(client instanceof YarnClientImpl) {
-  YarnClientImpl impl = (YarnClientImpl) client;
-  YarnClientImpl spyClient = spy(impl);
-  when(spyClient.createTimelineClient()).thenThrow(
-  new IOException("ATS v1.5 client initialization failed. "));
-  spyClient.init(conf);
-  spyClient.start();
-  spyClient.getTimelineDelegationToken();
-  spyClient.stop();
-}
-  }
-
-  @Test
   public void testStartWithTimelineV15() throws Exception {
 Configuration conf = new Configuration();
 conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
@@ -186,6 +166,89 @@ public class TestYarnClient {
 client.stop();
   }
 
+  @Test
+  public void testStartTimelineClientWithErrors()
+  throws Exception {
+// If timeline client failed to init with a NoClassDefFoundError
+// it should be wrapped with an informative error message
+testCreateTimelineClientWithError(
+1.5f,
+true,
+false,
+new NoClassDefFoundError("Mock a NoClassDefFoundError"),
+new CreateTimelineClientErrorVerifier(1) {
+

[2/2] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-02-16 Thread sjlee
YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate 
classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4fa1afdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4fa1afdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4fa1afdb

Branch: refs/heads/trunk
Commit: 4fa1afdb883dab8786d2fb5c72a195dd2e87d711
Parents: 5690b51
Author: Sangjin Lee 
Authored: Thu Feb 16 11:41:04 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Feb 16 11:41:04 2017 -0800

--
 .../jobhistory/JobHistoryEventHandler.java  |  57 +-
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  14 +-
 .../v2/app/rm/RMContainerAllocator.java |   4 +-
 .../jobhistory/TestJobHistoryEventHandler.java  |   8 +-
 .../distributedshell/ApplicationMaster.java |  98 ++-
 .../hadoop/yarn/client/api/AMRMClient.java  |  40 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |  21 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |   5 +-
 .../yarn/client/api/impl/YarnClientImpl.java|  15 +-
 .../hadoop/yarn/client/api/TimelineClient.java  |  94 +--
 .../yarn/client/api/TimelineV2Client.java   |  92 +++
 .../client/api/impl/TimelineClientImpl.java | 825 ++-
 .../yarn/client/api/impl/TimelineConnector.java | 440 ++
 .../client/api/impl/TimelineV2ClientImpl.java   | 459 +++
 .../client/api/impl/TestTimelineClient.java |  39 +-
 .../api/impl/TestTimelineClientV2Impl.java  |   4 +-
 .../timelineservice/NMTimelinePublisher.java|  22 +-
 .../TestNMTimelinePublisher.java|  10 +-
 .../TestTimelineServiceClientIntegration.java   |  10 +-
 19 files changed, 1272 insertions(+), 985 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fa1afdb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 0cc605c..285d36e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -72,13 +72,12 @@ import 
org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
+import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.node.JsonNodeFactory;
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.ClientHandlerException;
 
@@ -90,8 +89,6 @@ import com.sun.jersey.api.client.ClientHandlerException;
  */
 public class JobHistoryEventHandler extends AbstractService
 implements EventHandler {
-  private static final JsonNodeFactory FACTORY =
-  new ObjectMapper().getNodeFactory();
 
   private final AppContext context;
   private final int startCount;
@@ -133,9 +130,10 @@ public class JobHistoryEventHandler extends AbstractService
   // should job completion be force when the AM shuts down?
   protected volatile boolean forceJobCompletion = false;
 
+  @VisibleForTesting
   protected TimelineClient timelineClient;
-
-  private boolean timelineServiceV2Enabled = false;
+  @VisibleForTesting
+  protected TimelineV2Client timelineV2Client;
 
   private static String MAPREDUCE_JOB_ENTITY_TYPE = "MAPREDUCE_JOB";
   private static String MAPREDUCE_TASK_ENTITY_TYPE = "MAPREDUCE_TASK";
@@ -268,12 +266,17 @@ public class JobHistoryEventHandler extends 
AbstractService
 MRJobConfig.DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA)) {
   LOG.info("Emitting job history data to the timeline service is enabled");
   if (YarnConfiguration.timelineServiceEnabled(conf)) {
-
-timelineClient 

[1/2] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-02-16 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5690b51ef -> 4fa1afdb8


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fa1afdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
new file mode 100644
index 000..b5b5f77
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
@@ -0,0 +1,440 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.api.impl;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.ConnectException;
+import java.net.HttpURLConnection;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.net.URL;
+import java.net.URLConnection;
+import java.security.GeneralSecurityException;
+import java.security.PrivilegedExceptionAction;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLSocketFactory;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import 
org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
+import 
org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticator;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientRequest;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.api.client.filter.ClientFilter;
+import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
+import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
+
+/**
+ * Utility Connector class which is used by timeline clients to securely get
+ * connected to the timeline server.
+ *
+ */
+public class TimelineConnector extends AbstractService {
+
+  private static final Joiner JOINER = Joiner.on("");
+  private static final Log LOG = LogFactory.getLog(TimelineConnector.class);
+  public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
+
+  private SSLFactory sslFactory;
+  private Client client;
+  private ConnectionConfigurator connConfigurator;
+  private DelegationTokenAuthenticator authenticator;
+  private DelegationTokenAuthenticatedURL.Token token;
+  private UserGroupInformation authUgi;
+  private String doAsUser;
+  @VisibleForTesting
+  TimelineClientConnectionRetry connectionRetry;
+  private boolean requireConnectionRetry;
+
+  public TimelineConnector(boolean requireConnectionRetry,
+  UserGroupInformation authUgi, String doAsUser,
+  DelegationTokenAuthenticatedURL.Token token) {

hadoop git commit: HDFS-11100. Recursively deleting file protected by sticky bit should fail. Contributed by John Zhuge.

2017-02-16 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/trunk e63a7814d -> 5690b51ef


HDFS-11100. Recursively deleting file protected by sticky bit should fail. 
Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5690b51e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5690b51e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5690b51e

Branch: refs/heads/trunk
Commit: 5690b51ef7c708c0a71162ddaff04466bc71cdcc
Parents: e63a781
Author: Wei-Chiu Chuang 
Authored: Thu Feb 16 05:39:37 2017 -0800
Committer: Wei-Chiu Chuang 
Committed: Thu Feb 16 05:39:37 2017 -0800

--
 .../apache/hadoop/fs/FSExceptionMessages.java   |  3 +
 .../server/namenode/FSPermissionChecker.java| 87 +---
 .../hadoop/fs/permission/TestStickyBit.java | 63 ++
 3 files changed, 142 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5690b51e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
index 1511bb0..a8e7b71 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
@@ -48,4 +48,7 @@ public class FSExceptionMessages {
   = "Requested more bytes than destination buffer size";
 
   public static final String PERMISSION_DENIED = "Permission denied";
+
+  public static final String PERMISSION_DENIED_BY_STICKY_BIT =
+  "Permission denied by sticky bit";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5690b51e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index 107d563..f1250dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -18,11 +18,15 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
 import java.util.Stack;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
@@ -280,9 +284,20 @@ class FSPermissionChecker implements AccessControlEnforcer 
{
   return;
 }
 
+// Each inode in the subtree has a level. The root inode has level 0.
+// List subINodePath tracks the inode path in the subtree during
+// traversal. The root inode is not stored because it is already in array
+// components. The list index is (level - 1).
+ArrayList subINodePath = new ArrayList<>();
+
+// The stack of levels matches the stack of directory inodes.
+Stack levels = new Stack<>();
+levels.push(0);// Level 0 is the root
+
 Stack directories = new Stack();
 for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) {
   INodeDirectory d = directories.pop();
+  int level = levels.pop();
   ReadOnlyList cList = d.getChildrenList(snapshotId);
   if (!(cList.isEmpty() && ignoreEmptyDir)) {
 //TODO have to figure this out with inodeattribute provider
@@ -292,11 +307,44 @@ class FSPermissionChecker implements 
AccessControlEnforcer {
   throw new AccessControlException(
   toAccessControlString(inodeAttr, d.getFullPathName(), access));
 }
+
+if (level > 0) {
+  if (level - 1 < subINodePath.size()) {
+subINodePath.set(level - 1, d);
+  } else {
+Preconditions.checkState(level - 1 == subINodePath.size());
+subINodePath.add(d);
+  }
+}
+
+if (inodeAttr.getFsPermission().getStickyBit()) {
+  for (INode child : cList) {
+INodeAttributes childInodeAttr =
+  

hadoop git commit: YARN-6200. addendum to fix compilation error caused by reverting YARN-5068.

2017-02-16 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/trunk a393e84c6 -> e63a7814d


YARN-6200. addendum to fix compilation error caused by reverting YARN-5068.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e63a7814
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e63a7814
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e63a7814

Branch: refs/heads/trunk
Commit: e63a7814d21c6469adb01a3a93cfb3ed7613437d
Parents: a393e84
Author: Rohith Sharma K S 
Authored: Thu Feb 16 18:19:57 2017 +0530
Committer: Rohith Sharma K S 
Committed: Thu Feb 16 18:19:57 2017 +0530

--
 .../hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e63a7814/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
index 05f9f47..7051f8c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.client.NMProxy;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6200. Reverting since the same functionality achieved by YARN-1623. Revert "YARN-5068. Expose scheduler queue to application master. (Harish Jaiprakash via rohithsharmaks)"

2017-02-16 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/trunk 74dd14225 -> a393e84c6


YARN-6200. Reverting since the same functionality achieved by YARN-1623.
Revert "YARN-5068. Expose scheduler queue to application master. (Harish 
Jaiprakash via rohithsharmaks)"

This reverts commit b7ac85259c7d20c33bef9c9cb40b8aabcab70755.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a393e84c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a393e84c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a393e84c

Branch: refs/heads/trunk
Commit: a393e84c6f57d50a471fc902dfd07ca1b4128a0e
Parents: 74dd142
Author: Rohith Sharma K S 
Authored: Thu Feb 16 18:00:25 2017 +0530
Committer: Rohith Sharma K S 
Committed: Thu Feb 16 18:00:25 2017 +0530

--
 .../apache/hadoop/yarn/api/ApplicationConstants.java  |  7 ---
 .../server/resourcemanager/amlauncher/AMLauncher.java | 14 --
 .../TestApplicationMasterLauncher.java|  5 -
 3 files changed, 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a393e84c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index 760e251..64bcc44 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -160,13 +160,6 @@ public interface ApplicationConstants {
 LD_LIBRARY_PATH("LD_LIBRARY_PATH"),
 
 /**
- * $YARN_RESOURCEMANAGER_APPLICATION_QUEUE
- * The queue into which the app was submitted/launched.
- */
-YARN_RESOURCEMANAGER_APPLICATION_QUEUE(
-"YARN_RESOURCEMANAGER_APPLICATION_QUEUE"),
-
-/**
  * $HADOOP_CONF_DIR
  * Final, non-modifiable.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a393e84c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
index d33360b..05f9f47 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.client.NMProxy;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
@@ -189,9 +188,6 @@ public class AMLauncher implements Runnable {
 ContainerLaunchContext container =
 applicationMasterContext.getAMContainerSpec();
 
-// Populate the current queue name in the environment variable.
-setupQueueNameEnv(container, applicationMasterContext);
-
 // Finalize the container
 setupTokens(container, containerID);
 // set the flow context optionally for timeline service v.2
@@ -200,16 +196,6 @@ public class AMLauncher implements Runnable {
 return container;
   }
 
-  private void setupQueueNameEnv(ContainerLaunchContext container,
-  ApplicationSubmissionContext applicationMasterContext) {
-String queueName = applicationMasterContext.getQueue();
-if (queueName == null) {
-  queueName = YarnConfiguration.DEFAULT_QUEUE_NAME;
-}
-container.getEnvironment().put(ApplicationConstants.Environment
-.YARN_RESOURCEMANAGER_APPLICATION_QUEUE.key(), queueName);
-  }
-
   @Private
   @VisibleForTesting
   protected void setupTokens(


hadoop git commit: YARN-6200. Reverting since the same functionality achieved by YARN-1623. Revert "YARN-5068. Expose scheduler queue to application master. (Harish Jaiprakash via rohithsharmaks)"

2017-02-16 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9fecd5063 -> f07b52dc5


YARN-6200. Reverting since the same functionality achieved by YARN-1623.
Revert "YARN-5068. Expose scheduler queue to application master. (Harish 
Jaiprakash via rohithsharmaks)"

This reverts commit a37b3694ead2ab4e8753e68213f31868b68376b8.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f07b52dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f07b52dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f07b52dc

Branch: refs/heads/branch-2
Commit: f07b52dc5ae117a337b71829ffbd8cce8d6ad49f
Parents: 9fecd50
Author: Rohith Sharma K S 
Authored: Thu Feb 16 17:58:38 2017 +0530
Committer: Rohith Sharma K S 
Committed: Thu Feb 16 17:58:38 2017 +0530

--
 .../apache/hadoop/yarn/api/ApplicationConstants.java  |  7 ---
 .../server/resourcemanager/amlauncher/AMLauncher.java | 14 --
 .../TestApplicationMasterLauncher.java|  5 -
 3 files changed, 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f07b52dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index 168e291..6bd984b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -171,13 +171,6 @@ public interface ApplicationConstants {
 LD_LIBRARY_PATH("LD_LIBRARY_PATH"),
 
 /**
- * $YARN_RESOURCEMANAGER_APPLICATION_QUEUE
- * The queue into which the app was submitted/launched.
- */
-YARN_RESOURCEMANAGER_APPLICATION_QUEUE(
-"YARN_RESOURCEMANAGER_APPLICATION_QUEUE"),
-
-/**
  * $HADOOP_CONF_DIR
  * Final, non-modifiable.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f07b52dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
index e513198..8b44bd9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.client.NMProxy;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
@@ -187,25 +186,12 @@ public class AMLauncher implements Runnable {
 ContainerLaunchContext container = 
 applicationMasterContext.getAMContainerSpec();
 
-// Populate the current queue name in the environment variable.
-setupQueueNameEnv(container, applicationMasterContext);
-
 // Finalize the container
 setupTokens(container, containerID);
 
 return container;
   }
 
-  private void setupQueueNameEnv(ContainerLaunchContext container,
-  ApplicationSubmissionContext applicationMasterContext) {
-String queueName = applicationMasterContext.getQueue();
-if (queueName == null) {
-  queueName = YarnConfiguration.DEFAULT_QUEUE_NAME;
-}
-container.getEnvironment().put(ApplicationConstants.Environment
-.YARN_RESOURCEMANAGER_APPLICATION_QUEUE.key(), queueName);
-  }
-
   @Private
   @VisibleForTesting
   protected void setupTokens(


hadoop git commit: YARN-6200. Reverting since the same functionality achieved by YARN-1623. Revert "YARN-5068. Expose scheduler queue to application master. (Harish Jaiprakash via rohithsharmaks)"

2017-02-16 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 4c47cb68e -> da944fe0f


YARN-6200. Reverting since the same functionality achieved by YARN-1623.
Revert "YARN-5068. Expose scheduler queue to application master. (Harish 
Jaiprakash via rohithsharmaks)"

This reverts commit f102c4f6c8b5a38f73e41282215e9bc38240eed8.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da944fe0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da944fe0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da944fe0

Branch: refs/heads/branch-2.8
Commit: da944fe0f44e2bb2cd63c65aaa79f7d28ca6313a
Parents: 4c47cb6
Author: Rohith Sharma K S 
Authored: Thu Feb 16 17:54:37 2017 +0530
Committer: Rohith Sharma K S 
Committed: Thu Feb 16 17:54:37 2017 +0530

--
 .../apache/hadoop/yarn/api/ApplicationConstants.java  |  7 ---
 .../server/resourcemanager/amlauncher/AMLauncher.java | 14 --
 .../TestApplicationMasterLauncher.java|  5 -
 3 files changed, 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da944fe0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index ad526d6..b2d765a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -171,13 +171,6 @@ public interface ApplicationConstants {
 LD_LIBRARY_PATH("LD_LIBRARY_PATH"),
 
 /**
- * $YARN_RESOURCEMANAGER_APPLICATION_QUEUE
- * The queue into which the app was submitted/launched.
- */
-YARN_RESOURCEMANAGER_APPLICATION_QUEUE(
-"YARN_RESOURCEMANAGER_APPLICATION_QUEUE"),
-
-/**
  * $HADOOP_CONF_DIR
  * Final, non-modifiable.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da944fe0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
index e513198..8b44bd9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.client.NMProxy;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
@@ -187,25 +186,12 @@ public class AMLauncher implements Runnable {
 ContainerLaunchContext container = 
 applicationMasterContext.getAMContainerSpec();
 
-// Populate the current queue name in the environment variable.
-setupQueueNameEnv(container, applicationMasterContext);
-
 // Finalize the container
 setupTokens(container, containerID);
 
 return container;
   }
 
-  private void setupQueueNameEnv(ContainerLaunchContext container,
-  ApplicationSubmissionContext applicationMasterContext) {
-String queueName = applicationMasterContext.getQueue();
-if (queueName == null) {
-  queueName = YarnConfiguration.DEFAULT_QUEUE_NAME;
-}
-container.getEnvironment().put(ApplicationConstants.Environment
-.YARN_RESOURCEMANAGER_APPLICATION_QUEUE.key(), queueName);
-  }
-
   @Private
   @VisibleForTesting
   protected void setupTokens(


hadoop git commit: YARN-6200. Reverting since the same functionality achieved by YARN-1623 Revert "YARN-5068. Expose scheduler queue to application master. (Harish Jaiprakash via rohithsharmaks)"

2017-02-16 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.0 cf91cc2b0 -> e53c4a6d6


YARN-6200. Reverting since the same functionality achieved by YARN-1623
Revert "YARN-5068. Expose scheduler queue to application master. (Harish 
Jaiprakash via rohithsharmaks)"

This reverts commit f102c4f6c8b5a38f73e41282215e9bc38240eed8.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e53c4a6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e53c4a6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e53c4a6d

Branch: refs/heads/branch-2.8.0
Commit: e53c4a6d69fdfcdc41afa38e51f1d9a06fb10f2a
Parents: cf91cc2
Author: Rohith Sharma K S 
Authored: Thu Feb 16 17:47:46 2017 +0530
Committer: Rohith Sharma K S 
Committed: Thu Feb 16 17:47:46 2017 +0530

--
 .../hadoop/yarn/api/ApplicationConstants.java   |  7 ---
 .../resourcemanager/amlauncher/AMLauncher.java  | 16 +---
 .../TestApplicationMasterLauncher.java  |  5 -
 3 files changed, 1 insertion(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e53c4a6d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index ad526d6..b2d765a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -171,13 +171,6 @@ public interface ApplicationConstants {
 LD_LIBRARY_PATH("LD_LIBRARY_PATH"),
 
 /**
- * $YARN_RESOURCEMANAGER_APPLICATION_QUEUE
- * The queue into which the app was submitted/launched.
- */
-YARN_RESOURCEMANAGER_APPLICATION_QUEUE(
-"YARN_RESOURCEMANAGER_APPLICATION_QUEUE"),
-
-/**
  * $HADOOP_CONF_DIR
  * Final, non-modifiable.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e53c4a6d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
index e513198..a81648b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.client.NMProxy;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
@@ -187,25 +186,12 @@ public class AMLauncher implements Runnable {
 ContainerLaunchContext container = 
 applicationMasterContext.getAMContainerSpec();
 
-// Populate the current queue name in the environment variable.
-setupQueueNameEnv(container, applicationMasterContext);
-
 // Finalize the container
 setupTokens(container, containerID);
-
+
 return container;
   }
 
-  private void setupQueueNameEnv(ContainerLaunchContext container,
-  ApplicationSubmissionContext applicationMasterContext) {
-String queueName = applicationMasterContext.getQueue();
-if (queueName == null) {
-  queueName = YarnConfiguration.DEFAULT_QUEUE_NAME;
-}
-container.getEnvironment().put(ApplicationConstants.Environment
-.YARN_RESOURCEMANAGER_APPLICATION_QUEUE.key(), queueName);
-  }
-
   @Private
   @VisibleForTesting
   protected void setupTokens(


hadoop git commit: YARN-5798. Set UncaughtExceptionHandler for all FairScheduler threads. (Yufei Gu via kasha)

2017-02-16 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fefac1276 -> 9fecd5063


YARN-5798. Set UncaughtExceptionHandler for all FairScheduler threads. (Yufei 
Gu via kasha)

(cherry picked from commit 74dd14225059322825f706120aa57cf673820daf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fecd506
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fecd506
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fecd506

Branch: refs/heads/branch-2
Commit: 9fecd5063fc4a4490f9c6db31483e1c7eeb0d9f4
Parents: fefac12
Author: Karthik Kambatla 
Authored: Thu Feb 16 00:03:09 2017 -0800
Committer: Karthik Kambatla 
Committed: Thu Feb 16 00:03:26 2017 -0800

--
 .../server/resourcemanager/scheduler/fair/FairScheduler.java  | 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fecd506/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 5594140..f21b5f1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.yarn.security.PrivilegedEntity.EntityType;
 import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import 
org.apache.hadoop.yarn.server.resourcemanager.RMCriticalThreadUncaughtExceptionHandler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationConstants;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
@@ -1266,12 +1267,16 @@ public class FairScheduler extends
 
   updateThread = new UpdateThread();
   updateThread.setName("FairSchedulerUpdateThread");
+  updateThread.setUncaughtExceptionHandler(
+  new RMCriticalThreadUncaughtExceptionHandler(rmContext));
   updateThread.setDaemon(true);
 
   if (continuousSchedulingEnabled) {
 // start continuous scheduling thread
 schedulingThread = new ContinuousSchedulingThread();
 schedulingThread.setName("FairSchedulerContinuousScheduling");
+schedulingThread.setUncaughtExceptionHandler(
+new RMCriticalThreadUncaughtExceptionHandler(rmContext));
 schedulingThread.setDaemon(true);
   }
 
@@ -1297,6 +1302,8 @@ public class FairScheduler extends
   @VisibleForTesting
   protected void createPreemptionThread() {
 preemptionThread = new FSPreemptionThread(this);
+preemptionThread.setUncaughtExceptionHandler(
+new RMCriticalThreadUncaughtExceptionHandler(rmContext));
   }
 
   private void updateReservationThreshold() {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: YARN-5798. Set UncaughtExceptionHandler for all FairScheduler threads. (Yufei Gu via kasha)

2017-02-16 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk 11be3f70e -> 74dd14225


YARN-5798. Set UncaughtExceptionHandler for all FairScheduler threads. (Yufei 
Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74dd1422
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74dd1422
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74dd1422

Branch: refs/heads/trunk
Commit: 74dd14225059322825f706120aa57cf673820daf
Parents: 11be3f7
Author: Karthik Kambatla 
Authored: Thu Feb 16 00:03:09 2017 -0800
Committer: Karthik Kambatla 
Committed: Thu Feb 16 00:03:09 2017 -0800

--
 .../server/resourcemanager/scheduler/fair/FairScheduler.java  | 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74dd1422/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 18806bc..c5bf02a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -51,6 +51,7 @@ import 
org.apache.hadoop.yarn.security.PrivilegedEntity.EntityType;
 import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import 
org.apache.hadoop.yarn.server.resourcemanager.RMCriticalThreadUncaughtExceptionHandler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationConstants;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
@@ -1268,12 +1269,16 @@ public class FairScheduler extends
 
   updateThread = new UpdateThread();
   updateThread.setName("FairSchedulerUpdateThread");
+  updateThread.setUncaughtExceptionHandler(
+  new RMCriticalThreadUncaughtExceptionHandler(rmContext));
   updateThread.setDaemon(true);
 
   if (continuousSchedulingEnabled) {
 // start continuous scheduling thread
 schedulingThread = new ContinuousSchedulingThread();
 schedulingThread.setName("FairSchedulerContinuousScheduling");
+schedulingThread.setUncaughtExceptionHandler(
+new RMCriticalThreadUncaughtExceptionHandler(rmContext));
 schedulingThread.setDaemon(true);
   }
 
@@ -1299,6 +1304,8 @@ public class FairScheduler extends
   @VisibleForTesting
   protected void createPreemptionThread() {
 preemptionThread = new FSPreemptionThread(this);
+preemptionThread.setUncaughtExceptionHandler(
+new RMCriticalThreadUncaughtExceptionHandler(rmContext));
   }
 
   private void updateReservationThreshold() {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org