hadoop git commit: HDDS-642. Add chill mode exit condition for pipeline availability. Contributed by Yiqun Lin.

2018-11-28 Thread ajay
Repository: hadoop
Updated Branches:
  refs/heads/trunk efc4d91cb -> b71cc7f33


HDDS-642. Add chill mode exit condition for pipeline availability. Contributed 
by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b71cc7f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b71cc7f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b71cc7f3

Branch: refs/heads/trunk
Commit: b71cc7f33edbbf6a98d1efb330f1c748b5dd6e75
Parents: efc4d91
Author: Ajay Kumar 
Authored: Wed Nov 28 17:45:46 2018 -0800
Committer: Ajay Kumar 
Committed: Wed Nov 28 17:47:57 2018 -0800

--
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |   5 +
 .../common/src/main/resources/ozone-default.xml |   9 ++
 .../scm/chillmode/PipelineChillModeRule.java| 108 +++
 .../hdds/scm/chillmode/SCMChillModeManager.java |  19 +++-
 .../scm/server/StorageContainerManager.java |   5 +-
 .../scm/chillmode/TestSCMChillModeManager.java  |  81 --
 6 files changed, 213 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b71cc7f3/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 2d28a5b..f16503e 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -87,6 +87,11 @@ public final class HddsConfigKeys {
   "hdds.scm.chillmode.min.datanode";
   public static final int HDDS_SCM_CHILLMODE_MIN_DATANODE_DEFAULT = 1;
 
+  public static final String HDDS_SCM_CHILLMODE_PIPELINE_AVAILABILITY_CHECK =
+  "hdds.scm.chillmode.pipeline-availability.check";
+  public static final boolean
+  HDDS_SCM_CHILLMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT = false;
+
   // % of containers which should have at least one reported replica
   // before SCM comes out of chill mode.
   public static final String HDDS_SCM_CHILLMODE_THRESHOLD_PCT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b71cc7f3/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 9f3d7e1..aa22b2b 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1232,6 +1232,15 @@
   </property>
 
   <property>
+    <name>hdds.scm.chillmode.pipeline-availability.check</name>
+    <value>false</value>
+    <tag>HDDS,SCM,OPERATION</tag>
+    <description>
+      Boolean value to enable pipeline availability check during SCM chill mode.
+    </description>
+  </property>
+
+  <property>
     <name>hdds.container.action.max.limit</name>
     <value>20</value>
     <tag>DATANODE</tag>

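For context on how this flag is consumed: only the config key, its default, and the class names in the file list above come from this commit. The field and constructor arguments below are assumptions, so treat this as a minimal sketch of the gating logic rather than the actual SCMChillModeManager code.

  // Hedged sketch: register the extra exit rule only when the operator
  // opts in; the default (false) leaves chill-mode behavior unchanged.
  if (conf.getBoolean(
      HddsConfigKeys.HDDS_SCM_CHILLMODE_PIPELINE_AVAILABILITY_CHECK,
      HddsConfigKeys.HDDS_SCM_CHILLMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT)) {
    // Rule name and constructor arguments are illustrative assumptions.
    exitRules.put("pipelineRule",
        new PipelineChillModeRule(pipelineManager, this));
  }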
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b71cc7f3/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/PipelineChillModeRule.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/PipelineChillModeRule.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/PipelineChillModeRule.java
new file mode 100644
index 0000000..f9a6e59
--- /dev/null
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/PipelineChillModeRule.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.chillmode;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import 

hadoop git commit: YARN-9067. YARN Resource Manager is running OOM because of leak of Configuration Object. Contributed by Eric Yang.

2018-11-28 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/trunk fe7dab8ef -> efc4d91cb


YARN-9067. YARN Resource Manager is running OOM because of leak of 
Configuration Object. Contributed by Eric Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/efc4d91c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/efc4d91c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/efc4d91c

Branch: refs/heads/trunk
Commit: efc4d91cbeab8a13f6d61cb0e56443adb2d77559
Parents: fe7dab8
Author: Weiwei Yang 
Authored: Thu Nov 29 09:34:14 2018 +0800
Committer: Weiwei Yang 
Committed: Thu Nov 29 09:34:14 2018 +0800

--
 .../hadoop/yarn/service/webapp/ApiServer.java   | 209 +++
 .../hadoop/yarn/service/ServiceClientTest.java  |   2 +-
 .../yarn/service/client/ServiceClient.java  |   1 +
 3 files changed, 126 insertions(+), 86 deletions(-)
--
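Each REST handler created a ServiceClient, and ServiceClient.init() allocates a fresh Configuration; close() sat on the success path only, so every request that threw left the client (and its Configuration) unreleased, accumulating until the daemon hosting the API ran out of heap. The fix, applied uniformly in the hunks below, is the standard acquire/release shape (condensed from the first hunk):

  ServiceClient sc = getServiceClient();
  try {
    sc.init(YARN_CONFIG);            // allocates per-client state
    sc.start();
    return sc.actionBuild(service);  // may throw YarnException/IOException
  } finally {
    sc.close();                      // now runs on failure paths too
  }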


http://git-wip-us.apache.org/repos/asf/hadoop/blob/efc4d91c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index db831ba..88aeefd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -118,10 +118,13 @@ public class ApiServer {
   @Override
   public Void run() throws YarnException, IOException {
 ServiceClient sc = getServiceClient();
-sc.init(YARN_CONFIG);
-sc.start();
-sc.actionBuild(service);
-sc.close();
+try {
+  sc.init(YARN_CONFIG);
+  sc.start();
+  sc.actionBuild(service);
+} finally {
+  sc.close();
+}
 return null;
   }
 });
@@ -133,11 +136,14 @@ public class ApiServer {
   @Override
   public ApplicationId run() throws IOException, YarnException {
 ServiceClient sc = getServiceClient();
-sc.init(YARN_CONFIG);
-sc.start();
-ApplicationId applicationId = sc.actionCreate(service);
-sc.close();
-return applicationId;
+try {
+  sc.init(YARN_CONFIG);
+  sc.start();
+  ApplicationId applicationId = sc.actionCreate(service);
+  return applicationId;
+} finally {
+  sc.close();
+}
   }
 });
 serviceStatus.setDiagnostics("Application ID: " + applicationId);
@@ -245,29 +251,32 @@ public class ApiServer {
   public Integer run() throws Exception {
 int result = 0;
 ServiceClient sc = getServiceClient();
-sc.init(YARN_CONFIG);
-sc.start();
-Exception stopException = null;
 try {
-  result = sc.actionStop(appName, destroy);
-  if (result == EXIT_SUCCESS) {
-LOG.info("Successfully stopped service {}", appName);
-  }
-} catch (Exception e) {
-  LOG.info("Got exception stopping service", e);
-  stopException = e;
-}
-if (destroy) {
-  result = sc.actionDestroy(appName);
-  if (result == EXIT_SUCCESS) {
-LOG.info("Successfully deleted service {}", appName);
+  sc.init(YARN_CONFIG);
+  sc.start();
+  Exception stopException = null;
+  try {
+result = sc.actionStop(appName, destroy);
+if (result == EXIT_SUCCESS) {
+  LOG.info("Successfully stopped service {}", appName);
+}
+  } catch (Exception e) {
+LOG.info("Got exception stopping service", e);
+stopException = e;
   }
-} else {
-  if (stopException != null) {
-throw stopException;
+  if (destroy) {
+result = sc.actionDestroy(appName);
+if (result == EXIT_SUCCESS) {
+  LOG.info("Successfully deleted service {}", appName);
+}
+  } else {
+  

hadoop git commit: YARN-8934. [GPG] Add JvmMetricsInfo and pause monitor. Contributed by Bilwa S T.

2018-11-28 Thread botong
Repository: hadoop
Updated Branches:
  refs/heads/YARN-7402 e1017a676 -> d93507ef1


YARN-8934. [GPG] Add JvmMetricsInfo and pause monitor. Contributed by Bilwa S T.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d93507ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d93507ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d93507ef

Branch: refs/heads/YARN-7402
Commit: d93507ef1500559397eba75651d3d75fdf0cdade
Parents: e1017a6
Author: Botong Huang 
Authored: Wed Nov 28 15:04:30 2018 -0800
Committer: Botong Huang 
Committed: Wed Nov 28 15:04:30 2018 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 24 +++
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |  1 +
 .../hadoop/yarn/webapp/util/WebAppUtils.java| 10 +++
 .../src/main/resources/yarn-default.xml | 28 +
 .../GlobalPolicyGenerator.java  | 66 
 .../webapp/GPGController.java   | 44 +
 .../webapp/GPGOverviewBlock.java| 50 +++
 .../webapp/GPGOverviewPage.java | 52 +++
 .../globalpolicygenerator/webapp/GPGWebApp.java | 45 +
 .../globalpolicygenerator/webapp/NavBlock.java  | 42 +
 .../webapp/package-info.java| 24 +++
 11 files changed, 386 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93507ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index aa990d3..9a8e48e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3430,6 +3430,14 @@ public class YarnConfiguration extends Configuration {
   public static final String FEDERATION_GPG_PREFIX =
   FEDERATION_PREFIX + "gpg.";
 
+  public static final String GPG_WEBAPP_PREFIX = FEDERATION_GPG_PREFIX
+  + "webapp.";
+
+  /** Enable/disable CORS filter. */
+  public static final String GPG_WEBAPP_ENABLE_CORS_FILTER =
+  GPG_WEBAPP_PREFIX + "cross-origin.enabled";
+  public static final boolean DEFAULT_GPG_WEBAPP_ENABLE_CORS_FILTER = false;
+
   // The number of threads to use for the GPG scheduled executor service
   public static final String GPG_SCHEDULED_EXECUTOR_THREADS =
   FEDERATION_GPG_PREFIX + "scheduled.executor.threads";
@@ -3457,6 +3465,22 @@ public class YarnConfiguration extends Configuration {
   FEDERATION_GPG_PREFIX + "application.cleaner.interval-ms";
   public static final long DEFAULT_GPG_APPCLEANER_INTERVAL_MS = -1;
 
+  /** The address of the GPG web application. */
+  public static final String GPG_WEBAPP_ADDRESS =
+  GPG_WEBAPP_PREFIX + "address";
+
+  public static final int DEFAULT_GPG_WEBAPP_PORT = 8069;
+  public static final String DEFAULT_GPG_WEBAPP_ADDRESS =
+  "0.0.0.0:" + DEFAULT_GPG_WEBAPP_PORT;
+
+  /** The https address of the GPG web application. */
+  public static final String GPG_WEBAPP_HTTPS_ADDRESS =
+  GPG_WEBAPP_PREFIX + "https.address";
+
+  public static final int DEFAULT_GPG_WEBAPP_HTTPS_PORT = 8070;
+  public static final String DEFAULT_GPG_WEBAPP_HTTPS_ADDRESS =
+  "0.0.0.0:" + DEFAULT_GPG_WEBAPP_HTTPS_PORT;
+
   /**
* Specifications on how (many times) to contact Router for apps. We need to
* do this because Router might return partial application list because some

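The new keys follow the usual YarnConfiguration pattern: a prefix constant, an address key with a default host:port, and a CORS toggle. A small usage sketch, relying only on standard Configuration accessors; the surrounding wiring is assumed:

  YarnConfiguration conf = new YarnConfiguration();
  // GPG web UI bind address; defaults to 0.0.0.0:8069 (HTTP), 0.0.0.0:8070 (HTTPS).
  String httpAddr = conf.get(YarnConfiguration.GPG_WEBAPP_ADDRESS,
      YarnConfiguration.DEFAULT_GPG_WEBAPP_ADDRESS);
  boolean corsEnabled = conf.getBoolean(
      YarnConfiguration.GPG_WEBAPP_ENABLE_CORS_FILTER,
      YarnConfiguration.DEFAULT_GPG_WEBAPP_ENABLE_CORS_FILTER);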
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93507ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 641a5f0..dc98113 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -237,6 +237,7 @@
             <exclude>src/main/resources/webapps/test/.keep</exclude>
             <exclude>src/main/resources/webapps/proxy/.keep</exclude>
             <exclude>src/main/resources/webapps/node/.keep</exclude>
+            <exclude>src/main/resources/webapps/gpg/.keep</exclude>
             <exclude>src/main/resources/webapps/static/dt-1.10.18/css/jquery.dataTables.css</exclude>
             <exclude>src/main/resources/webapps/static/dt-1.10.18/css/custom_datatable.css</exclude>
             <exclude>src/main/resources/webapps/static/dt-1.10.18/css/jui-dt.css</exclude>


[2/2] hadoop git commit: Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)

2018-11-28 Thread shv
Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)

(cherry picked from commit 351cf87c92872d90f62c476f85ae4d02e485769c)
(cherry picked from commit d61d84279f7f22867c23dd95e8bfeb70ea7e0690)
(cherry picked from commit f5fd5aa025c904e9a2ff8c5fd932aaed2363a6a0)
(cherry picked from commit e20a840174bc2b27fcc0935e0086977bd6fbfcb3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1569cc62
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1569cc62
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1569cc62

Branch: refs/heads/branch-2.7
Commit: 1569cc62cda563fed1db26c3e47d884abe1c5a45
Parents: af3b37d
Author: Andrew Purtell 
Authored: Mon Nov 12 12:40:47 2018 -0800
Committer: Konstantin V Shvachko 
Committed: Wed Nov 28 16:49:40 2018 -0800

--
 .../impl/container-executor.c   | 83 +---
 .../impl/container-executor.h   |  7 +-
 .../main/native/container-executor/impl/main.c  | 23 --
 .../src/site/markdown/NodeManagerCgroups.md | 10 +++
 4 files changed, 103 insertions(+), 20 deletions(-)
--
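The interesting new helper below, is_feature_enabled(), treats a configured value as valid only if strtol() consumes the entire string and the result is 0 or 1; anything else logs a warning and falls back to the default, so a typo in the config file cannot silently enable a privileged feature. A Java rendering of the same contract, purely illustrative (and slightly stricter than strtol, which tolerates leading whitespace):

  // Accept exactly "0" or "1"; everything else falls back to the default.
  static boolean isFeatureEnabled(String raw, boolean defaultValue, String key) {
    if ("1".equals(raw)) {
      return true;
    }
    if ("0".equals(raw)) {
      return false;
    }
    if (raw != null) {
      System.err.printf("Illegal value '%s' for '%s' in configuration. "
          + "Using default value: %b.%n", raw, key, defaultValue);
    }
    return defaultValue;
  }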


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1569cc62/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 9601264..b262ef3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -62,6 +62,8 @@ static const int DEFAULT_MIN_USERID = 1000;
 
 static const char* DEFAULT_BANNED_USERS[] = {"mapred", "hdfs", "bin", 0};
 
+static const int DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED = 0;
+
 //struct to store the user details
 struct passwd *user_detail = NULL;
 
@@ -984,6 +986,36 @@ int create_log_dirs(const char *app_id, char * const * 
log_dirs) {
 }
 
 
+static int is_feature_enabled(const char* feature_key, int default_value) {
+char *enabled_str = get_value(feature_key, &executor_cfg);
+int enabled = default_value;
+
+if (enabled_str != NULL) {
+char *end_ptr = NULL;
+enabled = strtol(enabled_str, &end_ptr, 10);
+
+if ((enabled_str == end_ptr || *end_ptr != '\0') ||
+(enabled < 0 || enabled > 1)) {
+  fprintf(LOGFILE, "Illegal value '%s' for '%s' in configuration. "
+  "Using default value: %d.\n", enabled_str, feature_key,
+  default_value);
+  fflush(LOGFILE);
+  free(enabled_str);
+  return default_value;
+}
+
+free(enabled_str);
+return enabled;
+} else {
+return default_value;
+}
+}
+
+int is_mount_cgroups_support_enabled() {
+return is_feature_enabled(MOUNT_CGROUP_SUPPORT_ENABLED_KEY,
+  DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED);
+}
+
 /**
  * Function to prepare the application directories for the container.
  */
@@ -1466,20 +1498,25 @@ void chown_dir_contents(const char *dir_path, uid_t 
uid, gid_t gid) {
   DIR *dp;
   struct dirent *ep;
 
-  char *path_tmp = malloc(strlen(dir_path) + NAME_MAX + 2);
+  size_t len = strlen(dir_path) + NAME_MAX + 2;
+  char *path_tmp = malloc(len);
   if (path_tmp == NULL) {
 return;
   }
 
-  char *buf = stpncpy(path_tmp, dir_path, strlen(dir_path));
-  *buf++ = '/';
-
   dp = opendir(dir_path);
   if (dp != NULL) {
-while (ep = readdir(dp)) {
-  stpncpy(buf, ep->d_name, strlen(ep->d_name));
-  buf[strlen(ep->d_name)] = '\0';
-  change_owner(path_tmp, uid, gid);
+while ((ep = readdir(dp)) != NULL) {
+  if (strcmp(ep->d_name, ".") != 0 &&
+  strcmp(ep->d_name, "..") != 0 &&
+  strstr(ep->d_name, "..") == NULL) {
+int result = snprintf(path_tmp, len, "%s/%s", dir_path, ep->d_name);
+if (result > 0 && result < len) {
+  change_owner(path_tmp, uid, gid);
+} else {
+  fprintf(LOGFILE, "Ignored %s/%s due to length", dir_path, 
ep->d_name);
+}
+  }
 }
 closedir(dp);
   }
@@ -1502,13 +1539,29 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
   char *mount_path = malloc(strlen(pair));
   char hier_path[EXECUTOR_PATH_MAX];
   int result = 0;
+  size_t len = strlen(pair);
 
-  if (get_kv_key(pair, 

[1/2] hadoop git commit: YARN-4245. Generalize config file handling in container-executor. Contributed by Sidharta Seethana.

2018-11-28 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 29d969eff -> 1569cc62c


YARN-4245. Generalize config file handling in container-executor. Contributed 
by Sidharta Seethana.

(cherry picked from commit 8ed2e060e80c0def3fcb7604e0bd27c1c24d291e)
(cherry picked from commit 78919f8c341ec645cf9134991e3ae89a929b9184)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af3b37d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af3b37d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af3b37d0

Branch: refs/heads/branch-2.7
Commit: af3b37d0cc7628ab6020411720174e9936fc876a
Parents: 29d969e
Author: Andrew Purtell 
Authored: Mon Nov 12 20:52:47 2018 +
Committer: Konstantin V Shvachko 
Committed: Wed Nov 28 16:49:00 2018 -0800

--
 hadoop-yarn-project/CHANGES.txt |  17 ++-
 .../container-executor/impl/configuration.c | 124 ++-
 .../container-executor/impl/configuration.h |  28 -
 .../impl/container-executor.c   |  23 +++-
 .../impl/container-executor.h   |  13 +-
 .../main/native/container-executor/impl/main.c  |   4 +-
 .../test/test-container-executor.c  |   8 +-
 7 files changed, 141 insertions(+), 76 deletions(-)
--
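The substance of the refactor shown below: configuration.c previously parsed into one hidden process-wide struct (struct configuration config), so only a single config file could be live at a time; every function now takes a struct configuration *cfg supplied by the caller. The same move expressed in Java, strictly as an analogy rather than the patch itself:

  // Before: one implicit, process-wide instance.
  //   static final Properties CONFIG = new Properties();
  //   static String getValue(String key) { return CONFIG.getProperty(key); }

  // After: the caller owns the instance and can hold several at once.
  static String getValue(String key, Properties cfg) {
    return cfg.getProperty(key);
  }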


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af3b37d0/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f32c656..d84719b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1,6 +1,21 @@
 Hadoop YARN Change Log
 
-Release 2.7.7 - 2018-07-18
+Release 2.7.8 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+YARN-4245. Generalize config file handling in container-executor.
+(Sidharta Seethana, Andrew Purtell via shv).
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.7.7 - 2018-07-18
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af3b37d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
index 7645d86..1667b0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
@@ -33,34 +33,22 @@
 
 #define MAX_SIZE 10
 
-struct confentry {
-  const char *key;
-  const char *value;
-};
-
-struct configuration {
-  int size;
-  struct confentry **confdetails;
-};
-
-struct configuration config={.size=0, .confdetails=NULL};
-
 //clean up method for freeing configuration
-void free_configurations() {
+void free_configurations(struct configuration *cfg) {
   int i = 0;
-  for (i = 0; i < config.size; i++) {
-if (config.confdetails[i]->key != NULL) {
-  free((void *)config.confdetails[i]->key);
+  for (i = 0; i < cfg->size; i++) {
+if (cfg->confdetails[i]->key != NULL) {
+  free((void *)cfg->confdetails[i]->key);
 }
-if (config.confdetails[i]->value != NULL) {
-  free((void *)config.confdetails[i]->value);
+if (cfg->confdetails[i]->value != NULL) {
+  free((void *)cfg->confdetails[i]->value);
 }
-free(config.confdetails[i]);
+free(cfg->confdetails[i]);
   }
-  if (config.size > 0) {
-free(config.confdetails);
+  if (cfg->size > 0) {
+free(cfg->confdetails);
   }
-  config.size = 0;
+  cfg->size = 0;
 }
 
 /**
@@ -137,8 +125,8 @@ int check_configuration_permissions(const char* file_name) {
   return 0;
 }
 
-//function used to load the configurations present in the secure config
-void read_config(const char* file_name) {
+
+void read_config(const char* file_name, struct configuration *cfg) {
   FILE *conf_file;
   char *line;
   char *equaltok;
@@ -156,9 +144,9 @@ void read_config(const char* file_name) {
   #endif
 
   //allocate space for ten configuration items.
-  config.confdetails = (struct confentry **) malloc(sizeof(struct confentry *)
+  cfg->confdetails = (struct confentry **) malloc(sizeof(struct confentry *)
   * MAX_SIZE);
-  config.size = 0;
+  cfg->size = 0;
   conf_file = fopen(file_name, "r");
   if (conf_file == NULL) {
 fprintf(ERRORFILE, "Invalid conf file provided : %s \n", file_name);
@@ -200,9 

hadoop git commit: YARN-9030. Log aggregation changes to handle filesystems which do not support setting permissions. (Suma Shivaprasad via wangda)

2018-11-28 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 7a78bdf7b -> ee9deb6e9


YARN-9030. Log aggregation changes to handle filesystems which do not support 
setting permissions. (Suma Shivaprasad via wangda)

Change-Id: I80f1e8196b8624e24d74494719fdedfd7061dced
(cherry picked from commit 9de8e8d0496a2628b63cc841b1fdee80e2912f7a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee9deb6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee9deb6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee9deb6e

Branch: refs/heads/branch-3.2
Commit: ee9deb6e9ccebcf6ef2660e32b0d26f458b3ee5d
Parents: 7a78bdf
Author: Wangda Tan 
Authored: Wed Nov 21 17:28:37 2018 -0800
Committer: Wangda Tan 
Committed: Wed Nov 28 15:06:35 2018 -0800

--
 .../LogAggregationFileController.java   | 74 +++-
 1 file changed, 56 insertions(+), 18 deletions(-)
--
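The crux of the patch: some filesystems behind the FileSystem API (object stores in particular) do not implement permission operations, and there is no capability query, so the controller probes once and remembers the answer in the new fsSupportsChmod field. Condensed from the hunk below:

  try {
    remoteFS.setPermission(qualified, new FsPermission(TLDIR_PERMISSIONS));
  } catch (UnsupportedOperationException use) {
    // No way to ask the FS up front; record the result so later
    // chmod/chown-style calls can be skipped for this filesystem.
    fsSupportsChmod = false;
  }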


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee9deb6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
index fe65288..e37308d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
@@ -109,6 +109,8 @@ public abstract class LogAggregationFileController {
   protected int retentionSize;
   protected String fileControllerName;
 
+  protected boolean fsSupportsChmod = true;
+
   public LogAggregationFileController() {}
 
   /**
@@ -250,7 +252,6 @@ public abstract class LogAggregationFileController {
* Verify and create the remote log directory.
*/
   public void verifyAndCreateRemoteLogDir() {
-boolean logPermError = true;
 // Checking the existence of the TLD
 FileSystem remoteFS = null;
 try {
@@ -264,14 +265,12 @@ public abstract class LogAggregationFileController {
 try {
   FsPermission perms =
   remoteFS.getFileStatus(remoteRootLogDir).getPermission();
-  if (!perms.equals(TLDIR_PERMISSIONS) && logPermError) {
+  if (!perms.equals(TLDIR_PERMISSIONS)) {
 LOG.warn("Remote Root Log Dir [" + remoteRootLogDir
 + "] already exist, but with incorrect permissions. "
 + "Expected: [" + TLDIR_PERMISSIONS + "], Found: [" + perms
 + "]." + " The cluster may have problems with multiple users.");
-logPermError = false;
-  } else {
-logPermError = true;
+
   }
 } catch (FileNotFoundException e) {
   remoteExists = false;
@@ -280,15 +279,26 @@ public abstract class LogAggregationFileController {
   "Failed to check permissions for dir ["
   + remoteRootLogDir + "]", e);
 }
+
+Path qualified =
+remoteRootLogDir.makeQualified(remoteFS.getUri(),
+remoteFS.getWorkingDirectory());
 if (!remoteExists) {
   LOG.warn("Remote Root Log Dir [" + remoteRootLogDir
   + "] does not exist. Attempting to create it.");
   try {
-Path qualified =
-remoteRootLogDir.makeQualified(remoteFS.getUri(),
-remoteFS.getWorkingDirectory());
 remoteFS.mkdirs(qualified, new FsPermission(TLDIR_PERMISSIONS));
-remoteFS.setPermission(qualified, new FsPermission(TLDIR_PERMISSIONS));
+
+// Not possible to query FileSystem API to check if it supports
+// chmod, chown etc. Hence resorting to catching exceptions here.
+// Remove when FS APi is ready
+try {
+  remoteFS.setPermission(qualified, new 
FsPermission(TLDIR_PERMISSIONS));
+} catch ( UnsupportedOperationException use) {
+  LOG.info("Unable to set permissions for configured filesystem since"
+  + " it does not support this", remoteFS.getScheme());
+  fsSupportsChmod = false;
+}
 
 UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
 String primaryGroupName = null;
@@ -301,13 +311,31 @@ public abstract class LogAggregationFileController {
 }
 // set owner on the remote directory only if the primary group exists
 if (primaryGroupName != null) {
-  remoteFS.setOwner(qualified,
-  

[3/6] hadoop git commit: YARN-9061. Improve the GPU/FPGA module log message of container-executor. (Zhankun Tang via wangda)

2018-11-28 Thread wangda
YARN-9061. Improve the GPU/FPGA module log message of container-executor. 
(Zhankun Tang via wangda)

Change-Id: Iece9b47438357077a53984a820d4d6423f480518


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ed87567
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ed87567
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ed87567

Branch: refs/heads/trunk
Commit: 9ed87567ad0f1c26a263ce6d8fba56d066260c5c
Parents: 579ef4b
Author: Wangda Tan 
Authored: Wed Nov 28 14:31:31 2018 -0800
Committer: Wangda Tan 
Committed: Wed Nov 28 14:31:31 2018 -0800

--
 .../native/container-executor/impl/modules/fpga/fpga-module.c   | 5 +++--
 .../native/container-executor/impl/modules/gpu/gpu-module.c | 5 +++--
 2 files changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ed87567/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/fpga/fpga-module.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/fpga/fpga-module.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/fpga/fpga-module.c
index c1a2f83..e947d7c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/fpga/fpga-module.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/fpga/fpga-module.c
@@ -141,7 +141,7 @@ void reload_fpga_configuration() {
 /*
  * Format of FPGA request commandline:
  *
- * c-e fpga --excluded_fpgas 0,1,3 --container_id container_x_y
+ * c-e --module-fpga --excluded_fpgas 0,1,3 --container_id container_x_y
  */
 int handle_fpga_request(update_cgroups_parameters_function func,
 const char* module_name, int module_argc, char** module_argv) {
@@ -213,7 +213,8 @@ int handle_fpga_request(update_cgroups_parameters_function 
func,
 
   if (!minor_devices) {
  // Minor devices is null, skip following call.
- fprintf(ERRORFILE, "is not specified, skip cgroups call.\n");
+ fprintf(ERRORFILE,
+ "--excluded-fpgas is not specified, skip cgroups call.\n");
  goto cleanup;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ed87567/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
index 1a1b164..7522338 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
@@ -141,7 +141,7 @@ void reload_gpu_configuration() {
 /*
  * Format of GPU request commandline:
  *
- * c-e gpu --excluded_gpus 0,1,3 --container_id container_x_y
+ * c-e --module-gpu --excluded_gpus 0,1,3 --container_id container_x_y
  */
 int handle_gpu_request(update_cgroups_parameters_func func,
 const char* module_name, int module_argc, char** module_argv) {
@@ -213,7 +213,8 @@ int handle_gpu_request(update_cgroups_parameters_func func,
 
   if (!minor_devices) {
  // Minor devices is null, skip following call.
- fprintf(ERRORFILE, "is not specified, skip cgroups call.\n");
+ fprintf(ERRORFILE,
+ "--excluded_gpus is not specified, skip cgroups call.\n");
  goto cleanup;
   }
 





[1/6] hadoop git commit: YARN-9030. Log aggregation changes to handle filesystems which do not support setting permissions. (Suma Shivaprasad via wangda)

2018-11-28 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4d8de7ab6 -> fe7dab8ef


YARN-9030. Log aggregation changes to handle filesystems which do not support 
setting permissions. (Suma Shivaprasad via wangda)

Change-Id: I80f1e8196b8624e24d74494719fdedfd7061dced


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9de8e8d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9de8e8d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9de8e8d0

Branch: refs/heads/trunk
Commit: 9de8e8d0496a2628b63cc841b1fdee80e2912f7a
Parents: 4d8de7a
Author: Wangda Tan 
Authored: Wed Nov 21 17:28:37 2018 -0800
Committer: Wangda Tan 
Committed: Wed Nov 28 13:36:21 2018 -0800

--
 .../LogAggregationFileController.java   | 74 +++-
 1 file changed, 56 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9de8e8d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
index fe65288..e37308d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
@@ -109,6 +109,8 @@ public abstract class LogAggregationFileController {
   protected int retentionSize;
   protected String fileControllerName;
 
+  protected boolean fsSupportsChmod = true;
+
   public LogAggregationFileController() {}
 
   /**
@@ -250,7 +252,6 @@ public abstract class LogAggregationFileController {
* Verify and create the remote log directory.
*/
   public void verifyAndCreateRemoteLogDir() {
-boolean logPermError = true;
 // Checking the existence of the TLD
 FileSystem remoteFS = null;
 try {
@@ -264,14 +265,12 @@ public abstract class LogAggregationFileController {
 try {
   FsPermission perms =
   remoteFS.getFileStatus(remoteRootLogDir).getPermission();
-  if (!perms.equals(TLDIR_PERMISSIONS) && logPermError) {
+  if (!perms.equals(TLDIR_PERMISSIONS)) {
 LOG.warn("Remote Root Log Dir [" + remoteRootLogDir
 + "] already exist, but with incorrect permissions. "
 + "Expected: [" + TLDIR_PERMISSIONS + "], Found: [" + perms
 + "]." + " The cluster may have problems with multiple users.");
-logPermError = false;
-  } else {
-logPermError = true;
+
   }
 } catch (FileNotFoundException e) {
   remoteExists = false;
@@ -280,15 +279,26 @@ public abstract class LogAggregationFileController {
   "Failed to check permissions for dir ["
   + remoteRootLogDir + "]", e);
 }
+
+Path qualified =
+remoteRootLogDir.makeQualified(remoteFS.getUri(),
+remoteFS.getWorkingDirectory());
 if (!remoteExists) {
   LOG.warn("Remote Root Log Dir [" + remoteRootLogDir
   + "] does not exist. Attempting to create it.");
   try {
-Path qualified =
-remoteRootLogDir.makeQualified(remoteFS.getUri(),
-remoteFS.getWorkingDirectory());
 remoteFS.mkdirs(qualified, new FsPermission(TLDIR_PERMISSIONS));
-remoteFS.setPermission(qualified, new FsPermission(TLDIR_PERMISSIONS));
+
+// Not possible to query FileSystem API to check if it supports
+// chmod, chown etc. Hence resorting to catching exceptions here.
+// Remove when FS APi is ready
+try {
+  remoteFS.setPermission(qualified, new 
FsPermission(TLDIR_PERMISSIONS));
+} catch ( UnsupportedOperationException use) {
+  LOG.info("Unable to set permissions for configured filesystem since"
+  + " it does not support this", remoteFS.getScheme());
+  fsSupportsChmod = false;
+}
 
 UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
 String primaryGroupName = null;
@@ -301,13 +311,31 @@ public abstract class LogAggregationFileController {
 }
 // set owner on the remote directory only if the primary group exists
 if (primaryGroupName != null) {
-  remoteFS.setOwner(qualified,
-  loginUser.getShortUserName(), primaryGroupName);
+  try {
+

[6/6] hadoop git commit: YARN-8989. [YARN-8851] Move DockerCommandPlugin volume related APIs' invocation from DockerLinuxContainerRuntime#prepareContainer to #launchContainer. (Zhankun Tang via wangda

2018-11-28 Thread wangda
YARN-8989. [YARN-8851] Move DockerCommandPlugin volume related APIs' invocation 
from DockerLinuxContainerRuntime#prepareContainer to #launchContainer. (Zhankun 
Tang via wangda)

Change-Id: Ia6d532c687168448416dfdf46f0ac34bff20e6ca


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe7dab8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe7dab8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe7dab8e

Branch: refs/heads/trunk
Commit: fe7dab8ef55f08cf18c2d62c782c1ab8930a5a15
Parents: 8976439
Author: Wangda Tan 
Authored: Wed Nov 28 14:55:16 2018 -0800
Committer: Wangda Tan 
Committed: Wed Nov 28 15:03:06 2018 -0800

--
 .../runtime/DockerLinuxContainerRuntime.java| 44 
 .../runtime/TestDockerContainerRuntime.java | 15 ---
 2 files changed, 24 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe7dab8e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 15ff0ff..225bc19 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -456,32 +456,6 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   @Override
   public void prepareContainer(ContainerRuntimeContext ctx)
   throws ContainerExecutionException {
-Container container = ctx.getContainer();
-
-// Create volumes when needed.
-if (nmContext != null
-&& nmContext.getResourcePluginManager().getNameToPlugins() != null) {
-  for (ResourcePlugin plugin : nmContext.getResourcePluginManager()
-  .getNameToPlugins().values()) {
-DockerCommandPlugin dockerCommandPlugin =
-plugin.getDockerCommandPluginInstance();
-if (dockerCommandPlugin != null) {
-  DockerVolumeCommand dockerVolumeCommand =
-  dockerCommandPlugin.getCreateDockerVolumeCommand(
-  ctx.getContainer());
-  if (dockerVolumeCommand != null) {
-runDockerVolumeCommand(dockerVolumeCommand, container);
-
-// After volume created, run inspect to make sure volume properly
-// created.
-if (dockerVolumeCommand.getSubCommand().equals(
-DockerVolumeCommand.VOLUME_CREATE_SUB_COMMAND)) {
-  checkDockerVolumeCreated(dockerVolumeCommand, container);
-}
-  }
-}
-  }
-}
   }
 
   private void checkDockerVolumeCreated(
@@ -1034,14 +1008,30 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   }
 }
 
-// use plugins to update docker run command.
+// use plugins to create volume and update docker run command.
 if (nmContext != null
 && nmContext.getResourcePluginManager().getNameToPlugins() != null) {
   for (ResourcePlugin plugin : nmContext.getResourcePluginManager()
   .getNameToPlugins().values()) {
 DockerCommandPlugin dockerCommandPlugin =
 plugin.getDockerCommandPluginInstance();
+
 if (dockerCommandPlugin != null) {
+  // Create volumes when needed.
+  DockerVolumeCommand dockerVolumeCommand =
+  dockerCommandPlugin.getCreateDockerVolumeCommand(
+  ctx.getContainer());
+  if (dockerVolumeCommand != null) {
+runDockerVolumeCommand(dockerVolumeCommand, container);
+
+// After volume created, run inspect to make sure volume properly
+// created.
+if (dockerVolumeCommand.getSubCommand().equals(
+DockerVolumeCommand.VOLUME_CREATE_SUB_COMMAND)) {
+  checkDockerVolumeCreated(dockerVolumeCommand, container);
+}
+  }
+  // Update cmd
   dockerCommandPlugin.updateDockerRunCommand(runCommand, container);
 }
   }


[2/6] hadoop git commit: YARN-8882. [YARN-8851] Add a shared device mapping manager (scheduler) for device plugins. (Zhankun Tang via wangda)

2018-11-28 Thread wangda
YARN-8882. [YARN-8851] Add a shared device mapping manager (scheduler) for 
device plugins. (Zhankun Tang via wangda)

Change-Id: I9435136642c3d556971a357bf687f69df90bb45e


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/579ef4be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/579ef4be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/579ef4be

Branch: refs/heads/trunk
Commit: 579ef4be063745c5211127eca83a393ceddc8b79
Parents: 9de8e8d
Author: Wangda Tan 
Authored: Wed Nov 28 14:09:52 2018 -0800
Committer: Wangda Tan 
Committed: Wed Nov 28 14:09:52 2018 -0800

--
 .../resourceplugin/ResourcePluginManager.java   |  14 +-
 .../deviceframework/DeviceMappingManager.java   | 324 
 .../deviceframework/DevicePluginAdapter.java|  20 +-
 .../DeviceResourceHandlerImpl.java  | 145 +++
 .../TestDeviceMappingManager.java   | 366 +
 .../TestDevicePluginAdapter.java| 388 ++-
 6 files changed, 1245 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/579ef4be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
index 9741b12..6dfe817 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.DevicePlugin;
 import 
org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.DeviceRegisterRequest;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework.DeviceMappingManager;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework.DevicePluginAdapter;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.fpga.FpgaResourcePlugin;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu.GpuResourcePlugin;
@@ -52,12 +53,13 @@ import static 
org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI;
 public class ResourcePluginManager {
   private static final Logger LOG =
   LoggerFactory.getLogger(ResourcePluginManager.class);
-  private static final Set<String> SUPPORTED_RESOURCE_PLUGINS = ImmutableSet.of(
-      GPU_URI, FPGA_URI);
+  private static final Set<String> SUPPORTED_RESOURCE_PLUGINS =
+      ImmutableSet.of(GPU_URI, FPGA_URI);
 
   private Map<String, ResourcePlugin> configuredPlugins =
   Collections.emptyMap();
 
+  private DeviceMappingManager deviceMappingManager = null;
 
   public synchronized void initialize(Context context)
   throws YarnException, ClassNotFoundException {
@@ -123,7 +125,7 @@ public class ResourcePluginManager {
   throws YarnRuntimeException, ClassNotFoundException {
 LOG.info("The pluggable device framework enabled," +
 "trying to load the vendor plugins");
-
+deviceMappingManager = new DeviceMappingManager(context);
 String[] pluginClassNames = configuration.getStrings(
 YarnConfiguration.NM_PLUGGABLE_DEVICE_FRAMEWORK_DEVICE_CLASSES);
 if (null == pluginClassNames) {
@@ -174,7 +176,7 @@ public class ResourcePluginManager {
   resourceName,
   pluginClassName);
   DevicePluginAdapter pluginAdapter = new DevicePluginAdapter(
-  resourceName, dpInstance);
+  resourceName, dpInstance, deviceMappingManager);
   LOG.info("Adapter of {} created. Initializing..", pluginClassName);
   try {
 pluginAdapter.initialize(context);
@@ -235,6 +237,10 @@ public class ResourcePluginManager {
 return true;
   }
 
+  public DeviceMappingManager getDeviceMappingManager() {
+return deviceMappingManager;
+  }
+
   public synchronized void cleanup() throws YarnException {
 

[5/6] hadoop git commit: YARN-8975. [Submarine] Use predefined Charset object StandardCharsets.UTF_8 instead of String UTF-8. (Zhankun Tang via wangda)

2018-11-28 Thread wangda
YARN-8975. [Submarine] Use predefined Charset object StandardCharsets.UTF_8 
instead of String UTF-8. (Zhankun Tang via wangda)

Change-Id: If6c7904aa17895e543cfca245264249eb7328bdc


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89764392
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89764392
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89764392

Branch: refs/heads/trunk
Commit: 897643928c534062d45d00a36b95fd99b4f6
Parents: 8ebeda9
Author: Wangda Tan 
Authored: Wed Nov 28 14:39:06 2018 -0800
Committer: Wangda Tan 
Committed: Wed Nov 28 14:39:06 2018 -0800

--
 .../submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--
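Beyond style, the two constructors differ in general Java terms: the String overload declares a checked UnsupportedEncodingException and resolves the charset name on every call, while the Charset overload does neither, since StandardCharsets.UTF_8 is guaranteed to exist on every JVM:

  // String overload: checked exception, per-call charset lookup.
  Writer a = new OutputStreamWriter(out, "UTF-8");
  // Charset overload: no checked exception, constant resolved once.
  Writer b = new OutputStreamWriter(out, StandardCharsets.UTF_8);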


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89764392/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
index b58ad77..2e84c96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
@@ -49,6 +49,7 @@ import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.io.PrintWriter;
 import java.io.Writer;
+import java.nio.charset.StandardCharsets;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -218,7 +219,8 @@ public class YarnServiceJobSubmitter implements 
JobSubmitter {
   private String generateCommandLaunchScript(RunJobParameters parameters,
   TaskType taskType, Component comp) throws IOException {
 File file = File.createTempFile(taskType.name() + "-launch-script", ".sh");
-Writer w = new OutputStreamWriter(new FileOutputStream(file), "UTF-8");
+Writer w = new OutputStreamWriter(new FileOutputStream(file),
+StandardCharsets.UTF_8);
 PrintWriter pw = new PrintWriter(w);
 
 try {





hadoop git commit: MAPREDUCE-7164. FileOutputCommitter does not report progress while merging paths. Contributed by Kuhu Shukla

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 e4dcc3e60 -> 4c238b50d


MAPREDUCE-7164. FileOutputCommitter does not report progress while merging 
paths. Contributed by Kuhu Shukla

(cherry picked from commit 4d8de7ab690ef919b392b12d856482a6a1f2bb3d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c238b50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c238b50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c238b50

Branch: refs/heads/branch-3.0
Commit: 4c238b50dfd83a10923bfd6eb28d7a6ad864a40f
Parents: e4dcc3e
Author: Jason Lowe 
Authored: Wed Nov 28 14:54:59 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 16:10:02 2018 -0600

--
 .../lib/output/FileOutputCommitter.java | 28 +++--
 .../lib/output/TestFileOutputCommitter.java | 33 
 2 files changed, 51 insertions(+), 10 deletions(-)
--
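Background for the change: commitJob() recursively merges every committed task directory into the final output, and on large trees or slow filesystems that walk can trip liveness timeouts (e.g. mapreduce.task.timeout) because nothing reports progress. The patch threads the JobContext through mergePaths() and pings it at each step; since not every JobContext implements Progressable, the call is guarded, as in the new helper below:

  private void reportProgress(JobContext context) {
    if (context instanceof Progressable) {
      // Resets the progress timer so long merges are not killed as hung.
      ((Progressable) context).progress();
    }
  }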


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c238b50/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
index 86af2cf..0ed3259 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Progressable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -389,7 +390,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
 
   if (algorithmVersion == 1) {
 for (FileStatus stat: getAllCommittedTaskPaths(context)) {
-  mergePaths(fs, stat, finalOutput);
+  mergePaths(fs, stat, finalOutput, context);
 }
   }
 
@@ -440,10 +441,11 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
* @throws IOException on any error
*/
   private void mergePaths(FileSystem fs, final FileStatus from,
-  final Path to) throws IOException {
+  final Path to, JobContext context) throws IOException {
 if (LOG.isDebugEnabled()) {
   LOG.debug("Merging data from " + from + " to " + to);
 }
+reportProgress(context);
 FileStatus toStat;
 try {
   toStat = fs.getFileStatus(to);
@@ -467,22 +469,28 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   if (!fs.delete(to, true)) {
 throw new IOException("Failed to delete " + to);
   }
-  renameOrMerge(fs, from, to);
+  renameOrMerge(fs, from, to, context);
 } else {
   //It is a directory so merge everything in the directories
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   } else {
-renameOrMerge(fs, from, to);
+renameOrMerge(fs, from, to, context);
   }
 }
   }
 
-  private void renameOrMerge(FileSystem fs, FileStatus from, Path to)
-  throws IOException {
+  private void reportProgress(JobContext context) {
+if (context instanceof Progressable) {
+  ((Progressable) context).progress();
+}
+  }
+
+  private void renameOrMerge(FileSystem fs, FileStatus from, Path to,
+  JobContext context) throws IOException {
 if (algorithmVersion == 1) {
   if (!fs.rename(from.getPath(), to)) {
 throw new IOException("Failed to rename " + from + " to " + to);
@@ -491,7 +499,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   fs.mkdirs(to);
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   }
@@ -583,7 +591,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   committedTaskPath);
 } else {
   // directly merge everything from 

hadoop git commit: MAPREDUCE-7164. FileOutputCommitter does not report progress while merging paths. Contributed by Kuhu Shukla

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 d9457df98 -> e7fa638fe


MAPREDUCE-7164. FileOutputCommitter does not report progress while merging 
paths. Contributed by Kuhu Shukla

(cherry picked from commit 4d8de7ab690ef919b392b12d856482a6a1f2bb3d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7fa638f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7fa638f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7fa638f

Branch: refs/heads/branch-3.1
Commit: e7fa638fe8588174d5d3db287779531de09a3e1b
Parents: d9457df
Author: Jason Lowe 
Authored: Wed Nov 28 14:54:59 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 16:01:05 2018 -0600

--
 .../lib/output/FileOutputCommitter.java | 28 +++--
 .../lib/output/TestFileOutputCommitter.java | 33 
 2 files changed, 51 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7fa638f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
index cbae575..94af338 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Progressable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -400,7 +401,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
 
   if (algorithmVersion == 1) {
 for (FileStatus stat: getAllCommittedTaskPaths(context)) {
-  mergePaths(fs, stat, finalOutput);
+  mergePaths(fs, stat, finalOutput, context);
 }
   }
 
@@ -451,10 +452,11 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
* @throws IOException on any error
*/
   private void mergePaths(FileSystem fs, final FileStatus from,
-  final Path to) throws IOException {
+  final Path to, JobContext context) throws IOException {
 if (LOG.isDebugEnabled()) {
   LOG.debug("Merging data from " + from + " to " + to);
 }
+reportProgress(context);
 FileStatus toStat;
 try {
   toStat = fs.getFileStatus(to);
@@ -478,22 +480,28 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   if (!fs.delete(to, true)) {
 throw new IOException("Failed to delete " + to);
   }
-  renameOrMerge(fs, from, to);
+  renameOrMerge(fs, from, to, context);
 } else {
   //It is a directory so merge everything in the directories
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   } else {
-renameOrMerge(fs, from, to);
+renameOrMerge(fs, from, to, context);
   }
 }
   }
 
-  private void renameOrMerge(FileSystem fs, FileStatus from, Path to)
-  throws IOException {
+  private void reportProgress(JobContext context) {
+if (context instanceof Progressable) {
+  ((Progressable) context).progress();
+}
+  }
+
+  private void renameOrMerge(FileSystem fs, FileStatus from, Path to,
+  JobContext context) throws IOException {
 if (algorithmVersion == 1) {
   if (!fs.rename(from.getPath(), to)) {
 throw new IOException("Failed to rename " + from + " to " + to);
@@ -502,7 +510,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   fs.mkdirs(to);
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   }
@@ -594,7 +602,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   committedTaskPath);
 } else {
   // directly merge everything from taskAttemptPath to output directory

hadoop git commit: MAPREDUCE-7164. FileOutputCommitter does not report progress while merging paths. Contributed by Kuhu Shukla

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 df0e7766e -> 7a78bdf7b


MAPREDUCE-7164. FileOutputCommitter does not report progress while merging 
paths. Contributed by Kuhu Shukla

(cherry picked from commit 4d8de7ab690ef919b392b12d856482a6a1f2bb3d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a78bdf7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a78bdf7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a78bdf7

Branch: refs/heads/branch-3.2
Commit: 7a78bdf7bbf278678dc10de3133930723972b60d
Parents: df0e776
Author: Jason Lowe 
Authored: Wed Nov 28 14:54:59 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 15:54:59 2018 -0600

--
 .../lib/output/FileOutputCommitter.java | 28 +++--
 .../lib/output/TestFileOutputCommitter.java | 33 
 2 files changed, 51 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a78bdf7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
index cbae575..94af338 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Progressable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -400,7 +401,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
 
   if (algorithmVersion == 1) {
 for (FileStatus stat: getAllCommittedTaskPaths(context)) {
-  mergePaths(fs, stat, finalOutput);
+  mergePaths(fs, stat, finalOutput, context);
 }
   }
 
@@ -451,10 +452,11 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
* @throws IOException on any error
*/
   private void mergePaths(FileSystem fs, final FileStatus from,
-  final Path to) throws IOException {
+  final Path to, JobContext context) throws IOException {
 if (LOG.isDebugEnabled()) {
   LOG.debug("Merging data from " + from + " to " + to);
 }
+reportProgress(context);
 FileStatus toStat;
 try {
   toStat = fs.getFileStatus(to);
@@ -478,22 +480,28 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   if (!fs.delete(to, true)) {
 throw new IOException("Failed to delete " + to);
   }
-  renameOrMerge(fs, from, to);
+  renameOrMerge(fs, from, to, context);
 } else {
   //It is a directory so merge everything in the directories
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   } else {
-renameOrMerge(fs, from, to);
+renameOrMerge(fs, from, to, context);
   }
 }
   }
 
-  private void renameOrMerge(FileSystem fs, FileStatus from, Path to)
-  throws IOException {
+  private void reportProgress(JobContext context) {
+if (context instanceof Progressable) {
+  ((Progressable) context).progress();
+}
+  }
+
+  private void renameOrMerge(FileSystem fs, FileStatus from, Path to,
+  JobContext context) throws IOException {
 if (algorithmVersion == 1) {
   if (!fs.rename(from.getPath(), to)) {
 throw new IOException("Failed to rename " + from + " to " + to);
@@ -502,7 +510,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   fs.mkdirs(to);
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   }
@@ -594,7 +602,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   committedTaskPath);
 } else {
   // directly merge everything from taskAttemptPath to output directory

hadoop git commit: MAPREDUCE-7164. FileOutputCommitter does not report progress while merging paths. Contributed by Kuhu Shukla

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 300f560fc -> 4d8de7ab6


MAPREDUCE-7164. FileOutputCommitter does not report progress while merging 
paths. Contributed by Kuhu Shukla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d8de7ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d8de7ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d8de7ab

Branch: refs/heads/trunk
Commit: 4d8de7ab690ef919b392b12d856482a6a1f2bb3d
Parents: 300f560
Author: Jason Lowe 
Authored: Wed Nov 28 14:54:59 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 14:54:59 2018 -0600

--
 .../lib/output/FileOutputCommitter.java | 28 +++--
 .../lib/output/TestFileOutputCommitter.java | 33 
 2 files changed, 51 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d8de7ab/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
index cbae575..94af338 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Progressable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -400,7 +401,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
 
   if (algorithmVersion == 1) {
 for (FileStatus stat: getAllCommittedTaskPaths(context)) {
-  mergePaths(fs, stat, finalOutput);
+  mergePaths(fs, stat, finalOutput, context);
 }
   }
 
@@ -451,10 +452,11 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
* @throws IOException on any error
*/
   private void mergePaths(FileSystem fs, final FileStatus from,
-  final Path to) throws IOException {
+  final Path to, JobContext context) throws IOException {
 if (LOG.isDebugEnabled()) {
   LOG.debug("Merging data from " + from + " to " + to);
 }
+reportProgress(context);
 FileStatus toStat;
 try {
   toStat = fs.getFileStatus(to);
@@ -478,22 +480,28 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   if (!fs.delete(to, true)) {
 throw new IOException("Failed to delete " + to);
   }
-  renameOrMerge(fs, from, to);
+  renameOrMerge(fs, from, to, context);
 } else {
   //It is a directory so merge everything in the directories
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   } else {
-renameOrMerge(fs, from, to);
+renameOrMerge(fs, from, to, context);
   }
 }
   }
 
-  private void renameOrMerge(FileSystem fs, FileStatus from, Path to)
-  throws IOException {
+  private void reportProgress(JobContext context) {
+if (context instanceof Progressable) {
+  ((Progressable) context).progress();
+}
+  }
+
+  private void renameOrMerge(FileSystem fs, FileStatus from, Path to,
+  JobContext context) throws IOException {
 if (algorithmVersion == 1) {
   if (!fs.rename(from.getPath(), to)) {
 throw new IOException("Failed to rename " + from + " to " + to);
@@ -502,7 +510,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   fs.mkdirs(to);
   for (FileStatus subFrom : fs.listStatus(from.getPath())) {
 Path subTo = new Path(to, subFrom.getPath().getName());
-mergePaths(fs, subFrom, subTo);
+mergePaths(fs, subFrom, subTo, context);
   }
 }
   }
@@ -594,7 +602,7 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   committedTaskPath);
 } else {
   // directly merge everything from taskAttemptPath to output directory
-  mergePaths(fs, taskAttemptDirStatus, 
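
Why the instanceof guard in the patch works: in the MapReduce API, TaskAttemptContext extends both JobContext and org.apache.hadoop.util.Progressable, so the context available at commit time can usually be pinged without widening mergePaths beyond JobContext. Below is a minimal, self-contained sketch of the same keep-alive pattern; the nested Progressable and JobContext interfaces and the DirNode tree are hypothetical stand-ins, not the Hadoop types.

// Self-contained sketch; Progressable mirrors org.apache.hadoop.util.Progressable,
// JobContext and DirNode are hypothetical stand-ins for the Hadoop classes.
import java.util.Arrays;
import java.util.List;

public class MergeProgressSketch {
  interface Progressable { void progress(); }
  interface JobContext { }
  static class ProgressingContext implements JobContext, Progressable {
    public void progress() { System.out.println("heartbeat"); }
  }
  static class DirNode {
    final String name;
    final List<DirNode> children;
    DirNode(String name, DirNode... children) {
      this.name = name;
      this.children = Arrays.asList(children);
    }
  }

  // Mirrors mergePaths(): report progress once per directory visited so a
  // long recursive merge keeps the task's heartbeat fresh.
  static void merge(DirNode from, JobContext context) {
    reportProgress(context);
    for (DirNode child : from.children) {
      merge(child, context);
    }
  }

  // Same guard as the patch: only ping contexts that can actually progress.
  static void reportProgress(JobContext context) {
    if (context instanceof Progressable) {
      ((Progressable) context).progress();
    }
  }

  public static void main(String[] args) {
    DirNode tree = new DirNode("out", new DirNode("a"), new DirNode("b"));
    merge(tree, new ProgressingContext()); // prints one heartbeat per directory
  }
}

Reporting once per directory visited keeps the attempt alive, so a commit that walks a very large output tree no longer risks tripping mapreduce.task.timeout.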

hadoop git commit: HDFS-14102. Performance improvement in BlockPlacementPolicyDefault. Contributed by Beluga Behr.

2018-11-28 Thread gifuma
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4ca3a6b21 -> 300f560fc


HDFS-14102. Performance improvement in BlockPlacementPolicyDefault. Contributed 
by Beluga Behr.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/300f560f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/300f560f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/300f560f

Branch: refs/heads/trunk
Commit: 300f560fcca879d212eadffaa59fbbb7017c9a3f
Parents: 4ca3a6b
Author: Giovanni Matteo Fumarola 
Authored: Wed Nov 28 11:33:22 2018 -0800
Committer: Giovanni Matteo Fumarola 
Committed: Wed Nov 28 11:33:22 2018 -0800

--
 .../BlockPlacementPolicyDefault.java| 22 
 1 file changed, 13 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/300f560f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d396845..5b8a8b4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -71,6 +71,9 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
   CHOOSE_RANDOM_REASONS = ThreadLocal
   .withInitial(() -> new HashMap());
 
+  private static final BlockPlacementStatus ONE_RACK_PLACEMENT =
+  new BlockPlacementStatusDefault(1, 1, 1);
+
   private enum NodeNotChosenReason {
 NOT_IN_SERVICE("the node is not in service"),
 NODE_STALE("the node is stale"),
@@ -1029,22 +1032,23 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
   @Override
   public BlockPlacementStatus verifyBlockPlacement(DatanodeInfo[] locs,
   int numberOfReplicas) {
-if (locs == null)
+if (locs == null) {
   locs = DatanodeDescriptor.EMPTY_ARRAY;
+}
 if (!clusterMap.hasClusterEverBeenMultiRack()) {
   // only one rack
-  return new BlockPlacementStatusDefault(1, 1, 1);
+  return ONE_RACK_PLACEMENT;
 }
-int minRacks = 2;
-minRacks = Math.min(minRacks, numberOfReplicas);
+final int minRacks = Math.min(2, numberOfReplicas);
 // 1. Check that all locations are different.
 // 2. Count locations on different racks.
-Set<String> racks = new TreeSet<>();
-for (DatanodeInfo dn : locs)
-  racks.add(dn.getNetworkLocation());
-return new BlockPlacementStatusDefault(racks.size(), minRacks,
-clusterMap.getNumOfRacks());
+final long rackCount = Arrays.asList(locs).stream()
+.map(dn -> dn.getNetworkLocation()).distinct().count();
+
+return new BlockPlacementStatusDefault(Math.toIntExact(rackCount),
+minRacks, clusterMap.getNumOfRacks());
   }
+
   /**
* Decide whether deleting the specified replica of the block still makes
* the block conform to the configured block placement policy.
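
The change above swaps a TreeSet accumulation for a distinct-count over a stream: when only the number of unique racks matters, there is no reason to pay for sorted-set ordering, and the constant ONE_RACK_PLACEMENT object avoids an allocation on the single-rack fast path. A small, self-contained sketch of the counting pattern, with hypothetical rack strings:

import java.util.Arrays;

public class RackCountSketch {
  public static void main(String[] args) {
    // Hypothetical values, as returned by dn.getNetworkLocation().
    String[] locs = {"/rack1", "/rack1", "/rack2", "/rack3", "/rack2"};
    // Same shape as the patch: distinct() deduplicates via hashing, so no
    // comparison-based ordering cost as with a TreeSet.
    long rackCount = Arrays.stream(locs).distinct().count();
    int numberOfReplicas = 3; // hypothetical
    int minRacks = Math.min(2, numberOfReplicas);
    System.out.println("racks=" + rackCount + ", minRacks=" + minRacks); // racks=3, minRacks=2
  }
}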





hadoop git commit: HDFS-14108. Performance improvement in BlockManager Data Structures. Contributed by Beluga Behr.

2018-11-28 Thread gifuma
Repository: hadoop
Updated Branches:
  refs/heads/trunk 64a4b6b08 -> 4ca3a6b21


HDFS-14108. Performance improvement in BlockManager Data Structures. 
Contributed by Beluga Behr.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ca3a6b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ca3a6b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ca3a6b2

Branch: refs/heads/trunk
Commit: 4ca3a6b21a3a25acf16d026c699154047b1f686b
Parents: 64a4b6b
Author: Giovanni Matteo Fumarola 
Authored: Wed Nov 28 11:25:47 2018 -0800
Committer: Giovanni Matteo Fumarola 
Committed: Wed Nov 28 11:25:47 2018 -0800

--
 .../server/blockmanagement/BlockManager.java| 30 
 1 file changed, 12 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ca3a6b2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 36bbeb1..b326a75 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -34,12 +34,10 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashSet;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.Set;
-import java.util.TreeSet;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
@@ -844,7 +842,7 @@ public class BlockManager implements BlockStatsMXBean {
 // source node returned is not used
 chooseSourceDatanodes(getStoredBlock(block), containingNodes,
 containingLiveReplicasNodes, numReplicas,
-new LinkedList<Byte>(), LowRedundancyBlocks.LEVEL);
+new ArrayList<Byte>(), LowRedundancyBlocks.LEVEL);
 
 // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which 
are 
 // not included in the numReplicas.liveReplicas() count
@@ -1842,7 +1840,7 @@ public class BlockManager implements BlockStatsMXBean {
   int computeReconstructionWorkForBlocks(
   List<List<BlockInfo>> blocksToReconstruct) {
 int scheduledWork = 0;
-List<BlockReconstructionWork> reconWork = new LinkedList<>();
+List<BlockReconstructionWork> reconWork = new ArrayList<>();
 
 // Step 1: categorize at-risk blocks into replication and EC tasks
 namesystem.writeLock();
@@ -1864,14 +1862,10 @@ public class BlockManager implements BlockStatsMXBean {
 }
 
 // Step 2: choose target nodes for each reconstruction task
-final Set<DatanodeDescriptor> excludedNodes = new HashSet<>();
-for(BlockReconstructionWork rw : reconWork){
+for (BlockReconstructionWork rw : reconWork) {
   // Exclude all of the containing nodes from being targets.
   // This list includes decommissioning or corrupt nodes.
-  excludedNodes.clear();
-  for (DatanodeDescriptor dn : rw.getContainingNodes()) {
-excludedNodes.add(dn);
-  }
+  final Set<DatanodeDescriptor> excludedNodes = new HashSet<>(rw.getContainingNodes());
 
   // choose replication targets: NOT HOLDING THE GLOBAL LOCK
   final BlockPlacementPolicy placementPolicy =
@@ -1882,9 +1876,9 @@ public class BlockManager implements BlockStatsMXBean {
 // Step 3: add tasks to the DN
 namesystem.writeLock();
 try {
-  for(BlockReconstructionWork rw : reconWork){
+  for (BlockReconstructionWork rw : reconWork) {
 final DatanodeStorageInfo[] targets = rw.getTargets();
-if(targets == null || targets.length == 0){
+if (targets == null || targets.length == 0) {
   rw.resetTargets();
   continue;
 }
@@ -1901,7 +1895,7 @@ public class BlockManager implements BlockStatsMXBean {
 
 if (blockLog.isDebugEnabled()) {
   // log which blocks have been scheduled for reconstruction
-  for(BlockReconstructionWork rw : reconWork){
+  for (BlockReconstructionWork rw : reconWork) {
 DatanodeStorageInfo[] targets = rw.getTargets();
 if (targets != null && targets.length != 0) {
   StringBuilder targetList = new StringBuilder("datanode(s)");
@@ -2666,11 +2660,11 @@ public class BlockManager implements BlockStatsMXBean {
 // Modify the (block-->datanode) map, according to the difference
 // between the old and new block report.
 //
-Collection<BlockInfoToAdd> toAdd = new LinkedList<>();
-Collection toRemove = new 
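
Two ideas recur throughout this patch: prefer ArrayList to LinkedList for append-then-iterate workloads, and build a fresh HashSet directly from the source collection instead of reusing one shared set and clearing it on every iteration. A self-contained sketch of both, with hypothetical task and datanode names:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class CollectionChoiceSketch {
  public static void main(String[] args) {
    // Append-then-iterate workloads favor ArrayList: contiguous storage,
    // no per-node allocation, better cache locality than LinkedList.
    List<String> reconWork = new ArrayList<>();
    reconWork.add("replication-task");
    reconWork.add("ec-task");

    // Instead of one shared set that is clear()ed every iteration, build
    // the excluded set straight from the source collection, as the patch does.
    List<String> containingNodes = Arrays.asList("dn1", "dn2"); // hypothetical
    Set<String> excludedNodes = new HashSet<>(containingNodes);

    System.out.println(reconWork + " excluded=" + excludedNodes);
  }
}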

hadoop git commit: HDDS-284. Checksum for ChunksData.

2018-11-28 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 085f10e75 -> 64a4b6b08


HDDS-284. Checksum for ChunksData.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64a4b6b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64a4b6b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64a4b6b0

Branch: refs/heads/trunk
Commit: 64a4b6b08baf68c47cb17079bad2190f86166f50
Parents: 085f10e
Author: Hanisha Koneru 
Authored: Wed Nov 28 10:53:12 2018 -0800
Committer: Hanisha Koneru 
Committed: Wed Nov 28 10:53:12 2018 -0800

--
 .../hdds/scm/storage/ChunkInputStream.java  |  11 +
 .../hdds/scm/storage/ChunkOutputStream.java |  21 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  14 +-
 .../apache/hadoop/ozone/common/Checksum.java| 239 +++
 .../hadoop/ozone/common/ChecksumData.java   | 190 +++
 .../ozone/common/OzoneChecksumException.java|  66 +
 .../container/common/helpers/ChunkInfo.java |  28 +--
 .../main/proto/DatanodeContainerProtocol.proto  |  18 +-
 .../hadoop/ozone/common/TestChecksum.java   | 101 
 .../container/keyvalue/helpers/ChunkUtils.java  |  43 +---
 .../keyvalue/impl/ChunkManagerImpl.java |   4 -
 .../keyvalue/TestChunkManagerImpl.java  |  16 --
 .../ozone/client/io/ChunkGroupOutputStream.java |  30 ++-
 .../hadoop/ozone/client/rpc/RpcClient.java  |  25 ++
 .../ozone/client/rpc/TestOzoneRpcClient.java|  98 +++-
 .../ozone/container/ContainerTestHelper.java|  17 +-
 .../common/TestBlockDeletingService.java|   3 +-
 .../common/impl/TestContainerPersistence.java   |  32 ++-
 .../web/storage/DistributedStorageHandler.java  |  25 ++
 19 files changed, 862 insertions(+), 119 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64a4b6b0/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
index 7b243d8..2e24aca 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
@@ -18,6 +18,10 @@
 
 package org.apache.hadoop.hdds.scm.storage;
 
+import org.apache.hadoop.hdds.scm.container.common.helpers
+.StorageContainerException;
+import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.ChecksumData;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.apache.hadoop.fs.Seekable;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
@@ -206,6 +210,9 @@ public class ChunkInputStream extends InputStream 
implements Seekable {
   readChunkResponse = ContainerProtocolCalls
   .readChunk(xceiverClient, chunkInfo, blockID, traceID);
 } catch (IOException e) {
+  if (e instanceof StorageContainerException) {
+throw e;
+  }
   throw new IOException("Unexpected OzoneException: " + e.toString(), e);
 }
 ByteString byteString = readChunkResponse.getData();
@@ -215,6 +222,10 @@ public class ChunkInputStream extends InputStream 
implements Seekable {
   .format("Inconsistent read for chunk=%s len=%d bytesRead=%d",
   chunkInfo.getChunkName(), chunkInfo.getLen(), 
byteString.size()));
 }
+ChecksumData checksumData = ChecksumData.getFromProtoBuf(
+chunkInfo.getChecksumData());
+Checksum.verifyChecksum(byteString, checksumData);
+
 buffers = byteString.asReadOnlyByteBufferList();
 bufferIndex = 0;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64a4b6b0/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index bdc6a83..85f8646 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -21,6 +21,9 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.XceiverClientAsyncReply;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.ChecksumData;
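
The shape of the change: a checksum is computed when a chunk is written, stored alongside the chunk metadata, and verified when the chunk is read back, so corruption surfaces at read time instead of propagating. Below is a minimal sketch of that verify-on-read pattern using java.util.zip.CRC32; the method names are hypothetical, and this is not Ozone's Checksum/ChecksumData API, which supports several algorithms.

import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

public class ChunkChecksumSketch {
  // Write path: compute a checksum and persist it with the chunk metadata.
  static long checksumOf(byte[] data) {
    CRC32 crc = new CRC32();
    crc.update(data, 0, data.length);
    return crc.getValue();
  }

  // Read path: recompute and compare, failing fast on corruption, in the
  // spirit of Checksum.verifyChecksum(byteString, checksumData) above.
  static void verify(byte[] data, long expected) {
    if (checksumOf(data) != expected) {
      throw new IllegalStateException("Checksum mismatch: corrupt chunk");
    }
  }

  public static void main(String[] args) {
    byte[] chunk = "chunk-data".getBytes(StandardCharsets.UTF_8);
    long stored = checksumOf(chunk); // kept in ChunkInfo in the real code
    verify(chunk, stored);           // throws if the bytes were corrupted
    System.out.println("chunk verified, crc=" + stored);
  }
}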

[2/3] hadoop git commit: HADOOP-15798. LocalMetadataStore put() does not retain isDeleted in parent listing. Contributed by Gabor Bota.

2018-11-28 Thread mackrorysd
HADOOP-15798. LocalMetadataStore put() does not retain isDeleted in parent 
listing. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e148c3ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e148c3ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e148c3ff

Branch: refs/heads/trunk
Commit: e148c3ff094e16d788f0bf9833ced02d27640805
Parents: 5d96b74
Author: Sean Mackrory 
Authored: Mon Nov 26 09:25:36 2018 -0700
Committer: Sean Mackrory 
Committed: Wed Nov 28 10:45:09 2018 -0700

--
 .../fs/s3a/s3guard/LocalMetadataEntry.java  |  3 ++
 .../fs/s3a/s3guard/LocalMetadataStore.java  | 29 +---
 .../fs/s3a/ITestS3GuardListConsistency.java | 18 +++-
 .../fs/s3a/s3guard/MetadataStoreTestBase.java   | 15 ++
 4 files changed, 48 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e148c3ff/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java
index 6040d67..5405074 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataEntry.java
@@ -31,6 +31,9 @@ public final class LocalMetadataEntry {
   @Nullable
   private DirListingMetadata dirListingMetadata;
 
+  LocalMetadataEntry() {
+  }
+
   LocalMetadataEntry(PathMetadata pmd){
 pathMetadata = pmd;
 dirListingMetadata = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e148c3ff/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
index 1a7f028..b8f9635 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
@@ -269,19 +269,28 @@ public class LocalMetadataStore implements MetadataStore {
   Path parentPath = path.getParent();
   if (parentPath != null) {
 LocalMetadataEntry parentMeta = localCache.getIfPresent(parentPath);
-DirListingMetadata parentDirMeta =
-new DirListingMetadata(parentPath, DirListingMetadata.EMPTY_DIR,
-false);
-parentDirMeta.put(status);
-
-getDirListingMeta(parentPath);
 
+// Create empty parent LocalMetadataEntry if it doesn't exist
 if (parentMeta == null){
-  localCache.put(parentPath, new LocalMetadataEntry(parentDirMeta));
-} else if (!parentMeta.hasDirMeta()) {
+  parentMeta = new LocalMetadataEntry();
+  localCache.put(parentPath, parentMeta);
+}
+
+// If there is no directory metadata on the parent entry, create
+// an empty one
+if (!parentMeta.hasDirMeta()) {
+  DirListingMetadata parentDirMeta =
+  new DirListingMetadata(parentPath, DirListingMetadata.EMPTY_DIR,
+  false);
   parentMeta.setDirListingMetadata(parentDirMeta);
-} else {
-  parentMeta.getDirListingMeta().put(status);
+}
+
+// Add the child status to the listing
+parentMeta.getDirListingMeta().put(status);
+
+// Mark the listing entry as deleted if the meta is set to deleted
+if(meta.isDeleted()) {
+  parentMeta.getDirListingMeta().markDeleted(path);
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e148c3ff/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
index a1df1a5..d3b3c21 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java
@@ -178,7 +178,8 @@ public class ITestS3GuardListConsistency extends 
AbstractS3ATestBase {
   public void 
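
The core of the fix: put() now always routes the child's status into the parent's directory listing and, when the entry being stored is itself a tombstone, re-marks it as deleted in that listing, which the old branching skipped. A toy, self-contained sketch of the invariant; DirListing here is a hypothetical stand-in for DirListingMetadata, not its real API:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ParentListingSketch {
  // Toy stand-in for DirListingMetadata: live children plus tombstones.
  static class DirListing {
    final Set<String> children = new HashSet<>();
    final Set<String> deleted = new HashSet<>();
    void put(String child) { children.add(child); }
    void markDeleted(String child) { children.remove(child); deleted.add(child); }
  }

  public static void main(String[] args) {
    Map<String, DirListing> cache = new HashMap<>();

    // put(): lazily create the parent entry, then always add the child...
    DirListing parent = cache.computeIfAbsent("/bucket/dir", k -> new DirListing());
    parent.put("/bucket/dir/file");

    // ...and if the stored entry is itself a tombstone, propagate the
    // deletion into the parent listing, the step the old code skipped.
    boolean metaIsDeleted = true; // hypothetical
    if (metaIsDeleted) {
      parent.markDeleted("/bucket/dir/file");
    }
    System.out.println("children=" + parent.children + " deleted=" + parent.deleted);
  }
}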

[3/3] hadoop git commit: HADOOP-15370. S3A log message on rm s3a://bucket/ not intuitive. Contributed by Gabor Bota.

2018-11-28 Thread mackrorysd
HADOOP-15370. S3A log message on rm s3a://bucket/ not intuitive. Contributed by 
Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d96b74f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d96b74f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d96b74f

Branch: refs/heads/trunk
Commit: 5d96b74f33ca716c9fe4fadb046f79ed488a3059
Parents: 3ce99e3
Author: Sean Mackrory 
Authored: Mon Nov 26 08:54:39 2018 -0700
Committer: Sean Mackrory 
Committed: Wed Nov 28 10:45:09 2018 -0700

--
 .../main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java| 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d96b74f/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index df7bd77..e6eab8a 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -1826,16 +1826,20 @@ public class S3AFileSystem extends FileSystem 
implements StreamCapabilities {
*/
   private boolean rejectRootDirectoryDelete(S3AFileStatus status,
   boolean recursive) throws IOException {
-LOG.info("s3a delete the {} root directory of {}", bucket, recursive);
+LOG.info("s3a delete the {} root directory. Path: {}. Recursive: {}",
+bucket, status.getPath(), recursive);
 boolean emptyRoot = status.isEmptyDirectory() == Tristate.TRUE;
 if (emptyRoot) {
   return true;
 }
 if (recursive) {
+  LOG.error("Cannot delete root path: {}", status.getPath());
   return false;
 } else {
   // reject
-  throw new PathIOException(bucket, "Cannot delete root path");
+  String msg = "Cannot delete root path: " + status.getPath();
+  LOG.error(msg);
+  throw new PathIOException(bucket, msg);
 }
   }
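
The reworked guard follows a simple contract: deleting an empty root is a quiet no-op success, a recursive root delete is refused with a logged error and a false return, and a non-recursive one fails loudly with the offending path in both the log line and the exception, so the two always agree. A self-contained sketch of that contract with a hypothetical path:

import java.io.IOException;

public class RootDeleteGuardSketch {
  static boolean rejectRootDelete(String path, boolean emptyRoot,
      boolean recursive) throws IOException {
    if (emptyRoot) {
      return true;                 // nothing to delete: quiet success
    }
    if (recursive) {
      System.err.println("Cannot delete root path: " + path);
      return false;                // refused; caller sees "nothing happened"
    }
    // Log and throw the same text so log line and stack trace agree.
    String msg = "Cannot delete root path: " + path;
    System.err.println(msg);
    throw new IOException(msg);
  }

  public static void main(String[] args) throws IOException {
    System.out.println(rejectRootDelete("s3a://bucket/", true, false));  // true
    System.out.println(rejectRootDelete("s3a://bucket/", false, true));  // false
  }
}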
 





[1/3] hadoop git commit: HADOOP-15947. Fix ITestDynamoDBMetadataStore test error issues. Contributed by Gabor Bota.

2018-11-28 Thread mackrorysd
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3ce99e32f -> 085f10e75


HADOOP-15947. Fix ITestDynamoDBMetadataStore test error issues. Contributed by 
Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/085f10e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/085f10e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/085f10e7

Branch: refs/heads/trunk
Commit: 085f10e75dea5446861253cf63aced337536481c
Parents: e148c3f
Author: Sean Mackrory 
Authored: Mon Nov 26 10:42:34 2018 -0700
Committer: Sean Mackrory 
Committed: Wed Nov 28 10:45:09 2018 -0700

--
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java |  2 +-
 .../s3a/s3guard/ITestDynamoDBMetadataStore.java   | 18 ++
 2 files changed, 15 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/085f10e7/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 6f892e5..90f6180 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -633,7 +633,7 @@ public class DynamoDBMetadataStore implements MetadataStore 
{
   LOG.trace("Listing table {} in region {} for {} returning {}",
   tableName, region, path, metas);
 
-  return (metas.isEmpty() || dirPathMeta == null)
+  return (metas.isEmpty() && dirPathMeta == null)
   ? null
   : new DirListingMetadata(path, metas, isAuthoritative,
   dirPathMeta.getLastUpdated());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/085f10e7/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
index 5355910..5ae8356 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
@@ -80,6 +80,11 @@ import static org.apache.hadoop.test.LambdaTestUtils.*;
  * A table will be created and shared between the tests,
  */
 public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
+
+  public ITestDynamoDBMetadataStore() {
+super();
+  }
+
   private static final Logger LOG =
   LoggerFactory.getLogger(ITestDynamoDBMetadataStore.class);
   public static final PrimaryKey
@@ -574,8 +579,8 @@ public class ITestDynamoDBMetadataStore extends 
MetadataStoreTestBase {
   }
 
   @Test
-  public void testProvisionTable() throws IOException {
-final String tableName = "testProvisionTable";
+  public void testProvisionTable() throws Exception {
+final String tableName =  "testProvisionTable-" + UUID.randomUUID();
 Configuration conf = getFileSystem().getConf();
 conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName);
 
@@ -587,13 +592,18 @@ public class ITestDynamoDBMetadataStore extends 
MetadataStoreTestBase {
   ddbms.provisionTable(oldProvision.getReadCapacityUnits() * 2,
   oldProvision.getWriteCapacityUnits() * 2);
   ddbms.initTable();
+  // we have to wait until the provisioning settings are applied,
+  // so until the table is ACTIVE again and not in UPDATING
+  ddbms.getTable().waitForActive();
   final ProvisionedThroughputDescription newProvision =
   dynamoDB.getTable(tableName).describe().getProvisionedThroughput();
   LOG.info("Old provision = {}, new provision = {}", oldProvision,
   newProvision);
-  assertEquals(oldProvision.getReadCapacityUnits() * 2,
+  assertEquals("Check newly provisioned table read capacity units.",
+  oldProvision.getReadCapacityUnits() * 2,
   newProvision.getReadCapacityUnits().longValue());
-  assertEquals(oldProvision.getWriteCapacityUnits() * 2,
+  assertEquals("Check newly provisioned table write capacity units.",
+  oldProvision.getWriteCapacityUnits() * 2,
   newProvision.getWriteCapacityUnits().longValue());
   ddbms.destroy();
 }
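
Two test-hygiene points are worth restating: a per-run unique table name keeps a leftover table from a failed run from poisoning the next one, and because DynamoDB provisioning changes are applied asynchronously, assertions must wait for the table to leave UPDATING and reach ACTIVE, which is what the added waitForActive() call does. A self-contained sketch of the poll-until-active idea; the simulated status supplier and the timeout values are hypothetical:

import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class WaitForActiveSketch {
  // Generic poll-until pattern behind Table#waitForActive(): keep checking
  // the reported state until it matches, or give up at a deadline.
  static void waitUntil(Supplier<String> status, String wanted, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!wanted.equals(status.get())) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("Timed out waiting for " + wanted);
      }
      TimeUnit.MILLISECONDS.sleep(100);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    // Unique per run, mirroring "testProvisionTable-" + UUID.randomUUID().
    String tableName = "testProvisionTable-" + UUID.randomUUID();
    long start = System.currentTimeMillis();
    // Simulated status source: UPDATING for ~300 ms, then ACTIVE.
    waitUntil(() -> System.currentTimeMillis() - start > 300 ? "ACTIVE" : "UPDATING",
        "ACTIVE", 5_000);
    System.out.println(tableName + " is ACTIVE");
  }
}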


-
To unsubscribe, 

hadoop git commit: YARN-8812. Containers fail during creating a symlink which started with hyphen for a resource file. Contributed by Oleksandr Shevchenko

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 7dac29411 -> e4dcc3e60


YARN-8812. Containers fail during creating a symlink which started with hyphen 
for a resource file. Contributed by Oleksandr Shevchenko

(cherry picked from commit 3ce99e32f7d7887412cae8337cd4ebeb3b2ee308)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4dcc3e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4dcc3e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4dcc3e6

Branch: refs/heads/branch-3.0
Commit: e4dcc3e606902ce15c1b160bbedc3ebe8ccb3a0b
Parents: 7dac294
Author: Jason Lowe 
Authored: Wed Nov 28 08:46:11 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 08:56:04 2018 -0600

--
 .../nodemanager/containermanager/launcher/ContainerLaunch.java | 2 +-
 .../nodemanager/containermanager/launcher/TestContainerLaunch.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4dcc3e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 114c681..b6d91d4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -1199,7 +1199,7 @@ public class ContainerLaunch implements Callable<Integer> {
 
 @Override
 protected void link(Path src, Path dst) throws IOException {
-  line("ln -sf \"", src.toUri().getPath(), "\" \"", dst.toString(), "\"");
+  line("ln -sf -- \"", src.toUri().getPath(), "\" \"", dst.toString(), 
"\"");
 }
 
 @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4dcc3e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 2a0a763..685baaf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -152,7 +152,7 @@ public class TestContainerLaunch extends 
BaseContainerManagerTest {
 File shellFile = null;
 File tempFile = null;
 String badSymlink = Shell.WINDOWS ? "foo@zz_#!-+bar.cmd" :
-  "foo@zz%_#*&!-+= bar()";
+  "-foo@zz%_#*&!-+= bar()";
 File symLinkFile = null;
 
 try {
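
The one-token fix relies on the POSIX end-of-options marker: after "--", ln treats every remaining argument as an operand, so a localized resource whose name begins with a hyphen (like the test's new "-foo@zz..." symlink) can no longer be misread as a flag by the generated launch script; quoting alone does not prevent that. A tiny sketch of the script line the NodeManager emits, with hypothetical paths:

public class LinkLineSketch {
  // Mirrors the generated launch-script line: "--" ends option parsing, so
  // a link name beginning with "-" is an operand to ln, not a flag.
  static String linkLine(String src, String dst) {
    return "ln -sf -- \"" + src + "\" \"" + dst + "\"";
  }

  public static void main(String[] args) {
    // Hypothetical paths; without "--", this dst would make ln fail with
    // an "invalid option" style error.
    System.out.println(linkLine("/nm-local/filecache/10/-foo.jar", "-foo.jar"));
  }
}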





hadoop git commit: YARN-8812. Containers fail during creating a symlink which started with hyphen for a resource file. Contributed by Oleksandr Shevchenko

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 fc74a3f80 -> d9457df98


YARN-8812. Containers fail during creating a symlink which started with hyphen 
for a resource file. Contributed by Oleksandr Shevchenko

(cherry picked from commit 3ce99e32f7d7887412cae8337cd4ebeb3b2ee308)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9457df9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9457df9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9457df9

Branch: refs/heads/branch-3.1
Commit: d9457df989ba8482d7a65dd2d781d2f28ccfd8f1
Parents: fc74a3f
Author: Jason Lowe 
Authored: Wed Nov 28 08:46:11 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 08:54:04 2018 -0600

--
 .../nodemanager/containermanager/launcher/ContainerLaunch.java | 2 +-
 .../nodemanager/containermanager/launcher/TestContainerLaunch.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9457df9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 999b66f..120ca2d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -1404,7 +1404,7 @@ public class ContainerLaunch implements Callable<Integer> {
 
 @Override
 protected void link(Path src, Path dst) throws IOException {
-  line("ln -sf \"", src.toUri().getPath(), "\" \"", dst.toString(), "\"");
+  line("ln -sf -- \"", src.toUri().getPath(), "\" \"", dst.toString(), 
"\"");
 }
 
 @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9457df9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 6a30f9e..ab5d47e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -159,7 +159,7 @@ public class TestContainerLaunch extends 
BaseContainerManagerTest {
 File shellFile = null;
 File tempFile = null;
 String badSymlink = Shell.WINDOWS ? "foo@zz_#!-+bar.cmd" :
-  "foo@zz%_#*&!-+= bar()";
+  "-foo@zz%_#*&!-+= bar()";
 File symLinkFile = null;
 
 try {





hadoop git commit: YARN-8812. Containers fail during creating a symlink which started with hyphen for a resource file. Contributed by Oleksandr Shevchenko

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 db8b2a130 -> df0e7766e


YARN-8812. Containers fail during creating a symlink which started with hyphen 
for a resource file. Contributed by Oleksandr Shevchenko

(cherry picked from commit 3ce99e32f7d7887412cae8337cd4ebeb3b2ee308)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df0e7766
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df0e7766
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df0e7766

Branch: refs/heads/branch-3.2
Commit: df0e7766e45e558a68f55ec5eb447cfb83885836
Parents: db8b2a1
Author: Jason Lowe 
Authored: Wed Nov 28 08:46:11 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 08:50:18 2018 -0600

--
 .../nodemanager/containermanager/launcher/ContainerLaunch.java | 2 +-
 .../nodemanager/containermanager/launcher/TestContainerLaunch.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df0e7766/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index f198e83..f27af55 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -1246,7 +1246,7 @@ public class ContainerLaunch implements Callable<Integer> {
 
 @Override
 protected void link(Path src, Path dst) throws IOException {
-  line("ln -sf \"", src.toUri().getPath(), "\" \"", dst.toString(), "\"");
+  line("ln -sf -- \"", src.toUri().getPath(), "\" \"", dst.toString(), 
"\"");
 }
 
 @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df0e7766/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 5714a1c..1f7df56 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -159,7 +159,7 @@ public class TestContainerLaunch extends 
BaseContainerManagerTest {
 File shellFile = null;
 File tempFile = null;
 String badSymlink = Shell.WINDOWS ? "foo@zz_#!-+bar.cmd" :
-  "foo@zz%_#*&!-+= bar()";
+  "-foo@zz%_#*&!-+= bar()";
 File symLinkFile = null;
 
 try {





hadoop git commit: YARN-8812. Containers fail during creating a symlink which started with hyphen for a resource file. Contributed by Oleksandr Shevchenko

2018-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 13a21f660 -> 3ce99e32f


YARN-8812. Containers fail during creating a symlink which started with hyphen 
for a resource file. Contributed by Oleksandr Shevchenko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ce99e32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ce99e32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ce99e32

Branch: refs/heads/trunk
Commit: 3ce99e32f7d7887412cae8337cd4ebeb3b2ee308
Parents: 13a21f6
Author: Jason Lowe 
Authored: Wed Nov 28 08:46:11 2018 -0600
Committer: Jason Lowe 
Committed: Wed Nov 28 08:46:11 2018 -0600

--
 .../nodemanager/containermanager/launcher/ContainerLaunch.java | 2 +-
 .../nodemanager/containermanager/launcher/TestContainerLaunch.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ce99e32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 45f6006..60b6e31 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -1296,7 +1296,7 @@ public class ContainerLaunch implements Callable<Integer> {
 
 @Override
 protected void link(Path src, Path dst) throws IOException {
-  line("ln -sf \"", src.toUri().getPath(), "\" \"", dst.toString(), "\"");
+  line("ln -sf -- \"", src.toUri().getPath(), "\" \"", dst.toString(), 
"\"");
 }
 
 @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ce99e32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 8c01175..93accf2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -167,7 +167,7 @@ public class TestContainerLaunch extends 
BaseContainerManagerTest {
 File shellFile = null;
 File tempFile = null;
 String badSymlink = Shell.WINDOWS ? "foo@zz_#!-+bar.cmd" :
-  "foo@zz%_#*&!-+= bar()";
+  "-foo@zz%_#*&!-+= bar()";
 File symLinkFile = null;
 
 try {





hadoop git commit: MAPREDUCE-6190. If a task stucks before its first heartbeat, it never timeouts and the MR job becomes stuck. Contributed by Zhaohui Xin.

2018-11-28 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk b3a052d19 -> 13a21f660


MAPREDUCE-6190. If a task stucks before its first heartbeat, it never timeouts 
and the MR job becomes stuck. Contributed by Zhaohui Xin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13a21f66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13a21f66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13a21f66

Branch: refs/heads/trunk
Commit: 13a21f66078c91df97088b01f49a5919895f7110
Parents: b3a052d
Author: Akira Ajisaka 
Authored: Wed Nov 28 17:57:42 2018 +0900
Committer: Akira Ajisaka 
Committed: Wed Nov 28 17:57:42 2018 +0900

--
 .../mapreduce/v2/app/TaskHeartbeatHandler.java  | 34 +++--
 .../v2/app/TestTaskHeartbeatHandler.java| 53 ++--
 .../apache/hadoop/mapreduce/MRJobConfig.java|  8 +++
 .../src/main/resources/mapred-default.xml   |  9 
 4 files changed, 96 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13a21f66/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java
index f8f5015..456f2a6 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java
@@ -22,6 +22,7 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
@@ -46,12 +47,14 @@ import org.slf4j.LoggerFactory;
  */
 @SuppressWarnings({"unchecked", "rawtypes"})
 public class TaskHeartbeatHandler extends AbstractService {
-  
-  private static class ReportTime {
+
+  static class ReportTime {
 private long lastProgress;
-
+private final AtomicBoolean reported;
+
 public ReportTime(long time) {
   setLastProgress(time);
+  reported = new AtomicBoolean(false);
 }
 
 public synchronized void setLastProgress(long time) {
@@ -61,6 +64,10 @@ public class TaskHeartbeatHandler extends AbstractService {
 public synchronized long getLastProgress() {
   return lastProgress;
 }
+
+public boolean isReported(){
+  return reported.get();
+}
   }
   
   private static final Logger LOG =
@@ -72,6 +79,7 @@ public class TaskHeartbeatHandler extends AbstractService {
   private volatile boolean stopped;
   private long taskTimeOut;
   private long unregisterTimeOut;
+  private long taskStuckTimeOut;
   private int taskTimeOutCheckInterval = 30 * 1000; // 30 seconds.
 
   private final EventHandler eventHandler;
@@ -98,6 +106,8 @@ public class TaskHeartbeatHandler extends AbstractService {
 MRJobConfig.TASK_TIMEOUT, MRJobConfig.DEFAULT_TASK_TIMEOUT_MILLIS);
 unregisterTimeOut = conf.getLong(MRJobConfig.TASK_EXIT_TIMEOUT,
 MRJobConfig.TASK_EXIT_TIMEOUT_DEFAULT);
+taskStuckTimeOut = conf.getLong(MRJobConfig.TASK_STUCK_TIMEOUT_MS,
+MRJobConfig.DEFAULT_TASK_STUCK_TIMEOUT_MS);
 
 // enforce task timeout is at least twice as long as task report interval
 long taskProgressReportIntervalMillis = MRJobConfUtil.
@@ -135,6 +145,7 @@ public class TaskHeartbeatHandler extends AbstractService {
 //TODO throw an exception if the task isn't registered.
 ReportTime time = runningAttempts.get(attemptID);
 if(time != null) {
+  time.reported.compareAndSet(false, true);
   time.setLastProgress(clock.getTime());
 }
   }
@@ -179,13 +190,21 @@ public class TaskHeartbeatHandler extends AbstractService 
{
 Map.Entry<TaskAttemptId, ReportTime> entry = iterator.next();
 boolean taskTimedOut = (taskTimeOut > 0) &&
 (currentTime > (entry.getValue().getLastProgress() + taskTimeOut));
+// when container in NM not started in a long time,
+// we think the taskAttempt is stuck
+boolean taskStuck = (!entry.getValue().isReported()) &&
+(currentTime >
+(entry.getValue().getLastProgress() + taskStuckTimeOut));
 
-if(taskTimedOut) {
+
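
The fix introduces a second, independent timeout: the existing task timeout only fires relative to the last progress report, so an attempt whose container never starts (and therefore never reports) previously hung forever. Tracking whether an attempt has ever reported lets the handler apply a separate stuck timeout to silent attempts. A self-contained sketch of that check, with hypothetical timeout values:

import java.util.concurrent.atomic.AtomicBoolean;

public class StuckTaskSketch {
  // Toy ReportTime: last progress timestamp plus an ever-reported flag.
  static class ReportTime {
    volatile long lastProgress;
    final AtomicBoolean reported = new AtomicBoolean(false);
    ReportTime(long now) { lastProgress = now; }
    void progress(long now) {
      reported.compareAndSet(false, true);
      lastProgress = now;
    }
  }

  // Stuck = never heartbeated AND registered longer ago than the stuck
  // timeout; such an attempt's container most likely never launched.
  static boolean isStuck(ReportTime t, long now, long stuckTimeoutMs) {
    return !t.reported.get() && now > t.lastProgress + stuckTimeoutMs;
  }

  public static void main(String[] args) {
    long t0 = 0;
    ReportTime silent = new ReportTime(t0);
    ReportTime healthy = new ReportTime(t0);
    healthy.progress(t0 + 1_000);
    System.out.println(isStuck(silent, t0 + 600_000, 300_000));  // true
    System.out.println(isStuck(healthy, t0 + 600_000, 300_000)); // false
  }
}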

[Hadoop Wiki] Update of "Books" by Packt Publishing

2018-11-28 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "Books" page has been changed by Packt Publishing:
https://wiki.apache.org/hadoop/Books?action=diff&rev1=54&rev2=55

  # Please don't have tracking URLs. We'll only cut them.
  }}}
  
+ === Hands-On Big Data Processing with Hadoop 3 (Video) ===
+ 
+ '''Name:'''  
[[https://www.packtpub.com/big-data-and-business-intelligence/hands-big-data-processing-hadoop-3-video|Hands-On
 Big Data Processing with Hadoop 3 (Video)]]
+ 
+ '''Author:''' Sudhanshu Saxena
+ 
+ '''Publisher:''' Packt
+ 
+ '''Date of Publishing:''' October 2018
+ 
+ Perform real-time data analytics, stream and batch processing on your 
application using Hadoop
+ 
  === Modern Big Data Processing with Hadoop ===
  
  '''Name:'''  [[https://www.amazon.com/dp/B0787KY8RH/|Modern Big Data 
Processing with Hadoop]]




[1/2] hadoop git commit: YARN-9034. ApplicationCLI should have option to take clusterId. Contributed by Rohith Sharma K S.

2018-11-28 Thread sumasai
Repository: hadoop
Updated Branches:
  refs/heads/trunk 34a914be0 -> b3a052d19


YARN-9034. ApplicationCLI should have option to take clusterId. Contributed by 
Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7dc27219
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7dc27219
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7dc27219

Branch: refs/heads/trunk
Commit: 7dc272199ffea28c8597151e17d4606928a7f9f3
Parents: 34a914b
Author: Suma Shivaprasad 
Authored: Wed Nov 28 00:42:00 2018 -0800
Committer: Suma Shivaprasad 
Committed: Wed Nov 28 00:42:14 2018 -0800

--
 .../hadoop/yarn/client/cli/ApplicationCLI.java  | 17 
 .../hadoop/yarn/client/cli/ClusterCLI.java  |  2 +
 .../apache/hadoop/yarn/client/cli/NodeCLI.java  |  2 +
 .../apache/hadoop/yarn/client/cli/QueueCLI.java |  2 +-
 .../apache/hadoop/yarn/client/cli/TopCLI.java   |  1 +
 .../apache/hadoop/yarn/client/cli/YarnCLI.java  |  9 +-
 .../hadoop/yarn/client/cli/TestClusterCLI.java  | 41 -
 .../hadoop/yarn/client/cli/TestYarnCLI.java | 89 ++--
 .../api/impl/TimelineReaderClientImpl.java  |  2 +
 9 files changed, 92 insertions(+), 73 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dc27219/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
index 2b34154..ff8142a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.yarn.api.records.ShellContainerCommand;
 import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.client.api.AppAdminClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
@@ -113,6 +114,7 @@ public class ApplicationCLI extends YarnCLI {
   public static final String VERSION = "version";
   public static final String STATES = "states";
   public static final String SHELL_CMD = "shell";
+  public static final String CLUSTER_ID_OPTION = "clusterId";
 
   private static String firstArg = null;
 
@@ -278,6 +280,8 @@ public class ApplicationCLI extends YarnCLI {
   "the ability to finalize the upgrade automatically.");
   opts.addOption(UPGRADE_CANCEL, false, "Works with -upgrade option to " +
   "cancel current upgrade.");
+  opts.addOption(CLUSTER_ID_OPTION, true, "ClusterId. "
+  + "By default, it will take the default cluster id from the RM");
  opts.getOption(LAUNCH_CMD).setArgName("Application Name");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dc27219/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
index 4d93949..7ead774 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
@@ -100,6 +100,8 @@ public class ClusterCLI extends YarnCLI {
   return exitCode;
 }
 
+createAndStartYarnClient();
+
 if (parsedCli.hasOption(DIRECTLY_ACCESS_NODE_LABEL_STORE)) {
   accessLocal = true;
 }
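
A minimal sketch of how the new -clusterId option could be wired, assuming commons-cli and YarnConfiguration as used in the diff above. Everything beyond the option definition (the class name, the conf override, the flow) is an illustration, not the committed code:

// Sketch: parse -clusterId and override the RM cluster id in the conf
// before any YARN client is created. This ordering is presumably why the
// ClusterCLI hunk above moves createAndStartYarnClient() to after parsing.
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ClusterIdOptionSketch {
  public static void main(String[] args) throws Exception {
    Options opts = new Options();
    opts.addOption("clusterId", true,
        "ClusterId. By default, it will take the default cluster id from the RM");

    CommandLine cli = new DefaultParser().parse(opts, args);

    YarnConfiguration conf = new YarnConfiguration();
    if (cli.hasOption("clusterId")) {
      // apply the override before building any client against the RM
      conf.set(YarnConfiguration.RM_CLUSTER_ID,
          cli.getOptionValue("clusterId"));
    }
    System.out.println("effective cluster id: "
        + conf.get(YarnConfiguration.RM_CLUSTER_ID, "<RM default>"));
  }
}
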

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dc27219/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java
 

[2/2] hadoop git commit: YARN-9044. LogsCLI should contact ATSv2 for -am option. Contributed by Rohith Sharma K S

2018-11-28 Thread sumasai
YARN-9044. LogsCLI should contact ATSv2 for -am option. Contributed by Rohith 
Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3a052d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3a052d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3a052d1

Branch: refs/heads/trunk
Commit: b3a052d199ff71da042029f27979a5323d3a6981
Parents: 7dc2721
Author: Suma Shivaprasad 
Authored: Wed Nov 28 00:46:53 2018 -0800
Committer: Suma Shivaprasad 
Committed: Wed Nov 28 00:46:53 2018 -0800

--
 .../apache/hadoop/yarn/client/cli/LogsCLI.java  | 105 +--
 .../hadoop/yarn/client/cli/TestLogsCLI.java |  37 +++
 2 files changed, 133 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3a052d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index a1550a5..96007f4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -84,6 +85,7 @@ import org.apache.hadoop.yarn.logaggregation.ContainerLogFileInfo;
 import org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest;
 import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers;
 import org.apache.hadoop.yarn.logaggregation.LogToolUtils;
+import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
 import org.codehaus.jettison.json.JSONArray;
@@ -672,16 +674,31 @@ public class LogsCLI extends Configured implements Tool {
   + "and make sure the timeline server is running.");
 } else {
   try {
-amContainersList = getAMContainerInfoForAHSWebService(conf, appId);
-if (amContainersList != null && !amContainersList.isEmpty()) {
-  getAMContainerLists = true;
-  for (JSONObject amContainer : amContainersList) {
-ContainerLogsRequest amRequest = new ContainerLogsRequest(
-request);
-amRequest.setContainerId(
-amContainer.getString("amContainerId"));
-requests.add(amRequest);
+if (YarnConfiguration.timelineServiceV2Enabled(conf)) {
+  try {
+amContainersList =
+getAMContainerInfoFromTimelineReader(conf, appId);
+getAMContainerLists =
+createContainerLogsRequestForMasterContainer(requests,
+request, amContainersList,
+AppAttemptMetricsConstants.MASTER_CONTAINER_INFO);
+  } catch (Exception e) {
+System.err.println(
+"Unable to get AM container informations from "
++ "TimelineReader for the application:" + appId);
+if (YarnConfiguration.timelineServiceV1Enabled(conf)
+|| YarnConfiguration.timelineServiceV15Enabled(conf)) {
+  getAMContainerLists =
+  getAMContainerInfoForAHSWebService(conf, appId, requests,
+  request);
+} else {
+  throw e;
+}
   }
+} else {
+  getAMContainerLists =
+  getAMContainerInfoForAHSWebService(conf, appId, requests,
+  request);
 }
   } catch (Exception e) {
 errorMessage.append(e.getMessage());
@@ -739,6 +756,76 @@ public class LogsCLI extends Configured implements Tool {
 return 0;
   }
 
+  private boolean getAMContainerInfoForAHSWebService(Configuration conf,
  String appId, List<ContainerLogsRequest> requests,
+  ContainerLogsRequest request) throws JSONException {
+List
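
The remainder of this diff is cut off by the archive. As a condensed sketch of the source-selection order the patch introduces for fetching AM container info with -am (the two fetch helpers below are stand-ins rather than the real LogsCLI methods; only the control flow mirrors the hunk above):

// Sketch of the ATSv2-first, AHS-fallback flow: try the TimelineReader
// when timeline service v2 is enabled, fall back to the AHS web service
// only if ATSv1/v1.5 is also configured, otherwise surface the failure.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class AmLogSourceSketch {
  boolean fetchAmContainers(Configuration conf, String appId)
      throws Exception {
    if (YarnConfiguration.timelineServiceV2Enabled(conf)) {
      try {
        return fetchFromTimelineReader(conf, appId);   // ATSv2 first
      } catch (Exception e) {
        if (YarnConfiguration.timelineServiceV1Enabled(conf)
            || YarnConfiguration.timelineServiceV15Enabled(conf)) {
          return fetchFromAhsWebService(conf, appId);  // fallback path
        }
        throw e;  // no older timeline service to fall back to
      }
    }
    return fetchFromAhsWebService(conf, appId);        // pre-v2 clusters
  }

  private boolean fetchFromTimelineReader(Configuration conf, String appId) {
    return true; // placeholder: query the TimelineReader for AM containers
  }

  private boolean fetchFromAhsWebService(Configuration conf, String appId) {
    return true; // placeholder: query the AHS REST API for AM containers
  }
}

Keeping the rethrow in the no-fallback branch means a pure ATSv2 cluster reports the reader failure directly instead of masking it with a second, guaranteed-to-fail AHS call.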