hadoop git commit: HDFS-12563. Ozone: Creation time wrongly showed in OzoneRest.md. Contributed by Yiqun Lin.

2017-09-28 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 6843b71dd -> 1bffc246d


HDFS-12563. Ozone: Creation time wrongly showed in OzoneRest.md. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bffc246
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bffc246
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bffc246

Branch: refs/heads/HDFS-7240
Commit: 1bffc246dba1f203a50bbc22aafb15b107ad5b16
Parents: 6843b71
Author: Yiqun Lin 
Authored: Fri Sep 29 13:21:43 2017 +0800
Committer: Yiqun Lin 
Committed: Fri Sep 29 13:21:43 2017 +0800

--
 .../hadoop-hdfs/src/site/markdown/OzoneRest.md  | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bffc246/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneRest.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneRest.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneRest.md
index d17871d..0294a53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneRest.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneRest.md
@@ -170,7 +170,7 @@ this request gets the info of volume */volume-of-bilbo*, 
the client receives a r
   "owner" : { "name" : "bilbo" },
   "quota" : { "unit" : "TB", "size" : 1048576 },
   "volumeName" : "volume-of-bilbo",
-  "createdOn" : null,
+  "createdOn" : "Tue, 27 Jun 2017 07:42:04 GMT",
   "createdBy" : "root"
 }
 
@@ -211,7 +211,7 @@ this request gets all volumes owned by *bilbo* and each 
volume's name contains p
   "owner" : { "name" : "bilbo"},
   "quota" : { "unit" : "TB", "size" : 2 },
   "volumeName" : "Jan-vol1",
-  "createdOn" : null,
+  "createdOn" : "Tue, 27 Jun 2017 07:42:04 GMT",
   "createdBy" : root
   },
   ...
@@ -334,6 +334,7 @@ this request gets the info of bucket 
*/volume-of-bilbo/bucket-0*. The client rec
 {
   "volumeName" : "volume-of-bilbo",
   "bucketName" : "bucket-0",
+  "createdOn" : "Tue, 27 Jun 2017 08:55:25 GMT",
   "acls" : [ ],
   "versioning" : "DISABLED",
   "storageType" : "DISK"
@@ -373,6 +374,7 @@ this request lists all the buckets under volume 
*volume-of-bilbo*, and the resul
   "buckets" : [ {
 "volumeName" : "volume-of-bilbo",
 "bucketName" : "bucket-0",
+"createdOn" : "Tue, 27 Jun 2017 08:55:25 GMT",
 "acls" : [ ],
 "versioning" : null,
 "storageType" : "DISK",
@@ -493,7 +495,8 @@ this request returns information of the key 
*/volume-of-bilbo/bucket-0/file-0*.
 {
   "version" : 0,
   "md5hash" : null,
-  "createdOn" : null,
+  "createdOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
+  "modifiedOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
   "size" : 0,
   "keyName" : "file-0"
 }
@@ -536,7 +539,8 @@ this request list keys under bucket 
*/volume-of-bilbo/bucket-0*, the listing res
   "keyList" : [ {
   "version" : 0,
   "md5hash" : null,
-  "createdOn" : null,
+  "createdOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
+  "modifiedOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
   "size" : 0,
   "keyName" : "file-0"
   },


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12511. Ozone: Add tags to config. Contributed by Ajay Kumar.

2017-09-28 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 ae1e3cfb7 -> 6843b71dd


HDFS-12511. Ozone: Add tags to config. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6843b71d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6843b71d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6843b71d

Branch: refs/heads/HDFS-7240
Commit: 6843b71dd271e179bdbc3e66d4c21872a7432b30
Parents: ae1e3cf
Author: Anu Engineer 
Authored: Thu Sep 28 17:29:55 2017 -0700
Committer: Anu Engineer 
Committed: Thu Sep 28 17:29:55 2017 -0700

--
 .../org/apache/hadoop/conf/Configuration.java   |   1 +
 .../apache/hadoop/conf/OzonePropertyTag.java|  47 +
 .../src/main/resources/ozone-default.xml| 101 +++
 .../ozone/TestOzoneConfigurationFields.java |  31 ++
 4 files changed, 180 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6843b71d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 2890853..f7dfcbe 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -755,6 +755,7 @@ public class Configuration implements 
Iterable<Map.Entry<String, String>>,
 REGISTERED_TAG_CLASS.put("core", CorePropertyTag.class);
 REGISTERED_TAG_CLASS.put("hdfs", HDFSPropertyTag.class);
 REGISTERED_TAG_CLASS.put("yarn", YarnPropertyTag.class);
+REGISTERED_TAG_CLASS.put("ozone", OzonePropertyTag.class);
 
 synchronized(Configuration.class) {
   REGISTRY.put(this, null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6843b71d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/OzonePropertyTag.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/OzonePropertyTag.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/OzonePropertyTag.java
new file mode 100644
index 000..162e4e1
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/OzonePropertyTag.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.conf;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/***
+ * Enum for tagging ozone properties according to their usage.
+ * OzonePropertyTag implements the
+ * {@link org.apache.hadoop.conf.PropertyTag} interface,
+ ***/
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public enum OzonePropertyTag implements PropertyTag {
+  OZONE,
+  MANAGEMENT,
+  SECURITY,
+  PERFORMANCE,
+  DEBUG,
+  CLIENT,
+  SERVER,
+  KSM,
+  SCM,
+  CRITICAL,
+  RATIS,
+  CONTAINER,
+  REQUIRED,
+  REST,
+  STORAGE,
+  PIPELINE,
+  STANDALONE
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6843b71d/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml
index 0997f2a..5e5469b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml
@@ -26,6 +26,7 @@
   
 ozone.enabled
 

[1/4] hadoop git commit: YARN-6623. Add support to turn off launching privileged containers in the container-executor. (Varun Vasudev via wangda)

2017-09-28 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 3d2352211 -> 091fc32ce


http://git-wip-us.apache.org/repos/asf/hadoop/blob/091fc32c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
index 60fce40..05b44b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
@@ -114,8 +114,10 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("rm " + MOCK_CONTAINER_ID, dockerCommands.get(0));
+assertEquals(3, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=rm", dockerCommands.get(1));
+assertEquals("  name=" + MOCK_CONTAINER_ID, dockerCommands.get(2));
   }
 
   @Test
@@ -130,8 +132,10 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("stop " + MOCK_CONTAINER_ID, dockerCommands.get(0));
+assertEquals(3, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=stop", dockerCommands.get(1));
+assertEquals("  name=" + MOCK_CONTAINER_ID, dockerCommands.get(2));
   }
 
   @Test
@@ -147,9 +151,12 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("inspect --format='{{.State.Status}}' " + MOCK_CONTAINER_ID,
-dockerCommands.get(0));
+assertEquals(4, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=inspect", dockerCommands.get(1));
+assertEquals("  format={{.State.Status}}", dockerCommands.get(2));
+assertEquals("  name=" + MOCK_CONTAINER_ID, dockerCommands.get(3));
+
   }
 
   @Test
@@ -165,8 +172,10 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("pull " + MOCK_IMAGE_NAME, dockerCommands.get(0));
+assertEquals(3, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=pull", dockerCommands.get(1));
+assertEquals("  image=" + MOCK_IMAGE_NAME, dockerCommands.get(2));
   }
 
   @Test
@@ -182,8 +191,12 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("load --i=" + MOCK_LOCAL_IMAGE_NAME, dockerCommands.get(0));
+assertEquals(3, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=load", dockerCommands.get(1));
+assertEquals("  image=" + MOCK_LOCAL_IMAGE_NAME, dockerCommands.get(2));
+
+
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/091fc32c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerInspectCommand.java
--
diff --git 

[3/4] hadoop git commit: YARN-6623. Add support to turn off launching privileged containers in the container-executor. (Varun Vasudev via wangda)

2017-09-28 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/091fc32c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
new file mode 100644
index 000..860320d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -0,0 +1,998 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include "../modules/common/module-configs.h"
+#include "docker-util.h"
+#include "string-utils.h"
+#include "util.h"
+
+static int read_and_verify_command_file(const char *command_file, const char 
*docker_command,
+struct configuration *command_config) {
+  int ret = 0;
+  ret = read_config(command_file, command_config);
+  if (ret != 0) {
+return INVALID_COMMAND_FILE;
+  }
+  char *command = get_configuration_value("docker-command", 
DOCKER_COMMAND_FILE_SECTION, command_config);
+  if (command == NULL || (strcmp(command, docker_command) != 0)) {
+ret = INCORRECT_COMMAND;
+  }
+  free(command);
+  return ret;
+}
+
+static int add_to_buffer(char *buff, const size_t bufflen, const char *string) 
{
+  size_t current_len = strlen(buff);
+  size_t string_len = strlen(string);
+  if (current_len + string_len < bufflen - 1) {
+strncpy(buff + current_len, string, string_len);
+buff[current_len + string_len] = '\0';
+return 0;
+  }
+  return -1;
+}
+
+static int add_param_to_command(const struct configuration *command_config, 
const char *key, const char *param,
+const int with_argument, char *out, const 
size_t outlen) {
+  size_t tmp_buffer_size = 4096;
+  int ret = 0;
+  char *tmp_buffer = (char *) alloc_and_clear_memory(tmp_buffer_size, 
sizeof(char));
+  char *value = get_configuration_value(key, DOCKER_COMMAND_FILE_SECTION, 
command_config);
+  if (value != NULL) {
+if (with_argument) {
+  quote_and_append_arg(&tmp_buffer, &tmp_buffer_size, param, value);
+  ret = add_to_buffer(out, outlen, tmp_buffer);
+} else if (strcmp(value, "true") == 0) {
+  ret = add_to_buffer(out, outlen, param);
+}
+free(value);
+if (ret != 0) {
+  ret = BUFFER_TOO_SMALL;
+}
+  }
+  free(tmp_buffer);
+  return ret;
+}
+
+static int add_param_to_command_if_allowed(const struct configuration 
*command_config,
+   const struct configuration 
*executor_cfg,
+   const char *key, const char 
*allowed_key, const char *param,
+   const int multiple_values, const 
char prefix,
+   char *out, const size_t outlen) {
+  size_t tmp_buffer_size = 4096;
+  char *tmp_buffer = (char *) alloc_and_clear_memory(tmp_buffer_size, 
sizeof(char));
+  char *tmp_ptr = NULL;
+  char **values = NULL;
+  char **permitted_values = get_configuration_values_delimiter(allowed_key,
+   
CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, executor_cfg,
+   ",");
+  int i = 0, j = 0, permitted = 0, ret = 0;
+  if (multiple_values) {
+values = get_configuration_values_delimiter(key, 
DOCKER_COMMAND_FILE_SECTION, command_config, ",");
+  } else {
+values = (char **) alloc_and_clear_memory(2, sizeof(char *));
+values[0] = get_configuration_value(key, DOCKER_COMMAND_FILE_SECTION, 
command_config);
+values[1] = NULL;
+if (values[0] == NULL) {
+  ret = 0;
+  goto free_and_exit;
+}
+  }
+
+  if (values != NULL) {
+if (permitted_values != NULL) {
+  for (i = 0; values[i] != NULL; ++i) {
+

[2/4] hadoop git commit: YARN-6623. Add support to turn off launching privileged containers in the container-executor. (Varun Vasudev via wangda)

2017-09-28 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/091fc32c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
new file mode 100644
index 000..c627ca8
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
@@ -0,0 +1,1122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <fstream>
+#include "errno.h"
+
+extern "C" {
+#include "utils/docker-util.c"
+}
+
+namespace ContainerExecutor {
+
+  class TestDockerUtil : public ::testing::Test {
+  protected:
+virtual void SetUp() {
+  docker_command_file = "docker-command.cmd";
+  container_executor_cfg_file = "container-executor.cfg";
+  container_executor_cfg.size = 0;
+  container_executor_cfg.sections = NULL;
+}
+
+virtual void TearDown() {
+  remove(docker_command_file.c_str());
+  remove(container_executor_cfg_file.c_str());
+  delete_ce_file();
+}
+
+struct configuration container_executor_cfg;
+std::string docker_command_file;
+std::string container_executor_cfg_file;
+
+
+void write_file(const std::string fname, const std::string contents) {
+  std::ofstream command_file;
+  command_file.open(fname.c_str());
+  command_file << contents;
+  command_file.close();
+}
+
+int create_ce_file() {
+  int ret = 0;
+  const char *fname = HADOOP_CONF_DIR "/" CONF_FILENAME;
+  if (strcmp("../etc/hadoop/container-executor.cfg", fname) == 0) {
+ret = mkdir("../etc", 0755);
+if (ret == 0 || errno == EEXIST) {
+  ret = mkdir("../etc/hadoop", 0755);
+  if (ret == 0 || errno == EEXIST) {
+write_file("../etc/hadoop/container-executor.cfg", "");
+return 0;
+  } else {
+std::cerr << "Could not create ../etc/hadoop, " << strerror(errno) 
<< std::endl;
+  }
+} else {
+  std::cerr << "Could not create ../etc, " << strerror(errno) << 
std::endl;
+}
+  }
+  std::cerr << "Could not create " << fname << std::endl;
+  return 1;
+}
+
+void delete_ce_file() {
+  const char *fname = HADOOP_CONF_DIR "/" CONF_FILENAME;
+  if (strcmp("../etc/hadoop/container-executor.cfg", fname) == 0) {
+struct stat buffer;
+if (stat(fname, &buffer) == 0) {
+  remove("../etc/hadoop/container-executor.cfg");
+  rmdir("../etc/hadoop");
+  rmdir("../etc");
+}
+  }
+}
+
+void write_container_executor_cfg(const std::string contents) {
+  write_file(container_executor_cfg_file, contents);
+}
+
+void write_command_file(const std::string contents) {
+  write_file(docker_command_file, contents);
+}
+
+void run_docker_command_test(const std::vector<std::pair<std::string, std::string>> &file_cmd_vec,
+ const std::vector<std::pair<std::string, int>> &bad_file_cmd_vec,
+ int (*docker_func)(const char *, const struct 
configuration *, char *, const size_t)) {
+  char tmp[8192];
std::vector<std::pair<std::string, std::string>>::const_iterator itr;
+  for (itr = file_cmd_vec.begin(); itr != file_cmd_vec.end(); ++itr) {
+memset(tmp, 0, 8192);
+write_command_file(itr->first);
+int ret = (*docker_func)(docker_command_file.c_str(), 
&container_executor_cfg, tmp, 8192);
+ASSERT_EQ(0, ret) << "error message: " << 
get_docker_error_message(ret) << " for input " << itr->first;
+ASSERT_STREQ(itr->second.c_str(), tmp);
+  }
+
+  std::vector<std::pair<std::string, int>>::const_iterator itr2;
+  for (itr2 = bad_file_cmd_vec.begin(); itr2 != 

[4/4] hadoop git commit: YARN-6623. Add support to turn off launching privileged containers in the container-executor. (Varun Vasudev via wangda)

2017-09-28 Thread wangda
YARN-6623. Add support to turn off launching privileged containers in the 
container-executor. (Varun Vasudev via wangda)

Change-Id: I76aec68cbfb42d239279d7245a03290d691e87a4
(cherry picked from commit d3b1c6319546706c41a2011ead6c3fe208883200)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/091fc32c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/091fc32c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/091fc32c

Branch: refs/heads/branch-3.0
Commit: 091fc32ce8cca7c437e3c271692e221522ce1254
Parents: 3d23522
Author: Wangda Tan 
Authored: Thu Sep 28 16:41:09 2017 -0700
Committer: Wangda Tan 
Committed: Thu Sep 28 16:46:23 2017 -0700

--
 .../hadoop-yarn/conf/container-executor.cfg |   12 +
 .../src/CMakeLists.txt  |5 +-
 .../runtime/DockerLinuxContainerRuntime.java|   11 +-
 .../linux/runtime/docker/DockerClient.java  |   25 +-
 .../linux/runtime/docker/DockerCommand.java |   55 +-
 .../runtime/docker/DockerCommandExecutor.java   |3 +-
 .../runtime/docker/DockerInspectCommand.java|   13 +-
 .../linux/runtime/docker/DockerLoadCommand.java |2 +-
 .../linux/runtime/docker/DockerPullCommand.java |2 +-
 .../linux/runtime/docker/DockerRmCommand.java   |4 +-
 .../linux/runtime/docker/DockerRunCommand.java  |   68 +-
 .../linux/runtime/docker/DockerStopCommand.java |6 +-
 .../container-executor/impl/configuration.c |   17 +
 .../container-executor/impl/configuration.h |   19 +-
 .../impl/container-executor.c   |  316 +
 .../impl/container-executor.h   |9 -
 .../container-executor/impl/get_executable.c|3 -
 .../container-executor/impl/get_executable.h|   29 +
 .../main/native/container-executor/impl/main.c  |   31 +-
 .../impl/modules/common/module-configs.c|3 +-
 .../impl/modules/common/module-configs.h|1 +
 .../main/native/container-executor/impl/util.c  |   60 +-
 .../main/native/container-executor/impl/util.h  |   46 +-
 .../container-executor/impl/utils/docker-util.c |  998 
 .../container-executor/impl/utils/docker-util.h |  147 +++
 .../impl/utils/string-utils.c   |1 -
 .../docker-container-executor.cfg   |   13 +
 .../test/test-container-executor.c  |  149 +--
 .../native/container-executor/test/test_util.cc |   37 +-
 .../test/utils/test-string-utils.cc |   37 +-
 .../test/utils/test_docker_util.cc  | 1122 ++
 .../runtime/TestDockerContainerRuntime.java |  398 ---
 .../docker/TestDockerCommandExecutor.java   |   35 +-
 .../docker/TestDockerInspectCommand.java|   29 +-
 .../runtime/docker/TestDockerLoadCommand.java   |9 +-
 .../runtime/docker/TestDockerPullCommand.java   |8 +-
 .../runtime/docker/TestDockerRmCommand.java |8 +-
 .../runtime/docker/TestDockerRunCommand.java|   25 +-
 .../runtime/docker/TestDockerStopCommand.java   |   15 +-
 .../src/site/markdown/DockerContainers.md   |   33 +-
 40 files changed, 3067 insertions(+), 737 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/091fc32c/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
--
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg 
b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
index d68cee8..023654b 100644
--- a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
+++ b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
@@ -2,3 +2,15 @@ yarn.nodemanager.linux-container-executor.group=#configured 
value of yarn.nodema
 banned.users=#comma separated list of users who can not run applications
 min.user.id=1000#Prevent other super-users
 allowed.system.users=##comma separated list of system users who CAN run 
applications
+feature.tc.enabled=0
+
+# The configs below deal with settings for Docker
+#[docker]
+#  module.enabled=## enable/disable the module. set to "true" to enable, 
disabled by default
+#  docker.binary=/usr/bin/docker
+#  docker.allowed.capabilities=## comma seperated capabilities that can be 
granted, e.g 
CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
+#  docker.allowed.devices=## comma seperated list of devices that can be 
mounted into a container
+#  docker.allowed.networks=## comma seperated networks that can be used. e.g 
bridge,host,none
+#  docker.allowed.ro-mounts=## comma seperated volumes that can be mounted as 
read-only
+#  docker.allowed.rw-mounts=## comma seperate volumes that can be mounted as 
read-write, add the yarn local and log dirs to 

[1/4] hadoop git commit: YARN-6623. Add support to turn off launching privileged containers in the container-executor. (Varun Vasudev via wangda)

2017-09-28 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk c114da5e6 -> d3b1c6319


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3b1c631/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
index 60fce40..05b44b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
@@ -114,8 +114,10 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("rm " + MOCK_CONTAINER_ID, dockerCommands.get(0));
+assertEquals(3, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=rm", dockerCommands.get(1));
+assertEquals("  name=" + MOCK_CONTAINER_ID, dockerCommands.get(2));
   }
 
   @Test
@@ -130,8 +132,10 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("stop " + MOCK_CONTAINER_ID, dockerCommands.get(0));
+assertEquals(3, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=stop", dockerCommands.get(1));
+assertEquals("  name=" + MOCK_CONTAINER_ID, dockerCommands.get(2));
   }
 
   @Test
@@ -147,9 +151,12 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("inspect --format='{{.State.Status}}' " + MOCK_CONTAINER_ID,
-dockerCommands.get(0));
+assertEquals(4, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=inspect", dockerCommands.get(1));
+assertEquals("  format={{.State.Status}}", dockerCommands.get(2));
+assertEquals("  name=" + MOCK_CONTAINER_ID, dockerCommands.get(3));
+
   }
 
   @Test
@@ -165,8 +172,10 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("pull " + MOCK_IMAGE_NAME, dockerCommands.get(0));
+assertEquals(3, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=pull", dockerCommands.get(1));
+assertEquals("  image=" + MOCK_IMAGE_NAME, dockerCommands.get(2));
   }
 
   @Test
@@ -182,8 +191,12 @@ public class TestDockerCommandExecutor {
 assertEquals(1, ops.size());
 assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
 ops.get(0).getOperationType().name());
-assertEquals(1, dockerCommands.size());
-assertEquals("load --i=" + MOCK_LOCAL_IMAGE_NAME, dockerCommands.get(0));
+assertEquals(3, dockerCommands.size());
+assertEquals("[docker-command-execution]", dockerCommands.get(0));
+assertEquals("  docker-command=load", dockerCommands.get(1));
+assertEquals("  image=" + MOCK_LOCAL_IMAGE_NAME, dockerCommands.get(2));
+
+
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3b1c631/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerInspectCommand.java
--
diff --git 

[2/4] hadoop git commit: YARN-6623. Add support to turn off launching privileged containers in the container-executor. (Varun Vasudev via wangda)

2017-09-28 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3b1c631/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
new file mode 100644
index 000..c627ca8
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
@@ -0,0 +1,1122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include 
+#include 
+#include "errno.h"
+
+extern "C" {
+#include "utils/docker-util.c"
+}
+
+namespace ContainerExecutor {
+
+  class TestDockerUtil : public ::testing::Test {
+  protected:
+virtual void SetUp() {
+  docker_command_file = "docker-command.cmd";
+  container_executor_cfg_file = "container-executor.cfg";
+  container_executor_cfg.size = 0;
+  container_executor_cfg.sections = NULL;
+}
+
+virtual void TearDown() {
+  remove(docker_command_file.c_str());
+  remove(container_executor_cfg_file.c_str());
+  delete_ce_file();
+}
+
+struct configuration container_executor_cfg;
+std::string docker_command_file;
+std::string container_executor_cfg_file;
+
+
+void write_file(const std::string fname, const std::string contents) {
+  std::ofstream command_file;
+  command_file.open(fname.c_str());
+  command_file << contents;
+  command_file.close();
+}
+
+int create_ce_file() {
+  int ret = 0;
+  const char *fname = HADOOP_CONF_DIR "/" CONF_FILENAME;
+  if (strcmp("../etc/hadoop/container-executor.cfg", fname) == 0) {
+ret = mkdir("../etc", 0755);
+if (ret == 0 || errno == EEXIST) {
+  ret = mkdir("../etc/hadoop", 0755);
+  if (ret == 0 || errno == EEXIST) {
+write_file("../etc/hadoop/container-executor.cfg", "");
+return 0;
+  } else {
+std::cerr << "Could not create ../etc/hadoop, " << strerror(errno) 
<< std::endl;
+  }
+} else {
+  std::cerr << "Could not create ../etc, " << strerror(errno) << 
std::endl;
+}
+  }
+  std::cerr << "Could not create " << fname << std::endl;
+  return 1;
+}
+
+void delete_ce_file() {
+  const char *fname = HADOOP_CONF_DIR "/" CONF_FILENAME;
+  if (strcmp("../etc/hadoop/container-executor.cfg", fname) == 0) {
+struct stat buffer;
+if (stat(fname, ) == 0) {
+  remove("../etc/hadoop/container-executor.cfg");
+  rmdir("../etc/hadoop");
+  rmdir("../etc");
+}
+  }
+}
+
+void write_container_executor_cfg(const std::string contents) {
+  write_file(container_executor_cfg_file, contents);
+}
+
+void write_command_file(const std::string contents) {
+  write_file(docker_command_file, contents);
+}
+
+void run_docker_command_test(const std::vector > _cmd_vec,
+ const std::vector 
> _file_cmd_vec,
+ int (*docker_func)(const char *, const struct 
configuration *, char *, const size_t)) {
+  char tmp[8192];
+  std::vector >::const_iterator itr;
+  for (itr = file_cmd_vec.begin(); itr != file_cmd_vec.end(); ++itr) {
+memset(tmp, 0, 8192);
+write_command_file(itr->first);
+int ret = (*docker_func)(docker_command_file.c_str(), 
_executor_cfg, tmp, 8192);
+ASSERT_EQ(0, ret) << "error message: " << 
get_docker_error_message(ret) << " for input " << itr->first;
+ASSERT_STREQ(itr->second.c_str(), tmp);
+  }
+
+  std::vector >::const_iterator itr2;
+  for (itr2 = bad_file_cmd_vec.begin(); itr2 != 

[4/4] hadoop git commit: YARN-6623. Add support to turn off launching privileged containers in the container-executor. (Varun Vasudev via wangda)

2017-09-28 Thread wangda
YARN-6623. Add support to turn off launching privileged containers in the 
container-executor. (Varun Vasudev via wangda)

Change-Id: I76aec68cbfb42d239279d7245a03290d691e87a4


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3b1c631
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3b1c631
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3b1c631

Branch: refs/heads/trunk
Commit: d3b1c6319546706c41a2011ead6c3fe208883200
Parents: c114da5
Author: Wangda Tan 
Authored: Thu Sep 28 16:41:09 2017 -0700
Committer: Wangda Tan 
Committed: Thu Sep 28 16:41:09 2017 -0700

--
 .../hadoop-yarn/conf/container-executor.cfg |   12 +
 .../src/CMakeLists.txt  |5 +-
 .../runtime/DockerLinuxContainerRuntime.java|   11 +-
 .../linux/runtime/docker/DockerClient.java  |   25 +-
 .../linux/runtime/docker/DockerCommand.java |   55 +-
 .../runtime/docker/DockerCommandExecutor.java   |3 +-
 .../runtime/docker/DockerInspectCommand.java|   13 +-
 .../linux/runtime/docker/DockerLoadCommand.java |2 +-
 .../linux/runtime/docker/DockerPullCommand.java |2 +-
 .../linux/runtime/docker/DockerRmCommand.java   |4 +-
 .../linux/runtime/docker/DockerRunCommand.java  |   68 +-
 .../linux/runtime/docker/DockerStopCommand.java |6 +-
 .../container-executor/impl/configuration.c |   17 +
 .../container-executor/impl/configuration.h |   19 +-
 .../impl/container-executor.c   |  316 +
 .../impl/container-executor.h   |9 -
 .../container-executor/impl/get_executable.c|3 -
 .../container-executor/impl/get_executable.h|   29 +
 .../main/native/container-executor/impl/main.c  |   31 +-
 .../impl/modules/common/module-configs.c|3 +-
 .../impl/modules/common/module-configs.h|1 +
 .../main/native/container-executor/impl/util.c  |   60 +-
 .../main/native/container-executor/impl/util.h  |   46 +-
 .../container-executor/impl/utils/docker-util.c |  998 
 .../container-executor/impl/utils/docker-util.h |  147 +++
 .../impl/utils/string-utils.c   |1 -
 .../docker-container-executor.cfg   |   13 +
 .../test/test-container-executor.c  |  149 +--
 .../native/container-executor/test/test_util.cc |   37 +-
 .../test/utils/test-string-utils.cc |   37 +-
 .../test/utils/test_docker_util.cc  | 1122 ++
 .../runtime/TestDockerContainerRuntime.java |  398 ---
 .../docker/TestDockerCommandExecutor.java   |   35 +-
 .../docker/TestDockerInspectCommand.java|   29 +-
 .../runtime/docker/TestDockerLoadCommand.java   |9 +-
 .../runtime/docker/TestDockerPullCommand.java   |8 +-
 .../runtime/docker/TestDockerRmCommand.java |8 +-
 .../runtime/docker/TestDockerRunCommand.java|   25 +-
 .../runtime/docker/TestDockerStopCommand.java   |   15 +-
 .../src/site/markdown/DockerContainers.md   |   33 +-
 40 files changed, 3067 insertions(+), 737 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3b1c631/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
--
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg 
b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
index d68cee8..023654b 100644
--- a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
+++ b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
@@ -2,3 +2,15 @@ yarn.nodemanager.linux-container-executor.group=#configured 
value of yarn.nodema
 banned.users=#comma separated list of users who can not run applications
 min.user.id=1000#Prevent other super-users
 allowed.system.users=##comma separated list of system users who CAN run 
applications
+feature.tc.enabled=0
+
+# The configs below deal with settings for Docker
+#[docker]
+#  module.enabled=## enable/disable the module. set to "true" to enable, 
disabled by default
+#  docker.binary=/usr/bin/docker
+#  docker.allowed.capabilities=## comma seperated capabilities that can be 
granted, e.g 
CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
+#  docker.allowed.devices=## comma seperated list of devices that can be 
mounted into a container
+#  docker.allowed.networks=## comma seperated networks that can be used. e.g 
bridge,host,none
+#  docker.allowed.ro-mounts=## comma seperated volumes that can be mounted as 
read-only
+#  docker.allowed.rw-mounts=## comma seperate volumes that can be mounted as 
read-write, add the yarn local and log dirs to this list to run Hadoop jobs
+#  docker.privileged-containers.enabled=0


[3/4] hadoop git commit: YARN-6623. Add support to turn off launching privileged containers in the container-executor. (Varun Vasudev via wangda)

2017-09-28 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3b1c631/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
new file mode 100644
index 000..860320d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -0,0 +1,998 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include "../modules/common/module-configs.h"
+#include "docker-util.h"
+#include "string-utils.h"
+#include "util.h"
+
+static int read_and_verify_command_file(const char *command_file, const char 
*docker_command,
+struct configuration *command_config) {
+  int ret = 0;
+  ret = read_config(command_file, command_config);
+  if (ret != 0) {
+return INVALID_COMMAND_FILE;
+  }
+  char *command = get_configuration_value("docker-command", 
DOCKER_COMMAND_FILE_SECTION, command_config);
+  if (command == NULL || (strcmp(command, docker_command) != 0)) {
+ret = INCORRECT_COMMAND;
+  }
+  free(command);
+  return ret;
+}
+
+static int add_to_buffer(char *buff, const size_t bufflen, const char *string) 
{
+  size_t current_len = strlen(buff);
+  size_t string_len = strlen(string);
+  if (current_len + string_len < bufflen - 1) {
+strncpy(buff + current_len, string, string_len);
+buff[current_len + string_len] = '\0';
+return 0;
+  }
+  return -1;
+}
+
+static int add_param_to_command(const struct configuration *command_config, 
const char *key, const char *param,
+const int with_argument, char *out, const 
size_t outlen) {
+  size_t tmp_buffer_size = 4096;
+  int ret = 0;
+  char *tmp_buffer = (char *) alloc_and_clear_memory(tmp_buffer_size, 
sizeof(char));
+  char *value = get_configuration_value(key, DOCKER_COMMAND_FILE_SECTION, 
command_config);
+  if (value != NULL) {
+if (with_argument) {
+  quote_and_append_arg(_buffer, _buffer_size, param, value);
+  ret = add_to_buffer(out, outlen, tmp_buffer);
+} else if (strcmp(value, "true") == 0) {
+  ret = add_to_buffer(out, outlen, param);
+}
+free(value);
+if (ret != 0) {
+  ret = BUFFER_TOO_SMALL;
+}
+  }
+  free(tmp_buffer);
+  return ret;
+}
+
+static int add_param_to_command_if_allowed(const struct configuration 
*command_config,
+   const struct configuration 
*executor_cfg,
+   const char *key, const char 
*allowed_key, const char *param,
+   const int multiple_values, const 
char prefix,
+   char *out, const size_t outlen) {
+  size_t tmp_buffer_size = 4096;
+  char *tmp_buffer = (char *) alloc_and_clear_memory(tmp_buffer_size, 
sizeof(char));
+  char *tmp_ptr = NULL;
+  char **values = NULL;
+  char **permitted_values = get_configuration_values_delimiter(allowed_key,
+   
CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, executor_cfg,
+   ",");
+  int i = 0, j = 0, permitted = 0, ret = 0;
+  if (multiple_values) {
+values = get_configuration_values_delimiter(key, 
DOCKER_COMMAND_FILE_SECTION, command_config, ",");
+  } else {
+values = (char **) alloc_and_clear_memory(2, sizeof(char *));
+values[0] = get_configuration_value(key, DOCKER_COMMAND_FILE_SECTION, 
command_config);
+values[1] = NULL;
+if (values[0] == NULL) {
+  ret = 0;
+  goto free_and_exit;
+}
+  }
+
+  if (values != NULL) {
+if (permitted_values != NULL) {
+  for (i = 0; values[i] != NULL; ++i) {
+

hadoop git commit: YARN-6626. Embed REST API service into RM. Contributed by Eric Yang

2017-09-28 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/yarn-native-services 281c1d1e8 -> 63d1084e9


YARN-6626. Embed REST API service into RM. Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63d1084e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63d1084e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63d1084e

Branch: refs/heads/yarn-native-services
Commit: 63d1084e9781e0fee876916190b69f6242dd00e4
Parents: 281c1d1
Author: Jian He 
Authored: Thu Sep 28 16:29:22 2017 -0700
Committer: Jian He 
Committed: Thu Sep 28 16:40:28 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  2 ++
 .../hadoop/yarn/service/webapp/ApiServer.java   |  7 
 .../hadoop-yarn-services-core/pom.xml   |  6 
 .../yarn/service/api/records/Artifact.java  |  8 +
 .../yarn/service/api/records/ConfigFile.java|  8 +
 .../service/api/records/ReadinessCheck.java |  8 +
 .../src/main/resources/yarn-default.xml |  8 +
 .../server/resourcemanager/webapp/RMWebApp.java | 19 +++
 .../site/markdown/yarn-service/QuickStart.md| 34 +---
 9 files changed, 89 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63d1084e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f58833c..c7f0075 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -336,6 +336,8 @@ public class YarnConfiguration extends Configuration {
 
   public static final String YARN_WEBAPP_UI2_WARFILE_PATH = "yarn."
   + "webapp.ui2.war-file-path";
+  public static final String YARN_API_SERVICES_ENABLE = "yarn."
+  + "webapp.api-service.enable";
 
   public static final String RM_RESOURCE_TRACKER_ADDRESS =
 RM_PREFIX + "resource-tracker.address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63d1084e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index e8286ef..89b020d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -17,6 +17,7 @@
 
 package org.apache.hadoop.yarn.service.webapp;
 
+import com.google.inject.Inject;
 import com.google.inject.Singleton;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.VersionInfo;
@@ -57,6 +58,12 @@ import static 
org.apache.hadoop.yarn.service.conf.RestApiConstants.*;
 @Singleton
 @Path(CONTEXT_ROOT)
 public class ApiServer {
+
+  @Inject
+  public ApiServer(Configuration conf) {
+super();
+  }
+
   private static final Logger LOG =
   LoggerFactory.getLogger(ApiServer.class);
   private static Configuration YARN_CONFIG = new YarnConfiguration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63d1084e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
index fb07edc..205a64d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
@@ -245,12 +245,6 @@
 
 
 
-  org.apache.hadoop
-  

hadoop git commit: HDFS-12488. Ozone: OzoneRestClient timeout is not configurable. Contributed by Weiwei Yang.

2017-09-28 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 5519edce5 -> ae1e3cfb7


HDFS-12488. Ozone: OzoneRestClient timeout is not configurable. Contributed by 
Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae1e3cfb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae1e3cfb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae1e3cfb

Branch: refs/heads/HDFS-7240
Commit: ae1e3cfb73cf716530fcf8e54332a0e899485b78
Parents: 5519edc
Author: Anu Engineer 
Authored: Thu Sep 28 15:42:31 2017 -0700
Committer: Anu Engineer 
Committed: Thu Sep 28 15:42:31 2017 -0700

--
 .../main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae1e3cfb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
index 9ccd23c..19bfc18 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
@@ -699,7 +699,7 @@ public final class OzoneClientUtils {
* @return a default instance of {@link CloseableHttpClient}.
*/
   public static CloseableHttpClient newHttpClient() {
-return OzoneClientUtils.newHttpClient(null);
+return OzoneClientUtils.newHttpClient(new OzoneConfiguration());
   }
 
   /**


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7250. Update Shared cache client api to use URLs.

2017-09-28 Thread ctrezzo
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 11ac10fe1 -> cba1891b6


YARN-7250. Update Shared cache client api to use URLs.

(cherry picked from commit c114da5e64d14b1d9e614081c4171ea0391cb1aa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cba1891b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cba1891b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cba1891b

Branch: refs/heads/branch-2
Commit: cba1891b61e8c049d79b110da9dd4832467a9f08
Parents: 11ac10f
Author: Chris Trezzo 
Authored: Thu Sep 28 15:28:06 2017 -0700
Committer: Chris Trezzo 
Committed: Thu Sep 28 15:32:18 2017 -0700

--
 .../yarn/client/api/SharedCacheClient.java  | 22 
 .../client/api/impl/SharedCacheClientImpl.java  | 36 +---
 .../api/impl/TestSharedCacheClientImpl.java | 31 +
 3 files changed, 23 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cba1891b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
index 60c1bd98..a9c1a07 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
@@ -27,6 +27,7 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.client.api.impl.SharedCacheClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 
@@ -58,34 +59,25 @@ public abstract class SharedCacheClient extends 
AbstractService {
*
* 
* The SharedCacheManager responds with whether or not the
-   * resource exists in the cache. If the resource exists, a Path
-   * to the resource in the shared cache is returned. If the resource does not
+   * resource exists in the cache. If the resource exists, a URL 
to
+   * the resource in the shared cache is returned. If the resource does not
* exist, null is returned instead.
* 
*
* 
-   * Once a path has been returned for a resource, that path is safe to use for
+   * Once a URL has been returned for a resource, that URL is safe to use for
* the lifetime of the application that corresponds to the provided
* ApplicationId.
* 
*
-   * 
-   * Additionally, a name for the resource should be specified. A fragment will
-   * be added to the path with the desired name if the desired name is 
different
-   * than the name of the provided path from the shared cache. This ensures 
that
-   * if the returned path is used to create a LocalResource, then the symlink
-   * created during YARN localization will match the name specified.
-   * 
-   *
* @param applicationId ApplicationId of the application using the resource
* @param resourceKey the key (i.e. checksum) that identifies the resource
-   * @param resourceName the desired name of the resource
-   * @return Path to the resource, or null if it does not exist
+   * @return URL to the resource, or null if it does not exist
*/
   @Public
   @Unstable
-  public abstract Path use(ApplicationId applicationId, String resourceKey,
-  String resourceName) throws YarnException;
+  public abstract URL use(ApplicationId applicationId, String resourceKey)
+  throws YarnException;
 
   /**
* 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cba1891b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
index b910c28..3191d36 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
+++ 

hadoop git commit: YARN-7250. Update Shared cache client api to use URLs.

2017-09-28 Thread ctrezzo
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 a6630f703 -> 3d2352211


YARN-7250. Update Shared cache client api to use URLs.

(cherry picked from commit c114da5e64d14b1d9e614081c4171ea0391cb1aa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d235221
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d235221
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d235221

Branch: refs/heads/branch-3.0
Commit: 3d235221106bd1ff2d99f660e89c6f3306f578ad
Parents: a6630f7
Author: Chris Trezzo 
Authored: Thu Sep 28 15:28:06 2017 -0700
Committer: Chris Trezzo 
Committed: Thu Sep 28 15:31:23 2017 -0700

--
 .../yarn/client/api/SharedCacheClient.java  | 22 
 .../client/api/impl/SharedCacheClientImpl.java  | 36 +---
 .../api/impl/TestSharedCacheClientImpl.java | 31 +
 3 files changed, 23 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d235221/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
index 60c1bd98..a9c1a07 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
@@ -27,6 +27,7 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.client.api.impl.SharedCacheClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 
@@ -58,34 +59,25 @@ public abstract class SharedCacheClient extends 
AbstractService {
*
* 
* The SharedCacheManager responds with whether or not the
-   * resource exists in the cache. If the resource exists, a Path
-   * to the resource in the shared cache is returned. If the resource does not
+   * resource exists in the cache. If the resource exists, a URL 
to
+   * the resource in the shared cache is returned. If the resource does not
* exist, null is returned instead.
* 
*
* 
-   * Once a path has been returned for a resource, that path is safe to use for
+   * Once a URL has been returned for a resource, that URL is safe to use for
* the lifetime of the application that corresponds to the provided
* ApplicationId.
* 
*
-   * 
-   * Additionally, a name for the resource should be specified. A fragment will
-   * be added to the path with the desired name if the desired name is 
different
-   * than the name of the provided path from the shared cache. This ensures 
that
-   * if the returned path is used to create a LocalResource, then the symlink
-   * created during YARN localization will match the name specified.
-   * 
-   *
* @param applicationId ApplicationId of the application using the resource
* @param resourceKey the key (i.e. checksum) that identifies the resource
-   * @param resourceName the desired name of the resource
-   * @return Path to the resource, or null if it does not exist
+   * @return URL to the resource, or null if it does not exist
*/
   @Public
   @Unstable
-  public abstract Path use(ApplicationId applicationId, String resourceKey,
-  String resourceName) throws YarnException;
+  public abstract URL use(ApplicationId applicationId, String resourceKey)
+  throws YarnException;
 
   /**
* 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d235221/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
index b910c28..3191d36 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
+++ 

hadoop git commit: YARN-7250. Update Shared cache client api to use URLs.

2017-09-28 Thread ctrezzo
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6f789fe05 -> c114da5e6


YARN-7250. Update Shared cache client api to use URLs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c114da5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c114da5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c114da5e

Branch: refs/heads/trunk
Commit: c114da5e64d14b1d9e614081c4171ea0391cb1aa
Parents: 6f789fe
Author: Chris Trezzo 
Authored: Thu Sep 28 15:28:06 2017 -0700
Committer: Chris Trezzo 
Committed: Thu Sep 28 15:28:06 2017 -0700

--
 .../yarn/client/api/SharedCacheClient.java  | 22 
 .../client/api/impl/SharedCacheClientImpl.java  | 36 +---
 .../api/impl/TestSharedCacheClientImpl.java | 31 +
 3 files changed, 23 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c114da5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
index 60c1bd98..a9c1a07 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
@@ -27,6 +27,7 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.client.api.impl.SharedCacheClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 
@@ -58,34 +59,25 @@ public abstract class SharedCacheClient extends 
AbstractService {
*
* 
* The SharedCacheManager responds with whether or not the
-   * resource exists in the cache. If the resource exists, a Path
-   * to the resource in the shared cache is returned. If the resource does not
+   * resource exists in the cache. If the resource exists, a URL 
to
+   * the resource in the shared cache is returned. If the resource does not
* exist, null is returned instead.
* 
*
* 
-   * Once a path has been returned for a resource, that path is safe to use for
+   * Once a URL has been returned for a resource, that URL is safe to use for
* the lifetime of the application that corresponds to the provided
* ApplicationId.
* 
*
-   * 
-   * Additionally, a name for the resource should be specified. A fragment will
-   * be added to the path with the desired name if the desired name is 
different
-   * than the name of the provided path from the shared cache. This ensures 
that
-   * if the returned path is used to create a LocalResource, then the symlink
-   * created during YARN localization will match the name specified.
-   * 
-   *
* @param applicationId ApplicationId of the application using the resource
* @param resourceKey the key (i.e. checksum) that identifies the resource
-   * @param resourceName the desired name of the resource
-   * @return Path to the resource, or null if it does not exist
+   * @return URL to the resource, or null if it does not exist
*/
   @Public
   @Unstable
-  public abstract Path use(ApplicationId applicationId, String resourceKey,
-  String resourceName) throws YarnException;
+  public abstract URL use(ApplicationId applicationId, String resourceKey)
+  throws YarnException;
 
   /**
* 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c114da5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
index b910c28..3191d36 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
@@ -21,8 +21,6 @@ package 

[hadoop] Git Push Summary

2017-09-28 Thread wang
Repository: hadoop
Updated Tags:  refs/tags/release-3.0.0-beta1-RC0 [created] 228787ae5

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14902. LoadGenerator#genFile write close timing is incorrectly calculated. Contributed by Hanisha Koneru

2017-09-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 150222c0c -> ffe42e62c


HADOOP-14902. LoadGenerator#genFile write close timing is incorrectly 
calculated. Contributed by Hanisha Koneru

(cherry picked from commit 6f789fe05766a61b12ca10df3f26ee354eac84aa)

Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffe42e62
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffe42e62
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffe42e62

Branch: refs/heads/branch-2.7
Commit: ffe42e62c994670ce2bfa83ee961d1651d14b026
Parents: 150222c
Author: Jason Lowe 
Authored: Thu Sep 28 16:38:30 2017 -0500
Committer: Jason Lowe 
Committed: Thu Sep 28 16:48:03 2017 -0500

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  6 ++
 .../apache/hadoop/fs/loadGenerator/LoadGenerator.java| 11 ---
 2 files changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffe42e62/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 928ae54..f205288 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -20,6 +20,12 @@ Release 2.7.5 - UNRELEASED
 HADOOP-14702. Fix formatting issue and regression caused by conversion from
 APT to Markdown. (Doris Gu via iwasakims)
 
+HADOOP-14881. LoadGenerator should use Time.monotonicNow() to measure
+durations. (Bharat Viswanadham via jlowe)
+
+HADOOP-14902. LoadGenerator#genFile write close timing is incorrectly
+calculated. (Hanisha Koneru via jlowe)
+
 Release 2.7.4 - 2017-08-04
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffe42e62/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
index 740100a..8f00d91 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
@@ -322,6 +322,7 @@ public class LoadGenerator extends Configured implements 
Tool {
 private void genFile(Path file, long fileSize) throws IOException {
   long startTimestamp = Time.monotonicNow();
   FSDataOutputStream out = null;
+  boolean isOutClosed = false;
   try {
 out = fc.create(file,
 EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
@@ -337,11 +338,15 @@ public class LoadGenerator extends Configured implements 
Tool {
   i -= s;
 }
 
-startTimestamp = Time.monotonicNow();
-executionTime[WRITE_CLOSE] += (Time.monotonicNow() - startTimestamp);
+startTime = Time.monotonicNow();
+out.close();
+executionTime[WRITE_CLOSE] += (Time.monotonicNow() - startTime);
 numOfOps[WRITE_CLOSE]++;
+isOutClosed = true;
   } finally {
-IOUtils.cleanup(LOG, out);
+if (!isOutClosed && out != null) {
+  out.close();
+}
   }
 }
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14902. LoadGenerator#genFile write close timing is incorrectly calculated. Contributed by Hanisha Koneru

2017-09-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 ba5656d5e -> 949525b81


HADOOP-14902. LoadGenerator#genFile write close timing is incorrectly 
calculated. Contributed by Hanisha Koneru

(cherry picked from commit 6f789fe05766a61b12ca10df3f26ee354eac84aa)

Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/949525b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/949525b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/949525b8

Branch: refs/heads/branch-2.8
Commit: 949525b81a2a14c03c01a65cc2f8e8d77026768b
Parents: ba5656d
Author: Jason Lowe 
Authored: Thu Sep 28 16:38:30 2017 -0500
Committer: Jason Lowe 
Committed: Thu Sep 28 16:45:02 2017 -0500

--
 .../apache/hadoop/fs/loadGenerator/LoadGenerator.java| 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/949525b8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
index 740100a..8f00d91 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
@@ -322,6 +322,7 @@ public class LoadGenerator extends Configured implements 
Tool {
 private void genFile(Path file, long fileSize) throws IOException {
   long startTimestamp = Time.monotonicNow();
   FSDataOutputStream out = null;
+  boolean isOutClosed = false;
   try {
 out = fc.create(file,
 EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
@@ -337,11 +338,15 @@ public class LoadGenerator extends Configured implements 
Tool {
   i -= s;
 }
 
-startTimestamp = Time.monotonicNow();
-executionTime[WRITE_CLOSE] += (Time.monotonicNow() - startTimestamp);
+startTime = Time.monotonicNow();
+out.close();
+executionTime[WRITE_CLOSE] += (Time.monotonicNow() - startTime);
 numOfOps[WRITE_CLOSE]++;
+isOutClosed = true;
   } finally {
-IOUtils.cleanup(LOG, out);
+if (!isOutClosed && out != null) {
+  out.close();
+}
   }
 }
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[Hadoop Wiki] Update of "HowToRelease" by AndrewWang

2017-09-28 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "HowToRelease" page has been changed by AndrewWang:
https://wiki.apache.org/hadoop/HowToRelease?action=diff&rev1=94&rev2=95

Comment:
More detailed instructions on how to bulk update JIRA versions

  
  = Preparation =
   1. If you have not already done so, 
[[http://www.apache.org/dev/release-signing.html#keys-policy|append your code 
signing key]] to the 
[[https://dist.apache.org/repos/dist/release/hadoop/common/KEYS|KEYS]] file. 
Once you commit your changes, they will automatically be propagated to the 
website. Also 
[[http://www.apache.org/dev/release-signing.html#keys-policy|upload your key to 
a public key server]] if you haven't. End users use the KEYS file (along with 
the [[http://www.apache.org/dev/release-signing.html#web-of-trust|web of 
trust]]) to validate that releases were done by an Apache committer. For more 
details on signing releases, see 
[[http://www.apache.org/dev/release-signing.html|Signing Releases]] and 
[[http://www.apache.org/dev/mirror-step-by-step.html?Step-By-Step|Step-By-Step 
Guide to Mirroring Releases]].
-  1. Bulk update JIRA to unassign from this release all issues that are open 
non-blockers
+  1. Bulk update JIRA to unassign from this release all issues that are open 
non-blockers. This is involved since you can only bulk change issues within the 
same project, so minimally requires four bulk changes for each of HADOOP, HDFS, 
MAPREDUCE, and YARN. Editing the "Target Version/s" field is also a blind 
write, so you need to be careful not to lose any other fix versions that are 
set. For updating 3.0.0-beta1 to 3.0.0, the process looked like this:
+   1. Start with this query: 
+   {{{
+ project in (HADOOP, HDFS, YARN, MAPREDUCE) AND "Target Version/s" = 
3.0.0-beta1 and statusCategory != Done
+ }}}
+   1. Filter this list down until it's only issues with a Target Version of 
just "3.0.0-beta1". My query ended up looking like:
+   {{{
+ project in (HADOOP, HDFS, YARN, MAPREDUCE) AND "Target Version/s" = 
3.0.0-beta1 and "Target Version/s" not in (2.9.0, 2.8.3, 2.8.2) AND 
statusCategory != Done
+ }}}
+   1. Do the bulk update for each project individually to set the target 
version to 3.0.0.
+   1. Check the query for the next most common set of target versions and 
again filter it down:
+   {{{
+ project in (HADOOP, HDFS, YARN, MAPREDUCE) AND "Target Version/s" = 
3.0.0-beta1 and "Target Version/s" = 2.9.0 and statusCategory != Done
+ project in (HADOOP, HDFS, YARN, MAPREDUCE) AND "Target Version/s" = 
3.0.0-beta1 and "Target Version/s" = 2.9.0 and "Target Version/s" not in 
(2.8.2, 2.8.3) and statusCategory != Done
+ }}}
+   1. Do the bulk update for each project individually to set the target 
version field to (3.0.0, 2.9.0).
+   1. Return to the original query. If there aren't too many, update the 
remaining straggler issues by hand (faster than doing the bulk edits):
+   {{{
+ project in (HADOOP, HDFS, YARN, MAPREDUCE) AND "Target Version/s" = 
3.0.0-beta1 and statusCategory != Done
+ }}}
+ 
   1. Send follow-up notification to the developer list that this was done.
   1. To deploy artifacts to the Apache Maven repository create 
{{{~/.m2/settings.xml}}}:
   {{{

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14881. LoadGenerator should use Time.monotonicNow() to measure durations. Contributed by Bharat Viswanadham

2017-09-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 fb5de93c4 -> ba5656d5e


HADOOP-14881. LoadGenerator should use Time.monotonicNow() to measure 
durations. Contributed by Bharat Viswanadham

(cherry picked from commit ac05a51bbb2a3fad4e85f9334a3408571967900a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba5656d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba5656d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba5656d5

Branch: refs/heads/branch-2.8
Commit: ba5656d5e0c28e3523eff2013c6fa35367f334dc
Parents: fb5de93
Author: Jason Lowe 
Authored: Mon Sep 25 15:35:44 2017 -0500
Committer: Jason Lowe 
Committed: Thu Sep 28 16:27:08 2017 -0500

--
 .../hadoop/fs/loadGenerator/LoadGenerator.java  | 20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba5656d5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
index ca01702..740100a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
@@ -277,9 +277,9 @@ public class LoadGenerator extends Configured implements 
Tool {
  * the entire file */
 private void read() throws IOException {
   String fileName = files.get(r.nextInt(files.size()));
-  long startTime = Time.now();
+  long startTimestamp = Time.monotonicNow();
   InputStream in = fc.open(new Path(fileName));
-  executionTime[OPEN] += (Time.now()-startTime);
+  executionTime[OPEN] += (Time.monotonicNow() - startTimestamp);
   totalNumOfOps[OPEN]++;
   while (in.read(buffer) != -1) {}
   in.close();
@@ -299,9 +299,9 @@ public class LoadGenerator extends Configured implements 
Tool {
   double fileSize = 0;
   while ((fileSize = r.nextGaussian()+2)<=0) {}
   genFile(file, (long)(fileSize*BLOCK_SIZE));
-  long startTime = Time.now();
+  long startTimestamp = Time.monotonicNow();
   fc.delete(file, true);
-  executionTime[DELETE] += (Time.now()-startTime);
+  executionTime[DELETE] += (Time.monotonicNow() - startTimestamp);
   totalNumOfOps[DELETE]++;
 }
 
@@ -310,9 +310,9 @@ public class LoadGenerator extends Configured implements 
Tool {
  */
 private void list() throws IOException {
   String dirName = dirs.get(r.nextInt(dirs.size()));
-  long startTime = Time.now();
+  long startTimestamp = Time.monotonicNow();
   fc.listStatus(new Path(dirName));
-  executionTime[LIST] += (Time.now()-startTime);
+  executionTime[LIST] += (Time.monotonicNow() - startTimestamp);
   totalNumOfOps[LIST]++;
 }
 
@@ -320,14 +320,14 @@ public class LoadGenerator extends Configured implements 
Tool {
  * The file is filled with 'a'.
  */
 private void genFile(Path file, long fileSize) throws IOException {
-  long startTime = Time.now();
+  long startTimestamp = Time.monotonicNow();
   FSDataOutputStream out = null;
   try {
 out = fc.create(file,
 EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
 CreateOpts.createParent(), CreateOpts.bufferSize(4096),
 CreateOpts.repFac((short) 3));
-executionTime[CREATE] += (Time.now() - startTime);
+executionTime[CREATE] += (Time.monotonicNow() - startTimestamp);
 numOfOps[CREATE]++;
 
 long i = fileSize;
@@ -337,8 +337,8 @@ public class LoadGenerator extends Configured implements 
Tool {
   i -= s;
 }
 
-startTime = Time.now();
-executionTime[WRITE_CLOSE] += (Time.now() - startTime);
+startTimestamp = Time.monotonicNow();
+executionTime[WRITE_CLOSE] += (Time.monotonicNow() - startTimestamp);
 numOfOps[WRITE_CLOSE]++;
   } finally {
 IOUtils.cleanup(LOG, out);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14902. LoadGenerator#genFile write close timing is incorrectly calculated. Contributed by Hanisha Koneru

2017-09-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk ca669f9f8 -> 6f789fe05


HADOOP-14902. LoadGenerator#genFile write close timing is incorrectly 
calculated. Contributed by Hanisha Koneru


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f789fe0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f789fe0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f789fe0

Branch: refs/heads/trunk
Commit: 6f789fe05766a61b12ca10df3f26ee354eac84aa
Parents: ca669f9
Author: Jason Lowe 
Authored: Thu Sep 28 16:38:30 2017 -0500
Committer: Jason Lowe 
Committed: Thu Sep 28 16:38:30 2017 -0500

--
 .../apache/hadoop/fs/loadGenerator/LoadGenerator.java| 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f789fe0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
index 0bb1b46..b74e75d 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
@@ -322,6 +322,7 @@ public class LoadGenerator extends Configured implements 
Tool {
 private void genFile(Path file, long fileSize) throws IOException {
   long startTimestamp = Time.monotonicNow();
   FSDataOutputStream out = null;
+  boolean isOutClosed = false;
   try {
 out = fc.create(file,
 EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
@@ -337,11 +338,15 @@ public class LoadGenerator extends Configured implements 
Tool {
   i -= s;
 }
 
-startTimestamp = Time.monotonicNow();
-executionTime[WRITE_CLOSE] += (Time.monotonicNow() - startTimestamp);
+startTime = Time.monotonicNow();
+out.close();
+executionTime[WRITE_CLOSE] += (Time.monotonicNow() - startTime);
 numOfOps[WRITE_CLOSE]++;
+isOutClosed = true;
   } finally {
-IOUtils.cleanupWithLogger(LOG, out);
+if (!isOutClosed && out != null) {
+  out.close();
+}
   }
 }
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14881. LoadGenerator should use Time.monotonicNow() to measure durations. Contributed by Bharat Viswanadham

2017-09-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 897c10c82 -> 150222c0c


HADOOP-14881. LoadGenerator should use Time.monotonicNow() to measure 
durations. Contributed by Bharat Viswanadham

(cherry picked from commit ac05a51bbb2a3fad4e85f9334a3408571967900a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/150222c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/150222c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/150222c0

Branch: refs/heads/branch-2.7
Commit: 150222c0c8024ee05b69dbdb8deec1dc9451edf9
Parents: 897c10c
Author: Jason Lowe 
Authored: Mon Sep 25 15:35:44 2017 -0500
Committer: Jason Lowe 
Committed: Thu Sep 28 16:33:21 2017 -0500

--
 .../hadoop/fs/loadGenerator/LoadGenerator.java  | 20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/150222c0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
index ca01702..740100a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
@@ -277,9 +277,9 @@ public class LoadGenerator extends Configured implements 
Tool {
  * the entire file */
 private void read() throws IOException {
   String fileName = files.get(r.nextInt(files.size()));
-  long startTime = Time.now();
+  long startTimestamp = Time.monotonicNow();
   InputStream in = fc.open(new Path(fileName));
-  executionTime[OPEN] += (Time.now()-startTime);
+  executionTime[OPEN] += (Time.monotonicNow() - startTimestamp);
   totalNumOfOps[OPEN]++;
   while (in.read(buffer) != -1) {}
   in.close();
@@ -299,9 +299,9 @@ public class LoadGenerator extends Configured implements 
Tool {
   double fileSize = 0;
   while ((fileSize = r.nextGaussian()+2)<=0) {}
   genFile(file, (long)(fileSize*BLOCK_SIZE));
-  long startTime = Time.now();
+  long startTimestamp = Time.monotonicNow();
   fc.delete(file, true);
-  executionTime[DELETE] += (Time.now()-startTime);
+  executionTime[DELETE] += (Time.monotonicNow() - startTimestamp);
   totalNumOfOps[DELETE]++;
 }
 
@@ -310,9 +310,9 @@ public class LoadGenerator extends Configured implements 
Tool {
  */
 private void list() throws IOException {
   String dirName = dirs.get(r.nextInt(dirs.size()));
-  long startTime = Time.now();
+  long startTimestamp = Time.monotonicNow();
   fc.listStatus(new Path(dirName));
-  executionTime[LIST] += (Time.now()-startTime);
+  executionTime[LIST] += (Time.monotonicNow() - startTimestamp);
   totalNumOfOps[LIST]++;
 }
 
@@ -320,14 +320,14 @@ public class LoadGenerator extends Configured implements 
Tool {
  * The file is filled with 'a'.
  */
 private void genFile(Path file, long fileSize) throws IOException {
-  long startTime = Time.now();
+  long startTimestamp = Time.monotonicNow();
   FSDataOutputStream out = null;
   try {
 out = fc.create(file,
 EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
 CreateOpts.createParent(), CreateOpts.bufferSize(4096),
 CreateOpts.repFac((short) 3));
-executionTime[CREATE] += (Time.now() - startTime);
+executionTime[CREATE] += (Time.monotonicNow() - startTimestamp);
 numOfOps[CREATE]++;
 
 long i = fileSize;
@@ -337,8 +337,8 @@ public class LoadGenerator extends Configured implements 
Tool {
   i -= s;
 }
 
-startTime = Time.now();
-executionTime[WRITE_CLOSE] += (Time.now() - startTime);
+startTimestamp = Time.monotonicNow();
+executionTime[WRITE_CLOSE] += (Time.monotonicNow() - startTimestamp);
 numOfOps[WRITE_CLOSE]++;
   } finally {
 IOUtils.cleanup(LOG, out);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14902. LoadGenerator#genFile write close timing is incorrectly calculated. Contributed by Hanisha Koneru

2017-09-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ffcf5ba1c -> 11ac10fe1


HADOOP-14902. LoadGenerator#genFile write close timing is incorrectly 
calculated. Contributed by Hanisha Koneru

(cherry picked from commit 6f789fe05766a61b12ca10df3f26ee354eac84aa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11ac10fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11ac10fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11ac10fe

Branch: refs/heads/branch-2
Commit: 11ac10fe1d95d67dc099e787041e0f81a507fbf2
Parents: ffcf5ba
Author: Jason Lowe 
Authored: Thu Sep 28 16:38:30 2017 -0500
Committer: Jason Lowe 
Committed: Thu Sep 28 16:40:18 2017 -0500

--
 .../apache/hadoop/fs/loadGenerator/LoadGenerator.java| 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11ac10fe/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
index 0bb1b46..b74e75d 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
@@ -322,6 +322,7 @@ public class LoadGenerator extends Configured implements 
Tool {
 private void genFile(Path file, long fileSize) throws IOException {
   long startTimestamp = Time.monotonicNow();
   FSDataOutputStream out = null;
+  boolean isOutClosed = false;
   try {
 out = fc.create(file,
 EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
@@ -337,11 +338,15 @@ public class LoadGenerator extends Configured implements 
Tool {
   i -= s;
 }
 
-startTimestamp = Time.monotonicNow();
-executionTime[WRITE_CLOSE] += (Time.monotonicNow() - startTimestamp);
+startTime = Time.monotonicNow();
+out.close();
+executionTime[WRITE_CLOSE] += (Time.monotonicNow() - startTime);
 numOfOps[WRITE_CLOSE]++;
+isOutClosed = true;
   } finally {
-IOUtils.cleanupWithLogger(LOG, out);
+if (!isOutClosed && out != null) {
+  out.close();
+}
   }
 }
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14902. LoadGenerator#genFile write close timing is incorrectly calculated. Contributed by Hanisha Koneru

2017-09-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 a1e973973 -> a6630f703


HADOOP-14902. LoadGenerator#genFile write close timing is incorrectly 
calculated. Contributed by Hanisha Koneru

(cherry picked from commit 6f789fe05766a61b12ca10df3f26ee354eac84aa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a6630f70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a6630f70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a6630f70

Branch: refs/heads/branch-3.0
Commit: a6630f703cc2dff18b0d823a444fd6cad12be6d5
Parents: a1e9739
Author: Jason Lowe 
Authored: Thu Sep 28 16:38:30 2017 -0500
Committer: Jason Lowe 
Committed: Thu Sep 28 16:39:47 2017 -0500

--
 .../apache/hadoop/fs/loadGenerator/LoadGenerator.java| 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6630f70/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
index 0bb1b46..b74e75d 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
@@ -322,6 +322,7 @@ public class LoadGenerator extends Configured implements 
Tool {
 private void genFile(Path file, long fileSize) throws IOException {
   long startTimestamp = Time.monotonicNow();
   FSDataOutputStream out = null;
+  boolean isOutClosed = false;
   try {
 out = fc.create(file,
 EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
@@ -337,11 +338,15 @@ public class LoadGenerator extends Configured implements 
Tool {
   i -= s;
 }
 
-startTimestamp = Time.monotonicNow();
-executionTime[WRITE_CLOSE] += (Time.monotonicNow() - startTimestamp);
+startTime = Time.monotonicNow();
+out.close();
+executionTime[WRITE_CLOSE] += (Time.monotonicNow() - startTime);
 numOfOps[WRITE_CLOSE]++;
+isOutClosed = true;
   } finally {
-IOUtils.cleanupWithLogger(LOG, out);
+if (!isOutClosed && out != null) {
+  out.close();
+}
   }
 }
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: YARN-6962. Add support for updateContainers when allocating using FederationInterceptor. (Botong Huang via Subru).

2017-09-28 Thread subru
YARN-6962. Add support for updateContainers when allocating using 
FederationInterceptor. (Botong Huang via Subru).

(cherry picked from commit ca669f9f8bc7abe5b7d4648c589aa1756bd336d1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffcf5ba1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffcf5ba1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffcf5ba1

Branch: refs/heads/branch-2
Commit: ffcf5ba1cec5cc0e1d805144d8514b69585221c0
Parents: d6da014
Author: Subru Krishnan 
Authored: Thu Sep 28 13:04:03 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Sep 28 13:11:32 2017 -0700

--
 .../amrmproxy/FederationInterceptor.java| 86 +---
 .../amrmproxy/TestFederationInterceptor.java| 54 
 2 files changed, 111 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffcf5ba1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
index 28724aa..33cfca3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
@@ -540,30 +540,33 @@ public class FederationInterceptor extends 
AbstractRequestInterceptor {
   }
 }
 
-if (request.getResourceBlacklistRequest() != null && !isNullOrEmpty(
-request.getResourceBlacklistRequest().getBlacklistAdditions())) {
-  for (String resourceName : request.getResourceBlacklistRequest()
-  .getBlacklistAdditions()) {
-SubClusterId subClusterId = getSubClusterForNode(resourceName);
-if (subClusterId != null) {
-  AllocateRequest newRequest = 
findOrCreateAllocateRequestForSubCluster(
-  subClusterId, request, requestMap);
-  newRequest.getResourceBlacklistRequest().getBlacklistAdditions()
-  .add(resourceName);
+if (request.getResourceBlacklistRequest() != null) {
+  if (!isNullOrEmpty(
+  request.getResourceBlacklistRequest().getBlacklistAdditions())) {
+for (String resourceName : request.getResourceBlacklistRequest()
+.getBlacklistAdditions()) {
+  SubClusterId subClusterId = getSubClusterForNode(resourceName);
+  if (subClusterId != null) {
+AllocateRequest newRequest =
+findOrCreateAllocateRequestForSubCluster(subClusterId, request,
+requestMap);
+newRequest.getResourceBlacklistRequest().getBlacklistAdditions()
+.add(resourceName);
+  }
 }
   }
-}
-
-if (request.getResourceBlacklistRequest() != null && !isNullOrEmpty(
-request.getResourceBlacklistRequest().getBlacklistRemovals())) {
-  for (String resourceName : request.getResourceBlacklistRequest()
-  .getBlacklistRemovals()) {
-SubClusterId subClusterId = getSubClusterForNode(resourceName);
-if (subClusterId != null) {
-  AllocateRequest newRequest = 
findOrCreateAllocateRequestForSubCluster(
-  subClusterId, request, requestMap);
-  newRequest.getResourceBlacklistRequest().getBlacklistRemovals()
-  .add(resourceName);
+  if (!isNullOrEmpty(
+  request.getResourceBlacklistRequest().getBlacklistRemovals())) {
+for (String resourceName : request.getResourceBlacklistRequest()
+.getBlacklistRemovals()) {
+  SubClusterId subClusterId = getSubClusterForNode(resourceName);
+  if (subClusterId != null) {
+AllocateRequest newRequest =
+findOrCreateAllocateRequestForSubCluster(subClusterId, request,
+requestMap);
+newRequest.getResourceBlacklistRequest().getBlacklistRemovals()
+.add(resourceName);
+  }
 }
   }
 }
@@ -896,13 +899,8 @@ public class FederationInterceptor extends 
AbstractRequestInterceptor {
   }
 }
 
-if (!isNullOrEmpty(otherResponse.getNMTokens())) {
-  if 

[1/2] hadoop git commit: YARN-6955. Handle concurrent register AM requests in FederationInterceptor. (Botong Huang via Subru).

2017-09-28 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 913a64e4c -> ffcf5ba1c


YARN-6955. Handle concurrent register AM requests in FederationInterceptor. 
(Botong Huang via Subru).

(cherry picked from commit c61f2c419830e40bb47fb2b1fe1f7d6109ed29a9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6da014f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6da014f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6da014f

Branch: refs/heads/branch-2
Commit: d6da014f6783694bf6d9e77b2afd75cf99680de0
Parents: 913a64e
Author: Subru Krishnan 
Authored: Mon Aug 7 16:58:29 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Sep 28 13:11:19 2017 -0700

--
 .../dev-support/findbugs-exclude.xml|  4 +-
 .../yarn/server/MockResourceManagerFacade.java  | 18 ++--
 .../amrmproxy/FederationInterceptor.java| 43 --
 .../amrmproxy/TestFederationInterceptor.java| 88 ++--
 4 files changed, 110 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6da014f/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 73f1038..2664cd5 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -567,11 +567,9 @@
 
   
 
-  
   
 
-
-
+
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6da014f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
index 68c55ac..e33d7e1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
@@ -246,6 +246,16 @@ public class MockResourceManagerFacade implements 
ApplicationClientProtocol,
 
 shouldReRegisterNext = false;
 
+synchronized (applicationContainerIdMap) {
+  if (applicationContainerIdMap.containsKey(amrmToken)) {
+throw new InvalidApplicationMasterRequestException(
+AMRMClientUtils.APP_ALREADY_REGISTERED_MESSAGE);
+  }
+  // Keep track of the containers that are returned to this application
+  applicationContainerIdMap.put(amrmToken, new ArrayList());
+}
+
+// Make sure we wait for certain test cases last in the method
 synchronized (syncObj) {
   syncObj.notifyAll();
   // We reuse the port number to indicate whether the unit test want us to
@@ -261,14 +271,6 @@ public class MockResourceManagerFacade implements 
ApplicationClientProtocol,
   }
 }
 
-synchronized (applicationContainerIdMap) {
-  if (applicationContainerIdMap.containsKey(amrmToken)) {
-throw new InvalidApplicationMasterRequestException(
-AMRMClientUtils.APP_ALREADY_REGISTERED_MESSAGE);
-  }
-  // Keep track of the containers that are returned to this application
-  applicationContainerIdMap.put(amrmToken, new ArrayList());
-}
 return RegisterApplicationMasterResponse.newInstance(null, null, null, 
null,
 null, request.getHost(), null);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6da014f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
index ffe47f4..28724aa 100644
--- 

hadoop git commit: YARN-6962. Add support for updateContainers when allocating using FederationInterceptor. (Botong Huang via Subru).

2017-09-28 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 85d81ae58 -> ca669f9f8


YARN-6962. Add support for updateContainers when allocating using 
FederationInterceptor. (Botong Huang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca669f9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca669f9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca669f9f

Branch: refs/heads/trunk
Commit: ca669f9f8bc7abe5b7d4648c589aa1756bd336d1
Parents: 85d81ae5
Author: Subru Krishnan 
Authored: Thu Sep 28 13:04:03 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Sep 28 13:04:03 2017 -0700

--
 .../amrmproxy/FederationInterceptor.java| 86 +---
 .../amrmproxy/TestFederationInterceptor.java| 54 
 2 files changed, 111 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca669f9f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
index 28724aa..33cfca3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
@@ -540,30 +540,33 @@ public class FederationInterceptor extends 
AbstractRequestInterceptor {
   }
 }
 
-if (request.getResourceBlacklistRequest() != null && !isNullOrEmpty(
-request.getResourceBlacklistRequest().getBlacklistAdditions())) {
-  for (String resourceName : request.getResourceBlacklistRequest()
-  .getBlacklistAdditions()) {
-SubClusterId subClusterId = getSubClusterForNode(resourceName);
-if (subClusterId != null) {
-  AllocateRequest newRequest = 
findOrCreateAllocateRequestForSubCluster(
-  subClusterId, request, requestMap);
-  newRequest.getResourceBlacklistRequest().getBlacklistAdditions()
-  .add(resourceName);
+if (request.getResourceBlacklistRequest() != null) {
+  if (!isNullOrEmpty(
+  request.getResourceBlacklistRequest().getBlacklistAdditions())) {
+for (String resourceName : request.getResourceBlacklistRequest()
+.getBlacklistAdditions()) {
+  SubClusterId subClusterId = getSubClusterForNode(resourceName);
+  if (subClusterId != null) {
+AllocateRequest newRequest =
+findOrCreateAllocateRequestForSubCluster(subClusterId, request,
+requestMap);
+newRequest.getResourceBlacklistRequest().getBlacklistAdditions()
+.add(resourceName);
+  }
 }
   }
-}
-
-if (request.getResourceBlacklistRequest() != null && !isNullOrEmpty(
-request.getResourceBlacklistRequest().getBlacklistRemovals())) {
-  for (String resourceName : request.getResourceBlacklistRequest()
-  .getBlacklistRemovals()) {
-SubClusterId subClusterId = getSubClusterForNode(resourceName);
-if (subClusterId != null) {
-  AllocateRequest newRequest = 
findOrCreateAllocateRequestForSubCluster(
-  subClusterId, request, requestMap);
-  newRequest.getResourceBlacklistRequest().getBlacklistRemovals()
-  .add(resourceName);
+  if (!isNullOrEmpty(
+  request.getResourceBlacklistRequest().getBlacklistRemovals())) {
+for (String resourceName : request.getResourceBlacklistRequest()
+.getBlacklistRemovals()) {
+  SubClusterId subClusterId = getSubClusterForNode(resourceName);
+  if (subClusterId != null) {
+AllocateRequest newRequest =
+findOrCreateAllocateRequestForSubCluster(subClusterId, request,
+requestMap);
+newRequest.getResourceBlacklistRequest().getBlacklistRemovals()
+.add(resourceName);
+  }
 }
   }
 }
@@ -896,13 +899,8 @@ public class FederationInterceptor extends 
AbstractRequestInterceptor {
   }
 }
 
-if (!isNullOrEmpty(otherResponse.getNMTokens())) {
-  if 

hadoop git commit: YARN-7248. NM returns new SCHEDULED container status to older clients. Contributed by Arun Suresh

2017-09-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 53c111590 -> 913a64e4c


YARN-7248. NM returns new SCHEDULED container status to older clients. 
Contributed by Arun Suresh

(cherry picked from commit 85d81ae58ec4361a944c84753a900460a0888b9b)

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/913a64e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/913a64e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/913a64e4

Branch: refs/heads/branch-2
Commit: 913a64e4c953a493454bf62c79fdd93796f51c17
Parents: 53c1115
Author: Jason Lowe 
Authored: Thu Sep 28 14:10:15 2017 -0500
Committer: Jason Lowe 
Committed: Thu Sep 28 14:25:01 2017 -0500

--
 .../hadoop/yarn/api/records/ContainerState.java | 10 +--
 .../yarn/api/records/ContainerStatus.java   | 22 ++
 .../yarn/api/records/ContainerSubState.java | 57 ++
 .../src/main/proto/yarn_protos.proto| 34 -
 .../records/impl/pb/ContainerStatusPBImpl.java  | 24 +-
 .../yarn/api/records/impl/pb/ProtoUtils.java| 20 -
 .../container/ContainerImpl.java| 33 -
 .../container/ContainerState.java   |  9 +++
 .../yarn/server/nodemanager/TestEventFlow.java  |  2 +-
 .../nodemanager/TestNodeManagerShutdown.java|  3 +-
 .../TestContainerSchedulerQueuing.java  | 78 
 .../resourcemanager/rmnode/RMNodeImpl.java  |  6 +-
 .../TestResourceTrackerService.java |  2 +-
 13 files changed, 229 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/913a64e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
index 45e5bd4..e22204e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.api.records;
 
 import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 
 /**
@@ -35,12 +34,5 @@ public enum ContainerState {
   RUNNING, 
 
   /** Completed container */
-  COMPLETE,
-
-  /** Scheduled (awaiting resources) at the NM. */
-  @InterfaceStability.Unstable
-  SCHEDULED,
-
-  /** Paused at the NM. */
-  PAUSED
+  COMPLETE
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/913a64e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
index d7c75f3..edc62fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
@@ -201,4 +201,26 @@ public abstract class ContainerStatus {
 throw new UnsupportedOperationException(
 "subclass must implement this method");
   }
+
+  /**
+   * Add Extra state information of the container (SCHEDULED, LOCALIZING etc.).
+   * @param subState Extra State Information.
+   */
+  @Private
+  @Unstable
+  public void setContainerSubState(ContainerSubState subState) {
+throw new UnsupportedOperationException(
+"subclass must implement this method");
+  }
+
+  /**
+   * Get Extra state information of the container (SCHEDULED, LOCALIZING etc.).
+   * @return Extra State information.
+   */
+  @Private
+  @Unstable
+  public ContainerSubState getContainerSubState() {
+throw new UnsupportedOperationException(
+"subclass must implement this method");
+  }
 }


hadoop git commit: YARN-7248. NM returns new SCHEDULED container status to older clients. Contributed by Arun Suresh

2017-09-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 5a4f37019 -> a1e973973


YARN-7248. NM returns new SCHEDULED container status to older clients. 
Contributed by Arun Suresh

(cherry picked from commit 85d81ae58ec4361a944c84753a900460a0888b9b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1e97397
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1e97397
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1e97397

Branch: refs/heads/branch-3.0
Commit: a1e9739730651a70ea873be79aeb807c07005b35
Parents: 5a4f370
Author: Jason Lowe 
Authored: Thu Sep 28 14:10:15 2017 -0500
Committer: Jason Lowe 
Committed: Thu Sep 28 14:14:14 2017 -0500

--
 .../hadoop/yarn/api/records/ContainerState.java | 10 +--
 .../yarn/api/records/ContainerStatus.java   | 22 ++
 .../yarn/api/records/ContainerSubState.java | 57 ++
 .../src/main/proto/yarn_protos.proto| 34 -
 .../records/impl/pb/ContainerStatusPBImpl.java  | 24 +-
 .../yarn/api/records/impl/pb/ProtoUtils.java| 20 -
 .../container/ContainerImpl.java| 33 -
 .../container/ContainerState.java   |  9 +++
 .../yarn/server/nodemanager/TestEventFlow.java  |  2 +-
 .../nodemanager/TestNodeManagerShutdown.java|  3 +-
 .../TestContainerSchedulerQueuing.java  | 78 
 .../resourcemanager/rmnode/RMNodeImpl.java  |  6 +-
 .../TestResourceTrackerService.java |  2 +-
 13 files changed, 229 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e97397/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
index 45e5bd4..e22204e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.api.records;
 
 import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 
 /**
@@ -35,12 +34,5 @@ public enum ContainerState {
   RUNNING, 
 
   /** Completed container */
-  COMPLETE,
-
-  /** Scheduled (awaiting resources) at the NM. */
-  @InterfaceStability.Unstable
-  SCHEDULED,
-
-  /** Paused at the NM. */
-  PAUSED
+  COMPLETE
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e97397/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
index d7c75f3..edc62fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
@@ -201,4 +201,26 @@ public abstract class ContainerStatus {
 throw new UnsupportedOperationException(
 "subclass must implement this method");
   }
+
+  /**
+   * Add Extra state information of the container (SCHEDULED, LOCALIZING etc.).
+   * @param subState Extra State Information.
+   */
+  @Private
+  @Unstable
+  public void setContainerSubState(ContainerSubState subState) {
+throw new UnsupportedOperationException(
+"subclass must implement this method");
+  }
+
+  /**
+   * Get Extra state information of the container (SCHEDULED, LOCALIZING etc.).
+   * @return Extra State information.
+   */
+  @Private
+  @Unstable
+  public ContainerSubState getContainerSubState() {
+throw new UnsupportedOperationException(
+"subclass must implement this method");
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e97397/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerSubState.java
--
diff --git 

hadoop git commit: YARN-7248. NM returns new SCHEDULED container status to older clients. Contributed by Arun Suresh

2017-09-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk a530e7ab3 -> 85d81ae58


YARN-7248. NM returns new SCHEDULED container status to older clients. 
Contributed by Arun Suresh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85d81ae5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85d81ae5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85d81ae5

Branch: refs/heads/trunk
Commit: 85d81ae58ec4361a944c84753a900460a0888b9b
Parents: a530e7a
Author: Jason Lowe 
Authored: Thu Sep 28 14:10:15 2017 -0500
Committer: Jason Lowe 
Committed: Thu Sep 28 14:10:15 2017 -0500

--
 .../hadoop/yarn/api/records/ContainerState.java | 10 +--
 .../yarn/api/records/ContainerStatus.java   | 22 ++
 .../yarn/api/records/ContainerSubState.java | 57 ++
 .../src/main/proto/yarn_protos.proto| 34 -
 .../records/impl/pb/ContainerStatusPBImpl.java  | 24 +-
 .../yarn/api/records/impl/pb/ProtoUtils.java| 20 -
 .../container/ContainerImpl.java| 33 -
 .../container/ContainerState.java   |  9 +++
 .../yarn/server/nodemanager/TestEventFlow.java  |  2 +-
 .../nodemanager/TestNodeManagerShutdown.java|  3 +-
 .../TestContainerSchedulerQueuing.java  | 78 
 .../resourcemanager/rmnode/RMNodeImpl.java  |  6 +-
 .../TestResourceTrackerService.java |  2 +-
 13 files changed, 229 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85d81ae5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
index 45e5bd4..e22204e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.api.records;
 
 import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 
 /**
@@ -35,12 +34,5 @@ public enum ContainerState {
   RUNNING, 
 
   /** Completed container */
-  COMPLETE,
-
-  /** Scheduled (awaiting resources) at the NM. */
-  @InterfaceStability.Unstable
-  SCHEDULED,
-
-  /** Paused at the NM. */
-  PAUSED
+  COMPLETE
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85d81ae5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
index d7c75f3..edc62fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
@@ -201,4 +201,26 @@ public abstract class ContainerStatus {
 throw new UnsupportedOperationException(
 "subclass must implement this method");
   }
+
+  /**
+   * Add Extra state information of the container (SCHEDULED, LOCALIZING etc.).
+   * @param subState Extra State Information.
+   */
+  @Private
+  @Unstable
+  public void setContainerSubState(ContainerSubState subState) {
+throw new UnsupportedOperationException(
+"subclass must implement this method");
+  }
+
+  /**
+   * Get Extra state information of the container (SCHEDULED, LOCALIZING etc.).
+   * @return Extra State information.
+   */
+  @Private
+  @Unstable
+  public ContainerSubState getContainerSubState() {
+throw new UnsupportedOperationException(
+"subclass must implement this method");
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85d81ae5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerSubState.java
--
diff --git 

hadoop git commit: HADOOP-14768. Honoring sticky bit during Deletion when authorization is enabled in WASB Contributed by Varada Hemeswari

2017-09-28 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8e1bd114e -> a530e7ab3


HADOOP-14768. Honoring sticky bit during Deletion when authorization is enabled 
in WASB
Contributed by Varada Hemeswari


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a530e7ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a530e7ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a530e7ab

Branch: refs/heads/trunk
Commit: a530e7ab3b3f5bd71143a91266b46787962ac532
Parents: 8e1bd11
Author: Steve Loughran 
Authored: Thu Sep 28 19:52:56 2017 +0100
Committer: Steve Loughran 
Committed: Thu Sep 28 19:52:56 2017 +0100

--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 522 +++--
 ...stNativeAzureFSAuthWithBlobSpecificKeys.java |   2 +-
 .../ITestNativeAzureFSAuthorizationCaching.java |   2 +-
 ...veAzureFileSystemAuthorizationWithOwner.java | 122 
 .../hadoop/fs/azure/MockWasbAuthorizerImpl.java | 284 +-
 .../TestNativeAzureFileSystemAuthorization.java | 555 +--
 6 files changed, 1136 insertions(+), 351 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a530e7ab/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 280c0e0..5f86f84 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -40,6 +40,8 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Stack;
+import java.util.HashMap;
 
 import com.fasterxml.jackson.core.JsonParseException;
 import com.fasterxml.jackson.core.JsonParser;
@@ -80,6 +82,8 @@ import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.fs.azure.NativeAzureFileSystemHelper.*;
+
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.annotations.VisibleForTesting;
 import com.microsoft.azure.storage.StorageException;
@@ -1849,31 +1853,256 @@ public class NativeAzureFileSystem extends FileSystem {
   }
 
   /**
-   * Delete the specified file or folder. The parameter
-   * skipParentFolderLastModifiedTimeUpdate
-   * is used in the case of atomic folder rename redo. In that case, there is
-   * a lease on the parent folder, so (without reworking the code) modifying
-   * the parent folder update time will fail because of a conflict with the
-   * lease. Since we are going to delete the folder soon anyway so accurate
-   * modified time is not necessary, it's easier to just skip
-   * the modified time update.
-   *
-   * @param f file path to be deleted.
-   * @param recursive specify deleting recursively or not.
-   * @param skipParentFolderLastModifiedTimeUpdate If true, don't update the 
folder last
-   * modified time.
-   * @return true if and only if the file is deleted
-   * @throws IOException Thrown when fail to delete file or directory.
+   * Delete file or folder with authorization checks. Most of the code
+   * is a duplicate of the actual delete implementation and will be merged
+   * once the performance and functional aspects are guaranteed not to
+   * regress existing delete semantics.
*/
-  public boolean delete(Path f, boolean recursive,
+  private boolean deleteWithAuthEnabled(Path f, boolean recursive,
   boolean skipParentFolderLastModifiedTimeUpdate) throws IOException {
 
-LOG.debug("Deleting file: {}", f.toString());
+LOG.debug("Deleting file: {}", f);
 
 Path absolutePath = makeAbsolute(f);
 Path parentPath = absolutePath.getParent();
 
-performAuthCheck(parentPath, WasbAuthorizationOperations.WRITE, "delete", 
absolutePath);
+// If delete is issued for 'root', parentPath will be null
+// In that case, we perform auth check for root itself before
+// proceeding for deleting contents under root.
+if (parentPath != null) {
+  performAuthCheck(parentPath, WasbAuthorizationOperations.WRITE, 
"delete", absolutePath);
+} else {
+  performAuthCheck(absolutePath, WasbAuthorizationOperations.WRITE, 
"delete", absolutePath);
+}
+
+String key = pathToKey(absolutePath);
+
+// Capture the metadata for the path.
+FileMetadata metaFile = null;
+try {
+  metaFile = store.retrieveMetadata(key);
+} catch (IOException e) {
+
+  Throwable innerException = 

hadoop git commit: YARN-6691. Update YARN daemon startup/shutdown scripts to include Router service. (Giovanni Matteo Fumarola via asuresh)

2017-09-28 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c143708ac -> 53c111590


YARN-6691. Update YARN daemon startup/shutdown scripts to include Router 
service. (Giovanni Matteo Fumarola via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53c11159
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53c11159
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53c11159

Branch: refs/heads/branch-2
Commit: 53c1115908780adcb4e7ce621ff30de211e6aa51
Parents: c143708
Author: Arun Suresh 
Authored: Thu Sep 28 11:41:43 2017 -0700
Committer: Arun Suresh 
Committed: Thu Sep 28 11:41:43 2017 -0700

--
 hadoop-yarn-project/hadoop-yarn/bin/yarn |  8 
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd | 13 -
 hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh | 10 ++
 3 files changed, 30 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c11159/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 552cef4..d476cc6 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -71,6 +71,7 @@ function print_usage(){
   echo "  nodemanager   run a nodemanager on each 
slave"
   echo "  timelineserverrun the timeline server"
   echo "  rmadmin   admin tools"
+  echo "  routerrun the Router daemon"
   echo "  sharedcachemanagerrun the SharedCacheManager 
daemon"
   echo "  scmadmin  SharedCacheManager admin tools"
   echo "  version   print the version"
@@ -177,6 +178,9 @@ fi
 if [ -d "$HADOOP_YARN_HOME/build/tools" ]; then
   CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/build/tools
 fi
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-router/target/classes" ]; 
then
+  
CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-router/target/classes
+fi
 
 CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/${YARN_DIR}/*
 CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/${YARN_LIB_JARS_DIR}/*
@@ -220,6 +224,10 @@ if [ "$COMMAND" = "classpath" ] ; then
 elif [ "$COMMAND" = "rmadmin" ] ; then
   CLASS='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
   YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "router" ] ; then
+  CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/rt-config/log4j.properties
+  CLASS='org.apache.hadoop.yarn.server.router.Router'
+  YARN_OPTS="$YARN_OPTS $YARN_ROUTER_OPTS"
 elif [ "$COMMAND" = "scmadmin" ] ; then
   CLASS='org.apache.hadoop.yarn.client.SCMAdmin'
   YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c11159/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index 3cd57a7..230e88c 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -138,6 +138,10 @@ if "%1" == "--loglevel" (
 set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\tools
   )
 
+  if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-router\target\classes (
+set 
CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-router\target\classes
+  )
+
   set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\*
   set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\*
 
@@ -151,7 +155,7 @@ if "%1" == "--loglevel" (
 
   set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar 
^
  application applicationattempt cluster container node queue logs 
daemonlog historyserver ^
- timelineserver classpath
+ timelineserver router classpath
   for %%i in ( %yarncommands% ) do (
 if %yarn-command% == %%i set yarncommand=true
   )
@@ -242,6 +246,12 @@ goto :eof
   )
   goto :eof
 
+:router
+  set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\router-config\log4j.properties
+  set CLASS=org.apache.hadoop.yarn.server.router.Router
+  set YARN_OPTS=%YARN_OPTS% %HADOOP_ROUTER_OPTS%
+  goto :eof
+
 :nodemanager
   set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\nm-config\log4j.properties
   set CLASS=org.apache.hadoop.yarn.server.nodemanager.NodeManager
@@ -311,6 +321,7 @@ goto :eof
  @echo where COMMAND is one of:
   @echo   resourcemanager  run the ResourceManager
   @echo   nodemanager  run a nodemanager on each slave
+  @echo   router   run the Router daemon
   @echo   timelineserver   run the 

hadoop git commit: Updating maven version to 3.0.0-beta1 for release

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0-beta1 2223393ad -> 1002c582d


Updating maven version to 3.0.0-beta1 for release


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1002c582
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1002c582
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1002c582

Branch: refs/heads/branch-3.0.0-beta1
Commit: 1002c582d86ae8689c497c3d31b73f1ab92d5e29
Parents: 2223393
Author: Andrew Wang 
Authored: Thu Sep 28 11:32:31 2017 -0700
Committer: Andrew Wang 
Committed: Thu Sep 28 11:32:31 2017 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/hadoop-sls/pom.xml

hadoop git commit: Preparing for 3.0.0 GA development

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 59453dad8 -> 5a4f37019


Preparing for 3.0.0 GA development


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a4f3701
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a4f3701
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a4f3701

Branch: refs/heads/branch-3.0
Commit: 5a4f37019ae911f25dc98dbd74899d99f31ff1c5
Parents: 59453da
Author: Andrew Wang 
Authored: Thu Sep 28 11:31:54 2017 -0700
Committer: Andrew Wang 
Committed: Thu Sep 28 11:32:01 2017 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/hadoop-sls/pom.xml  | 4 ++--
 

[2/4] hadoop git commit: YARN-5216. Expose configurable preemption policy for OPPORTUNISTIC containers running on the NM. (Hitesh Sharma via asuresh)

2017-09-28 Thread asuresh
YARN-5216. Expose configurable preemption policy for OPPORTUNISTIC containers 
running on the NM. (Hitesh Sharma via asuresh)

(cherry picked from commit 4f8194430fc6a69d9cc99b78828fd7045d5683e8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8eebee50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8eebee50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8eebee50

Branch: refs/heads/branch-3.0
Commit: 8eebee5094e9112920793e45a08b78c464bc7787
Parents: dd51a74
Author: Arun Suresh 
Authored: Sat Dec 24 17:16:52 2016 -0800
Committer: Arun Suresh 
Committed: Thu Sep 28 11:24:20 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   9 ++
 .../src/main/resources/yarn-default.xml |   9 ++
 .../containermanager/container/Container.java   |   2 +
 .../container/ContainerImpl.java|  32 --
 .../scheduler/ContainerScheduler.java   |  84 ---
 .../TestContainerSchedulerQueuing.java  | 103 +++
 .../nodemanager/webapp/MockContainer.java   |   5 +
 7 files changed, 218 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8eebee50/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 572bab9..9985458 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1034,6 +1034,15 @@ public class YarnConfiguration extends Configuration {
   NM_PREFIX + "container-retry-minimum-interval-ms";
   public static final int DEFAULT_NM_CONTAINER_RETRY_MINIMUM_INTERVAL_MS = 
1000;
 
+  /**
+   * Use container pause as the preemption policy over kill in the container
+   * queue at a NodeManager.
+   **/
+  public static final String NM_CONTAINER_QUEUING_USE_PAUSE_FOR_PREEMPTION =
+  NM_PREFIX + "opportunistic-containers-use-pause-for-preemption";
+  public static final boolean
+  DEFAULT_NM_CONTAINER_QUEUING_USE_PAUSE_FOR_PREEMPTION = false;
+
   /** Interval at which the delayed token removal thread runs */
   public static final String RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS =
   RM_PREFIX + "delayed.delegation-token.removal-interval-ms";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8eebee50/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index cf17dfc..4578f5a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3028,6 +3028,15 @@
 
   
 
+  <property>
+    <description>Use container pause as the preemption policy over kill in the container
+    queue at a NodeManager.
+    </description>
+    <name>yarn.nodemanager.opportunistic-containers-use-pause-for-preemption</name>
+    <value>false</value>
+  </property>
+
+  <property>
+
 Error filename pattern, to identify the file in the container's
 Log directory which contain the container's error log. As error file
 redirection is done by client/AM and yarn will not be aware of the error

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8eebee50/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
index ac9fbb7..ae83b88 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
+++ 

[3/4] hadoop git commit: YARN-6059. Update paused container state in the NM state store. (Hitesh Sharma via asuresh)

2017-09-28 Thread asuresh
YARN-6059. Update paused container state in the NM state store. (Hitesh Sharma 
via asuresh)

(cherry picked from commit 66ca0a65408521d5f9b080dd16b353b49fb8eaea)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed855e2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed855e2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed855e2e

Branch: refs/heads/branch-3.0
Commit: ed855e2eef52320c1667b4f55df84aa827adde70
Parents: 8eebee5
Author: Arun Suresh 
Authored: Tue Sep 12 12:22:00 2017 -0700
Committer: Arun Suresh 
Committed: Thu Sep 28 11:27:43 2017 -0700

--
 .../container/ContainerImpl.java|  16 ++-
 .../launcher/ContainerLaunch.java   |  28 -
 .../launcher/ContainersLauncher.java|  10 ++
 .../launcher/ContainersLauncherEventType.java   |   3 +-
 .../launcher/RecoverPausedContainerLaunch.java  | 124 +++
 .../launcher/RecoveredContainerLaunch.java  |   3 +-
 .../recovery/NMLeveldbStateStoreService.java|  42 ++-
 .../recovery/NMNullStateStoreService.java   |   9 ++
 .../recovery/NMStateStoreService.java   |  26 +++-
 .../recovery/NMMemoryStateStoreService.java |  13 ++
 .../TestNMLeveldbStateStoreService.java |  17 +++
 11 files changed, 273 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed855e2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 9e7a664..8efff31 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -834,10 +834,18 @@ public class ContainerImpl implements Container {
 
   @SuppressWarnings("unchecked") // dispatcher not typed
   private void sendScheduleEvent() {
-dispatcher.getEventHandler().handle(
-new ContainerSchedulerEvent(this,
-ContainerSchedulerEventType.SCHEDULE_CONTAINER)
-);
+if (recoveredStatus == RecoveredContainerStatus.PAUSED) {
+  // Recovery is not supported for paused container so we raise the
+  // launch event which will proceed to kill the paused container instead
+  // of raising the schedule event.
+  ContainersLauncherEventType launcherEvent;
+  launcherEvent = ContainersLauncherEventType.RECOVER_PAUSED_CONTAINER;
+  dispatcher.getEventHandler()
+  .handle(new ContainersLauncherEvent(this, launcherEvent));
+} else {
+  dispatcher.getEventHandler().handle(new ContainerSchedulerEvent(this,
+  ContainerSchedulerEventType.SCHEDULE_CONTAINER));
+}
   }
 
   @SuppressWarnings("unchecked") // dispatcher not typed

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed855e2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 89dfdd1..e254887 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -837,6 +837,14 @@ public class ContainerLaunch implements Callable<Integer> {
   dispatcher.getEventHandler().handle(new ContainerEvent(
   containerId,
   

[4/4] hadoop git commit: YARN-7240. Add more states and transitions to stabilize the NM Container state machine. (Kartheek Muthyala via asuresh)

2017-09-28 Thread asuresh
YARN-7240. Add more states and transitions to stabilize the NM Container state 
machine. (Kartheek Muthyala via asuresh)

(cherry picked from commit df800f6cf3ea663daf4081ebe784808b08d9366d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59453dad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59453dad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59453dad

Branch: refs/heads/branch-3.0
Commit: 59453dad8c39590dc16bef5cdf4fdae6618c9fa2
Parents: ed855e2
Author: Arun Suresh 
Authored: Mon Sep 25 14:11:55 2017 -0700
Committer: Arun Suresh 
Committed: Thu Sep 28 11:29:26 2017 -0700

--
 .../containermanager/ContainerManagerImpl.java  |  41 +---
 .../container/ContainerEventType.java   |   6 +-
 .../container/ContainerImpl.java| 174 --
 .../container/ContainerState.java   |   3 +-
 .../container/UpdateContainerTokenEvent.java|  86 +++
 .../scheduler/ContainerScheduler.java   | 114 -
 .../UpdateContainerSchedulerEvent.java  |  46 ++--
 .../BaseContainerManagerTest.java   |   2 +
 .../containermanager/TestContainerManager.java  | 229 ++-
 .../TestContainerSchedulerQueuing.java  | 101 
 10 files changed, 660 insertions(+), 142 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59453dad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index e497f62..d12892e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -20,6 +20,7 @@ package 
org.apache.hadoop.yarn.server.nodemanager.containermanager;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.ByteString;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.UpdateContainerTokenEvent;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -144,7 +145,6 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.Contai
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler.ContainerScheduler;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler.ContainerSchedulerEventType;
 
-import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler.UpdateContainerSchedulerEvent;
 import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
 import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredApplicationsState;
@@ -1251,29 +1251,6 @@ public class ContainerManagerImpl extends 
CompositeService implements
   + " [" + containerTokenIdentifier.getVersion() + "]");
 }
 
-// Check container state
-org.apache.hadoop.yarn.server.nodemanager.
-containermanager.container.ContainerState currentState =
-container.getContainerState();
-EnumSet allowedStates = EnumSet.of(
-org.apache.hadoop.yarn.server.nodemanager.containermanager.container
-.ContainerState.RUNNING,
-org.apache.hadoop.yarn.server.nodemanager.containermanager.container
-.ContainerState.SCHEDULED,
-org.apache.hadoop.yarn.server.nodemanager.containermanager.container
-.ContainerState.LOCALIZING,
-org.apache.hadoop.yarn.server.nodemanager.containermanager.container
-.ContainerState.REINITIALIZING,
-org.apache.hadoop.yarn.server.nodemanager.containermanager.container
-.ContainerState.RELAUNCHING);
-if (!allowedStates.contains(currentState)) {
-  throw RPCUtil.getRemoteException("Container " + containerId.toString()
-  + " is in " + currentState.name() + " state."
-  + " Resource can only be changed when a container is in"
-

[1/4] hadoop git commit: YARN-5292. NM Container lifecycle and state transitions to support for PAUSED container state. (Hitesh Sharma via asuresh)

2017-09-28 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 2223393ad -> 59453dad8


YARN-5292. NM Container lifecycle and state transitions to support for PAUSED 
container state. (Hitesh Sharma via asuresh)

(cherry picked from commit 864fbacd4548004b1de8b0812627976acd22aff5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd51a746
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd51a746
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd51a746

Branch: refs/heads/branch-3.0
Commit: dd51a7460134b9555dfa5f87fcbb5e5ede8deae0
Parents: 2223393
Author: Arun Suresh 
Authored: Fri Dec 9 07:51:03 2016 -0800
Committer: Arun Suresh 
Committed: Thu Sep 28 11:22:58 2017 -0700

--
 .../hadoop/yarn/api/records/ContainerState.java |   7 +-
 .../src/main/proto/yarn_protos.proto|   1 +
 .../server/nodemanager/ContainerExecutor.java   |  22 +++
 .../container/ContainerEventType.java   |   6 +-
 .../container/ContainerImpl.java| 170 ++-
 .../container/ContainerPauseEvent.java  |  40 +
 .../container/ContainerResumeEvent.java |  39 +
 .../container/ContainerState.java   |   3 +-
 .../launcher/ContainerLaunch.java   |  90 +-
 .../launcher/ContainersLauncher.java|  32 
 .../launcher/ContainersLauncherEventType.java   |   3 +
 .../scheduler/ContainerSchedulerEventType.java  |   1 +
 .../container/TestContainer.java|  51 ++
 13 files changed, 454 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd51a746/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
index 696fe06..45e5bd4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java
@@ -33,11 +33,14 @@ public enum ContainerState {
   
   /** Running container */
   RUNNING, 
-  
+
   /** Completed container */
   COMPLETE,
 
   /** Scheduled (awaiting resources) at the NM. */
   @InterfaceStability.Unstable
-  SCHEDULED
+  SCHEDULED,
+
+  /** Paused at the NM. */
+  PAUSED
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd51a746/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 8354fd2..c2b0e5e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -83,6 +83,7 @@ enum ContainerStateProto {
   C_RUNNING = 2;
   C_COMPLETE = 3;
   C_SCHEDULED = 4;
+  C_PAUSED = 5;
 }
 
 message ContainerProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd51a746/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 072cca7..da50d7a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -700,6 +700,28 @@ public abstract class ContainerExecutor implements 
Configurable {
   }
 
   /**
+   * Pause the container. The default implementation is to raise a kill event.
+   * Specific executor implementations can override this behavior.
+   * @param container
+   *  the Container
+   */
+  public void pauseContainer(Container 

[hadoop] Git Push Summary

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0-beta1 [created] 2223393ad

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14851 LambdaTestUtils.eventually() doesn't spin on Assertion failures. Contributed by Steve Loughran

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 46031d84f -> 2223393ad


HADOOP-14851 LambdaTestUtils.eventually() doesn't spin on Assertion failures.  
Contributed by Steve Loughran

(cherry picked from commit 180e814b081d3707c95641171d649b547db41a04)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2223393a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2223393a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2223393a

Branch: refs/heads/branch-3.0
Commit: 2223393ad1d5ffdd62da79e1546de79c6259dc12
Parents: 46031d8
Author: Aaron Fabbri 
Authored: Fri Sep 8 19:26:27 2017 -0700
Committer: Andrew Wang 
Committed: Thu Sep 28 10:25:17 2017 -0700

--
 .../org/apache/hadoop/test/LambdaTestUtils.java |  68 +++---
 .../apache/hadoop/test/TestLambdaTestUtils.java | 127 +--
 2 files changed, 163 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2223393a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
index 00cfa44..3ea9ab8 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
@@ -70,7 +70,7 @@ public final class LambdaTestUtils {
  * @throws Exception if the handler wishes to raise an exception
  * that way.
  */
-Exception evaluate(int timeoutMillis, Exception caught) throws Exception;
+Throwable evaluate(int timeoutMillis, Throwable caught) throws Throwable;
   }
 
   /**
@@ -116,7 +116,7 @@ public final class LambdaTestUtils {
 Preconditions.checkNotNull(timeoutHandler);
 
 long endTime = Time.now() + timeoutMillis;
-Exception ex = null;
+Throwable ex = null;
 boolean running = true;
 int iterations = 0;
 while (running) {
@@ -128,9 +128,11 @@ public final class LambdaTestUtils {
 // the probe failed but did not raise an exception. Reset any
 // exception raised by a previous probe failure.
 ex = null;
-  } catch (InterruptedException | FailFastException e) {
+  } catch (InterruptedException
+  | FailFastException
+  | VirtualMachineError e) {
 throw e;
-  } catch (Exception e) {
+  } catch (Throwable e) {
 LOG.debug("eventually() iteration {}", iterations, e);
 ex = e;
   }
@@ -145,15 +147,20 @@ public final class LambdaTestUtils {
   }
 }
 // timeout
-Exception evaluate = timeoutHandler.evaluate(timeoutMillis, ex);
-if (evaluate == null) {
-  // bad timeout handler logic; fall back to GenerateTimeout so the
-  // underlying problem isn't lost.
-  LOG.error("timeout handler {} did not throw an exception ",
-  timeoutHandler);
-  evaluate = new GenerateTimeout().evaluate(timeoutMillis, ex);
+Throwable evaluate;
+try {
+  evaluate = timeoutHandler.evaluate(timeoutMillis, ex);
+  if (evaluate == null) {
+// bad timeout handler logic; fall back to GenerateTimeout so the
+// underlying problem isn't lost.
+LOG.error("timeout handler {} did not throw an exception ",
+timeoutHandler);
+evaluate = new GenerateTimeout().evaluate(timeoutMillis, ex);
+  }
+} catch (Throwable throwable) {
+  evaluate = throwable;
 }
-throw evaluate;
+return raise(evaluate);
   }
 
   /**
@@ -217,6 +224,7 @@ public final class LambdaTestUtils {
* @throws Exception the last exception thrown before timeout was triggered
* @throws FailFastException if raised -without any retry attempt.
* @throws InterruptedException if interrupted during the sleep operation.
+   * @throws OutOfMemoryError you've run out of memory.
*/
   public static <T> T eventually(int timeoutMillis,
       Callable<T> eval,
@@ -224,7 +232,7 @@ public final class LambdaTestUtils {
 Preconditions.checkArgument(timeoutMillis >= 0,
 "timeoutMillis must be >= 0");
 long endTime = Time.now() + timeoutMillis;
-Exception ex;
+Throwable ex;
 boolean running;
 int sleeptime;
 int iterations = 0;
@@ -232,10 +240,12 @@ public final class LambdaTestUtils {
   iterations++;
   try {
 return eval.call();
-  } catch (InterruptedException | FailFastException e) {
+  } catch (InterruptedException
+  | FailFastException
+  | VirtualMachineError e) {
  

hadoop git commit: HDFS-12404. Rename hdfs config authorization.provider.bypass.users to attributes.provider.bypass.users.

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 d9dd85673 -> 46031d84f


HDFS-12404. Rename hdfs config authorization.provider.bypass.users to 
attributes.provider.bypass.users.

(cherry picked from commit 3b3be355b35d08a78d9dcd647650812a2d28207b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46031d84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46031d84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46031d84

Branch: refs/heads/branch-3.0
Commit: 46031d84fb071ff1bfda9e41a82e551cf0a198c2
Parents: d9dd856
Author: Manoj Govindassamy 
Authored: Thu Sep 7 17:20:42 2017 -0700
Committer: Andrew Wang 
Committed: Thu Sep 28 10:22:25 2017 -0700

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46031d84/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index d778d45..43e87c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4158,11 +4158,11 @@
 
 
 
-  dfs.namenode.authorization.provider.bypass.users
+  dfs.namenode.inode.attributes.provider.bypass.users
   
   
 A list of user principals (in secure cluster) or user names (in insecure
-cluster) for whom the external attribute provider will be bypassed for all
+cluster) for whom the external attributes provider will be bypassed for all
 operations. This means file attributes stored in HDFS instead of the
 external provider will be used for permission checking and be returned when
 requested.


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HDFS-12560. Remove the extra word it in HdfsUserGuide.md. Contributed by fang zhenyi.

2017-09-28 Thread cliang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7c34ceaf4 -> 8e1bd114e


HDFS-12560. Remove the extra word it in HdfsUserGuide.md. Contributed by fang 
zhenyi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e1bd114
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e1bd114
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e1bd114

Branch: refs/heads/trunk
Commit: 8e1bd114ee718c42cd3f3275bee20f5d401a1d99
Parents: 7c34cea
Author: Chen Liang 
Authored: Thu Sep 28 10:22:27 2017 -0700
Committer: Chen Liang 
Committed: Thu Sep 28 10:22:27 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e1bd114/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
index 7a06c41..6f707f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
@@ -257,7 +257,7 @@ During start up the NameNode loads the file system state 
from the fsimage and th
 fsck
 
 
-HDFS supports the fsck command to check for various inconsistencies. It it is 
designed for reporting problems with various files, for example, missing blocks 
for a file or under-replicated blocks. Unlike a traditional fsck utility for 
native file systems, this command does not correct the errors it detects. 
Normally NameNode automatically corrects most of the recoverable failures. By 
default fsck ignores open files but provides an option to select all files 
during reporting. The HDFS fsck command is not a Hadoop shell command. It can 
be run as `bin/hdfs fsck`. For command usage, see 
[fsck](./HDFSCommands.html#fsck). fsck can be run on the whole file system or 
on a subset of files.
+HDFS supports the fsck command to check for various inconsistencies. It is 
designed for reporting problems with various files, for example, missing blocks 
for a file or under-replicated blocks. Unlike a traditional fsck utility for 
native file systems, this command does not correct the errors it detects. 
Normally NameNode automatically corrects most of the recoverable failures. By 
default fsck ignores open files but provides an option to select all files 
during reporting. The HDFS fsck command is not a Hadoop shell command. It can 
be run as `bin/hdfs fsck`. For command usage, see 
[fsck](./HDFSCommands.html#fsck). fsck can be run on the whole file system or 
on a subset of files.
 
 fetchdt
 ---


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HDFS-12409. Add metrics of execution time of different stages in EC recovery task. (Lei (Eddy) Xu)

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 58eacdbb4 -> d9dd85673


HDFS-12409. Add metrics of execution time of different stages in EC recovery 
task. (Lei (Eddy) Xu)

(cherry picked from commit 73aed34dffa5e79f6f819137b69054c1dee2d4dd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9dd8567
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9dd8567
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9dd8567

Branch: refs/heads/branch-3.0
Commit: d9dd85673962ba22c57c307efe8ad3fca3a1604a
Parents: 58eacdb
Author: Lei Xu 
Authored: Wed Sep 13 17:10:16 2017 -0700
Committer: Andrew Wang 
Committed: Thu Sep 28 10:21:54 2017 -0700

--
 .../erasurecode/StripedBlockReconstructor.java| 11 +++
 .../server/datanode/metrics/DataNodeMetrics.java  | 18 ++
 .../TestDataNodeErasureCodingMetrics.java |  7 +++
 3 files changed, 36 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9dd8567/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
index bac013a..34e58ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
@@ -22,6 +22,7 @@ import java.nio.ByteBuffer;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
+import org.apache.hadoop.util.Time;
 
 /**
  * StripedBlockReconstructor reconstruct one or more missed striped block in
@@ -83,18 +84,28 @@ class StripedBlockReconstructor extends StripedReconstructor
   final int toReconstructLen =
   (int) Math.min(getStripedReader().getBufferSize(), remaining);
 
+  long start = Time.monotonicNow();
   // step1: read from minimum source DNs required for reconstruction.
   // The returned success list is the source DNs we do real read from
   getStripedReader().readMinimumSources(toReconstructLen);
+  long readEnd = Time.monotonicNow();
 
   // step2: decode to reconstruct targets
   reconstructTargets(toReconstructLen);
+  long decodeEnd = Time.monotonicNow();
 
   // step3: transfer data
   if (stripedWriter.transferData2Targets() == 0) {
 String error = "Transfer failed for all targets.";
 throw new IOException(error);
   }
+  long writeEnd = Time.monotonicNow();
+
+  // Only the succeed reconstructions are recorded.
+  final DataNodeMetrics metrics = getDatanode().getMetrics();
+  metrics.incrECReconstructionReadTime(readEnd - start);
+  metrics.incrECReconstructionDecodingTime(decodeEnd - readEnd);
+  metrics.incrECReconstructionWriteTime(writeEnd - decodeEnd);
 
   updatePositionInBlock(toReconstructLen);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9dd8567/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index a8a6919..58a2f65 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -151,6 +151,12 @@ public class DataNodeMetrics {
   MutableCounterLong ecReconstructionBytesWritten;
   @Metric("Bytes remote read by erasure coding worker")
   MutableCounterLong ecReconstructionRemoteBytesRead;
+  @Metric("Milliseconds spent on read by erasure coding worker")
+  private MutableCounterLong ecReconstructionReadTimeMillis;
+  @Metric("Milliseconds spent on decoding by erasure coding worker")
+  private MutableCounterLong ecReconstructionDecodingTimeMillis;
+  @Metric("Milliseconds spent on write by erasure coding worker")
+  private MutableCounterLong ecReconstructionWriteTimeMillis;
 
   final MetricsRegistry registry = new MetricsRegistry("datanode");
   final 

hadoop git commit: HDFS-12412. Change ErasureCodingWorker.stripedReadPool to cached thread pool. (Lei (Eddy) Xu)

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 7f371da52 -> 58eacdbb4


HDFS-12412. Change ErasureCodingWorker.stripedReadPool to cached thread pool. 
(Lei (Eddy) Xu)

(cherry picked from commit 123342cd0759ff88801d4f5ab10987f6e3f344b0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58eacdbb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58eacdbb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58eacdbb

Branch: refs/heads/branch-3.0
Commit: 58eacdbb42471dbad21330bbb96433d0f95f85d5
Parents: 7f371da
Author: Lei Xu 
Authored: Tue Sep 12 18:12:07 2017 -0700
Committer: Andrew Wang 
Committed: Thu Sep 28 10:21:24 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java|  2 --
 .../datanode/erasurecode/ErasureCodingWorker.java | 14 +++---
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml   |  9 -
 .../src/site/markdown/HDFSErasureCoding.md|  1 -
 4 files changed, 7 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58eacdbb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 512ca20..b056e29 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -569,8 +569,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   "dfs.namenode.ec.system.default.policy";
   public static final String  DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT =
   "RS-6-3-1024k";
-  public static final String  
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY = 
"dfs.datanode.ec.reconstruction.stripedread.threads";
-  public static final int 
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT = 20;
   public static final String  
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY = 
"dfs.datanode.ec.reconstruction.stripedread.buffer.size";
   public static final int 
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_DEFAULT = 64 * 1024;
   public static final String  
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY = 
"dfs.datanode.ec.reconstruction.stripedread.timeout.millis";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58eacdbb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index 70c5378..63498bc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -55,19 +55,19 @@ public final class ErasureCodingWorker {
 this.datanode = datanode;
 this.conf = conf;
 
-initializeStripedReadThreadPool(conf.getInt(
-DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY,
-DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT));
+initializeStripedReadThreadPool();
 initializeStripedBlkReconstructionThreadPool(conf.getInt(
 DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_THREADS_KEY,
 DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_THREADS_DEFAULT));
   }
 
-  private void initializeStripedReadThreadPool(int num) {
-LOG.debug("Using striped reads; pool threads={}", num);
+  private void initializeStripedReadThreadPool() {
+LOG.debug("Using striped reads");
 
-stripedReadPool = new ThreadPoolExecutor(1, num, 60, TimeUnit.SECONDS,
-new SynchronousQueue(),
+// Essentially, this is a cachedThreadPool.
+stripedReadPool = new ThreadPoolExecutor(0, Integer.MAX_VALUE,
+60, TimeUnit.SECONDS,
+new SynchronousQueue<>(),
 new Daemon.DaemonFactory() {
   private final AtomicInteger threadIndex = new AtomicInteger(0);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58eacdbb/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 

hadoop git commit: HDFS-12496. Make QuorumJournalManager timeout properties configurable. Contributed by Ajay Kumar.

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 f8df655f3 -> 14a05ee4c


HDFS-12496. Make QuorumJournalManager timeout properties configurable. 
Contributed by Ajay Kumar.

(cherry picked from commit b9e423fa8d30ea89244f6ec018a8064cc87d94a9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14a05ee4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14a05ee4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14a05ee4

Branch: refs/heads/branch-3.0
Commit: 14a05ee4c1e97eedf1949ed3ba5f1683034262bd
Parents: f8df655
Author: Arpit Agarwal 
Authored: Thu Sep 21 08:44:43 2017 -0700
Committer: Andrew Wang 
Committed: Thu Sep 28 10:19:11 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++
 .../qjournal/client/QuorumJournalManager.java   | 39 +---
 .../src/main/resources/hdfs-default.xml | 11 ++
 3 files changed, 33 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14a05ee4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 319654c..512ca20 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -727,6 +727,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   "dfs.edit.log.transfer.bandwidthPerSec";
   public static final long DFS_EDIT_LOG_TRANSFER_RATE_DEFAULT = 0; //no 
throttling
 
+  public static final String DFS_QJM_OPERATIONS_TIMEOUT =
+  "dfs.qjm.operations.timeout";
+  public static final long DFS_QJM_OPERATIONS_TIMEOUT_DEFAULT = 60000;
+
   // Datanode File IO Stats
   public static final String DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY =
   "dfs.datanode.enable.fileio.fault.injection";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14a05ee4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index 97c0050..f66e2c0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -27,6 +27,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.PriorityQueue;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
@@ -76,18 +77,10 @@ public class QuorumJournalManager implements JournalManager 
{
   private final int newEpochTimeoutMs;
   private final int writeTxnsTimeoutMs;
 
-  // Since these don't occur during normal operation, we can
-  // use rather lengthy timeouts, and don't need to make them
-  // configurable.
-  private static final int FORMAT_TIMEOUT_MS            = 60000;
-  private static final int HASDATA_TIMEOUT_MS           = 60000;
-  private static final int CAN_ROLL_BACK_TIMEOUT_MS     = 60000;
-  private static final int FINALIZE_TIMEOUT_MS          = 60000;
-  private static final int PRE_UPGRADE_TIMEOUT_MS       = 60000;
-  private static final int ROLL_BACK_TIMEOUT_MS         = 60000;
-  private static final int DISCARD_SEGMENTS_TIMEOUT_MS  = 60000;
-  private static final int UPGRADE_TIMEOUT_MS           = 60000;
-  private static final int GET_JOURNAL_CTIME_TIMEOUT_MS = 60000;
+  // This timeout is used for calls that don't occur during normal operation
+  // e.g. format, upgrade operations and a few others. So we can use rather
+  // lengthy timeouts by default.
+  private final int timeoutMs;
   
   private final Configuration conf;
   private final URI uri;
@@ -141,6 +134,10 @@ public class QuorumJournalManager implements 
JournalManager {
 this.writeTxnsTimeoutMs = conf.getInt(
 DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_KEY,
 DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT);
+this.timeoutMs = (int) conf.getTimeDuration(DFSConfigKeys
+.DFS_QJM_OPERATIONS_TIMEOUT,
+DFSConfigKeys.DFS_QJM_OPERATIONS_TIMEOUT_DEFAULT, TimeUnit
+

hadoop git commit: HDFS-12470. DiskBalancer: Some tests create plan files under system directory. Contributed by Hanisha Koneru.

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 14a05ee4c -> 7f371da52


HDFS-12470. DiskBalancer: Some tests create plan files under system directory. 
Contributed by Hanisha Koneru.

(cherry picked from commit a2dcba18531c6fa4b76325f5132773f12ddfc6d5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f371da5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f371da5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f371da5

Branch: refs/heads/branch-3.0
Commit: 7f371da52ab6c130d5d6a579e8007dcbaee8
Parents: 14a05ee
Author: Arpit Agarwal 
Authored: Mon Sep 18 09:53:24 2017 -0700
Committer: Andrew Wang 
Committed: Thu Sep 28 10:19:49 2017 -0700

--
 .../server/diskbalancer/command/TestDiskBalancerCommand.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f371da5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index b0b0b0c..1cebae0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -476,9 +476,12 @@ public class TestDiskBalancerCommand {
   public void testPlanJsonNode() throws Exception {
 final String planArg = String.format("-%s %s", PLAN,
 "a87654a9-54c7-4693-8dd9-c9c7021dc340");
+final Path testPath = new Path(
+PathUtils.getTestPath(getClass()),
+GenericTestUtils.getMethodName());
 final String cmdLine = String
 .format(
-"hdfs diskbalancer %s", planArg);
+"hdfs diskbalancer -out %s %s", testPath, planArg);
 runCommand(cmdLine);
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HDFS-12530. Processor argument in Offline Image Viewer should be case insensitive. Contributed by Hanisha Koneru.

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 7b49de5e3 -> f8df655f3


HDFS-12530. Processor argument in Offline Image Viewer should be case 
insensitive. Contributed by Hanisha Koneru.

(cherry picked from commit 08fca508e66e8eddc5d8fd1608ec0c9cd54fc990)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8df655f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8df655f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8df655f

Branch: refs/heads/branch-3.0
Commit: f8df655f315d517a62e75962718da8bb30c3c0ec
Parents: 7b49de5
Author: Arpit Agarwal 
Authored: Fri Sep 22 09:47:57 2017 -0700
Committer: Andrew Wang 
Committed: Thu Sep 28 10:18:22 2017 -0700

--
 .../tools/offlineImageViewer/OfflineImageViewerPB.java   | 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8df655f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
index c1141f3..0f2ac81 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
@@ -33,6 +33,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * OfflineImageViewerPB to dump the contents of an Hadoop image file to XML or
@@ -174,8 +175,8 @@ public class OfflineImageViewerPB {
 Configuration conf = new Configuration();
 try (PrintStream out = outputFile.equals("-") ?
 System.out : new PrintStream(outputFile, "UTF-8")) {
-  switch (processor) {
-  case "FileDistribution":
+  switch (StringUtils.toUpperCase(processor)) {
+  case "FILEDISTRIBUTION":
 long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
 int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
 boolean formatOutput = cmd.hasOption("format");
@@ -186,7 +187,7 @@ public class OfflineImageViewerPB {
 new PBImageXmlWriter(conf, out).visit(new RandomAccessFile(inputFile,
 "r"));
 break;
-  case "ReverseXML":
+  case "REVERSEXML":
 try {
   OfflineImageReconstructor.run(inputFile, outputFile);
 } catch (Exception e) {
@@ -196,14 +197,14 @@ public class OfflineImageViewerPB {
   System.exit(1);
 }
 break;
-  case "Web":
+  case "WEB":
 String addr = cmd.getOptionValue("addr", "localhost:5978");
 try (WebImageViewer viewer =
 new WebImageViewer(NetUtils.createSocketAddr(addr))) {
   viewer.start(inputFile);
 }
 break;
-  case "Delimited":
+  case "DELIMITED":
 try (PBImageDelimitedTextWriter writer =
 new PBImageDelimitedTextWriter(out, delimiter, tempPath)) {
   writer.visit(new RandomAccessFile(inputFile, "r"));


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6622. Document Docker work as experimental (Contributed by Varun Vasudev)

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 2ab2a9438 -> 7b49de5e3


YARN-6622. Document Docker work as experimental (Contributed by Varun Vasudev)

(cherry picked from commit 6651cbcc72d92caf86b744fa76fba513b36b12c7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b49de5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b49de5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b49de5e

Branch: refs/heads/branch-3.0
Commit: 7b49de5e300f2a51dee33e52026207870fc34ffb
Parents: 2ab2a94
Author: Daniel Templeton 
Authored: Mon Sep 11 16:14:31 2017 -0700
Committer: Andrew Wang 
Committed: Thu Sep 28 10:16:45 2017 -0700

--
 .../hadoop-yarn-site/src/site/markdown/DockerContainers.md | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b49de5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index bf94169..23f4134 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -17,6 +17,12 @@ Launching Applications Using Docker Containers
 
 
 
+Notice
+--
+This feature is experimental and is not complete. Enabling this feature and
+running Docker containers in your cluster has security implications.
+Please do a security analysis before enabling this feature.
+
 Overview
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12458. TestReencryptionWithKMS fails regularly. Contributed by Xiao Chen.

2017-09-28 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 81e787279 -> 2ab2a9438


HDFS-12458. TestReencryptionWithKMS fails regularly. Contributed by Xiao Chen.

(cherry picked from commit 7c34ceaf4fa28e2ecabd6626860bb1c7418e4b8d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ab2a943
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ab2a943
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ab2a943

Branch: refs/heads/branch-3.0
Commit: 2ab2a9438fc203ce094fac3a807472c77b10f38c
Parents: 81e7872
Author: Wei-Chiu Chuang 
Authored: Thu Sep 28 05:09:37 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Sep 28 05:11:49 2017 -0700

--
 .../hadoop/hdfs/server/namenode/TestReencryption.java  | 13 +++--
 .../hdfs/server/namenode/TestReencryptionHandler.java  |  7 ---
 2 files changed, 11 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ab2a943/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
index 33c52bf..4bf6aa4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
@@ -28,7 +28,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -129,11 +128,9 @@ public class TestReencryption {
 conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 3);
 // Adjust configs for re-encrypt test cases
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_REENCRYPT_BATCH_SIZE_KEY, 5);
-conf.setTimeDuration(
-DFSConfigKeys.DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_KEY, 1,
-TimeUnit.SECONDS);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
 cluster.waitActive();
+cluster.waitClusterUp();
 fs = cluster.getFileSystem();
 fsn = cluster.getNamesystem();
 fsWrapper = new FileSystemTestWrapper(fs);
@@ -1284,6 +1281,7 @@ public class TestReencryption {
 fsn = cluster.getNamesystem();
 getEzManager().pauseReencryptForTesting();
 cluster.waitActive();
+cluster.waitClusterUp();
   }
 
   private void waitForReencryptedZones(final int expected)
@@ -1519,6 +1517,7 @@ public class TestReencryption {
 getEzManager().pauseReencryptForTesting();
 dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
 waitForQueuedZones(1);
+getEzManager().pauseReencryptUpdaterForTesting();
 getEzManager().resumeReencryptForTesting();
 
 LOG.info("Waiting for re-encrypt callables to run");
@@ -1529,7 +1528,6 @@ public class TestReencryption {
   }
 }, 100, 1);
 
-getEzManager().pauseReencryptUpdaterForTesting();
 dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.CANCEL);
 
 // now resume updater and verify status.
@@ -1609,7 +1607,7 @@ public class TestReencryption {
 cluster.getConfiguration(0)
 
.unset(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
 cluster.restartNameNodes();
-cluster.waitActive();
+cluster.waitClusterUp();
 
 // test re-encrypt should fail
 try {
@@ -1673,6 +1671,9 @@ public class TestReencryption {
 }
 
 fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+// trigger the background thread to run, without having to
+// wait for DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_KEY
+getHandler().notifyNewSubmission();
 waitForReencryptedFiles(zone.toString(), 10);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ab2a943/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
index f0dd92c..e2035ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
@@ 

hadoop git commit: HDFS-12458. TestReencryptionWithKMS fails regularly. Contributed by Xiao Chen.

2017-09-28 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 28c4957fc -> 7c34ceaf4


HDFS-12458. TestReencryptionWithKMS fails regularly. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c34ceaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c34ceaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c34ceaf

Branch: refs/heads/trunk
Commit: 7c34ceaf4fa28e2ecabd6626860bb1c7418e4b8d
Parents: 28c4957
Author: Wei-Chiu Chuang 
Authored: Thu Sep 28 05:09:37 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Sep 28 05:10:26 2017 -0700

--
 .../hadoop/hdfs/server/namenode/TestReencryption.java  | 13 +++--
 .../hdfs/server/namenode/TestReencryptionHandler.java  |  7 ---
 2 files changed, 11 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c34ceaf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
index 33c52bf..4bf6aa4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
@@ -28,7 +28,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -129,11 +128,9 @@ public class TestReencryption {
 conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 3);
 // Adjust configs for re-encrypt test cases
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_REENCRYPT_BATCH_SIZE_KEY, 5);
-conf.setTimeDuration(
-DFSConfigKeys.DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_KEY, 1,
-TimeUnit.SECONDS);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
 cluster.waitActive();
+cluster.waitClusterUp();
 fs = cluster.getFileSystem();
 fsn = cluster.getNamesystem();
 fsWrapper = new FileSystemTestWrapper(fs);
@@ -1284,6 +1281,7 @@ public class TestReencryption {
 fsn = cluster.getNamesystem();
 getEzManager().pauseReencryptForTesting();
 cluster.waitActive();
+cluster.waitClusterUp();
   }
 
   private void waitForReencryptedZones(final int expected)
@@ -1519,6 +1517,7 @@ public class TestReencryption {
 getEzManager().pauseReencryptForTesting();
 dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
 waitForQueuedZones(1);
+getEzManager().pauseReencryptUpdaterForTesting();
 getEzManager().resumeReencryptForTesting();
 
 LOG.info("Waiting for re-encrypt callables to run");
@@ -1529,7 +1528,6 @@ public class TestReencryption {
   }
 }, 100, 1);
 
-getEzManager().pauseReencryptUpdaterForTesting();
 dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.CANCEL);
 
 // now resume updater and verify status.
@@ -1609,7 +1607,7 @@ public class TestReencryption {
 cluster.getConfiguration(0)
 
.unset(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
 cluster.restartNameNodes();
-cluster.waitActive();
+cluster.waitClusterUp();
 
 // test re-encrypt should fail
 try {
@@ -1673,6 +1671,9 @@ public class TestReencryption {
 }
 
 fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+// trigger the background thread to run, without having to
+// wait for DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_KEY
+getHandler().notifyNewSubmission();
 waitForReencryptedFiles(zone.toString(), 10);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c34ceaf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
index f0dd92c..e2035ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
@@ -101,9 +101,9 @@ public class TestReencryptionHandler {
 final StopWatch sw 

hadoop git commit: HDFS-12540. Ozone: node status text reported by SCM is a bit confusing. Contributed by Weiwei Yang.

2017-09-28 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 7213e9a6f -> 5519edce5


HDFS-12540. Ozone: node status text reported by SCM is a bit confusing. 
Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5519edce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5519edce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5519edce

Branch: refs/heads/HDFS-7240
Commit: 5519edce58ac0541c4c8424d286d92ef450557a4
Parents: 7213e9a
Author: Weiwei Yang 
Authored: Thu Sep 28 14:32:43 2017 +0800
Committer: Weiwei Yang 
Committed: Thu Sep 28 14:32:43 2017 +0800

--
 .../org/apache/hadoop/ozone/scm/node/SCMNodeManager.java  | 10 +-
 .../org/apache/hadoop/ozone/scm/node/TestNodeManager.java |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5519edce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
index 6e2805a..9f73606 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java
@@ -320,7 +320,7 @@ public class SCMNodeManager
 if (isOutOfNodeChillMode()) {
   return "Out of chill mode." + getNodeStatus();
 } else {
-  return "Still in chill mode. Waiting on nodes to report in."
+  return "Still in chill mode, waiting on nodes to report in."
   + getNodeStatus();
 }
   }
@@ -330,10 +330,10 @@ public class SCMNodeManager
* @return - String
*/
   private String getNodeStatus() {
-final String chillModeStatus = " %d of out of total "
-+ "%d nodes have reported in.";
-return String.format(chillModeStatus, totalNodes.get(),
-getMinimumChillModeNodes());
+return isOutOfNodeChillMode() ?
+String.format(" %d nodes have reported in.", totalNodes.get()) :
+String.format(" %d nodes reported, minimal %d nodes required.",
+totalNodes.get(), getMinimumChillModeNodes());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5519edce/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java
index c296169..e8bf0ad 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java
@@ -896,7 +896,7 @@ public class TestNodeManager {
   nodeManager.sendHeartbeat(datanodeID, null);
   String status = nodeManager.getChillModeStatus();
   Assert.assertThat(status, CoreMatchers.containsString("Still in chill " +
-  "mode. Waiting on nodes to report in."));
+  "mode, waiting on nodes to report in."));
 
   // Should not exit chill mode since 10 nodes have not heartbeat yet.
   assertFalse(nodeManager.isOutOfNodeChillMode());


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org