[hadoop] 03/05: Add documentation

2022-02-22 Thread gaurava
This is an automated email from the ASF dual-hosted git repository.

gaurava pushed a commit to branch get-x-platform
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 95f1a522d93212163e7b0cf8463b5e316627a0c9
Author: Gautham Banasandra 
AuthorDate: Sun Feb 20 12:02:14 2022 +0530

Add documentation
---
 .../native/libhdfspp/tools/hdfs-copy-to-local/hdfs-copy-to-local.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-copy-to-local/hdfs-copy-to-local.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-copy-to-local/hdfs-copy-to-local.h
index 9b0aa52..0137f1e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-copy-to-local/hdfs-copy-to-local.h
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-copy-to-local/hdfs-copy-to-local.h
@@ -81,6 +81,9 @@ protected:
   [[nodiscard]] virtual bool HandlePath(const std::string &source,
                                         const std::string &target) const;
 
+  /**
+   * @return The name of the tool.
+   */
   [[nodiscard]] virtual std::string GetToolName() const;
 
 private:




[hadoop] 04/05: Remove redundant file

2022-02-22 Thread gaurava
This is an automated email from the ASF dual-hosted git repository.

gaurava pushed a commit to branch get-x-platform
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 76a547fc34a1fa7668befc9b277916646c9a5d32
Author: Gautham Banasandra 
AuthorDate: Sun Feb 20 12:26:22 2022 +0530

Remove redundant file
---
 .../src/main/native/libhdfspp/tools/hdfs_get.cc| 88 --
 1 file changed, 88 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_get.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_get.cc
deleted file mode 100644
index 16dd72d..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_get.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing,
-  software distributed under the License is distributed on an
-  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  KIND, either express or implied.  See the License for the
-  specific language governing permissions and limitations
-  under the License.
-*/
-
-#include <google/protobuf/stubs/common.h>
-#include <unistd.h>
-#include "tools_common.h"
-
-void usage(){
-  std::cout << "Usage: hdfs_get [OPTION] SRC_FILE DST_FILE"
-  << std::endl
-  << std::endl << "Copy SRC_FILE from hdfs to DST_FILE on the local file system."
-  << std::endl
-  << std::endl << "  -h  display this help and exit"
-  << std::endl
-  << std::endl << "Examples:"
-  << std::endl << "hdfs_get hdfs://localhost.localdomain:8020/dir/file /home/usr/myfile"
-  << std::endl << "hdfs_get /dir/file /home/usr/dir/file"
-  << std::endl;
-}
-
-int main(int argc, char *argv[]) {
-  if (argc > 4) {
-    usage();
-    exit(EXIT_FAILURE);
-  }
-
-  int input;
-
-  //Using GetOpt to read in the values
-  opterr = 0;
-  while ((input = getopt(argc, argv, "h")) != -1) {
-    switch (input)
-    {
-    case 'h':
-      usage();
-      exit(EXIT_SUCCESS);
-    case '?':
-      if (isprint(optopt))
-        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
-      else
-        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
-      usage();
-      exit(EXIT_FAILURE);
-    default:
-      exit(EXIT_FAILURE);
-    }
-  }
-
-  std::string uri_path = argv[optind];
-  std::string dest = argv[optind+1];
-
-  //Building a URI object from the given uri_path
-  hdfs::URI uri = hdfs::parse_path_or_exit(uri_path);
-
-  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri, false);
-  if (!fs) {
-    std::cerr << "Could not connect the file system. " << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  std::FILE* dst_file = std::fopen(dest.c_str(), "wb");
-  if(!dst_file){
-    std::cerr << "Unable to open the destination file: " << dest << std::endl;
-    exit(EXIT_FAILURE);
-  }
-  readFile(fs, uri.get_path(), 0, dst_file, false);
-  std::fclose(dst_file);
-
-  // Clean up static data and prevent valgrind memory leaks
-  google::protobuf::ShutdownProtobufLibrary();
-  return 0;
-}




[hadoop] 05/05: Add documentation

2022-02-22 Thread gaurava
This is an automated email from the ASF dual-hosted git repository.

gaurava pushed a commit to branch get-x-platform
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit c29a2316e9e54e9471b6ddafa3b9af6a913705bf
Author: Gautham Banasandra 
AuthorDate: Sun Feb 20 12:37:05 2022 +0530

Add documentation
---
 .../src/main/native/libhdfspp/tools/hdfs-get/hdfs-get.h| 3 +++
 1 file changed, 3 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-get/hdfs-get.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-get/hdfs-get.h
index ceb7478..8153264 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-get/hdfs-get.h
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-get/hdfs-get.h
@@ -37,6 +37,9 @@ public:
   ~Get() override = default;
 
 protected:
+  /**
+   * {@inheritdoc}
+   */
   [[nodiscard]] std::string GetToolName() const override;
 };
 } // namespace hdfs::tools




[hadoop] 02/05: Add unit tests

2022-02-22 Thread gaurava
This is an automated email from the ASF dual-hosted git repository.

gaurava pushed a commit to branch get-x-platform
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit fdbd121e71a92c6c4c120fe1288b3e565e07b946
Author: Gautham Banasandra 
AuthorDate: Sun Feb 20 12:01:07 2022 +0530

Add unit tests
---
 .../native/libhdfspp/tests/tools/CMakeLists.txt|  3 +
 .../native/libhdfspp/tests/tools/hdfs-get-mock.cc  | 56 ++
 .../native/libhdfspp/tests/tools/hdfs-get-mock.h   | 68 ++
 .../libhdfspp/tests/tools/hdfs-tool-tests.cc   | 14 +
 4 files changed, 141 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/CMakeLists.txt
index 56755ad..769e5da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/CMakeLists.txt
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/CMakeLists.txt
@@ -35,6 +35,7 @@ add_executable(hdfs_tool_tests
 hdfs-count-mock.cc
 hdfs-mkdir-mock.cc
 hdfs-rm-mock.cc
+hdfs-get-mock.cc
 main.cc)
 target_include_directories(hdfs_tool_tests PRIVATE
 ../tools
@@ -54,6 +55,7 @@ target_include_directories(hdfs_tool_tests PRIVATE
 ../../tools/hdfs-count
 ../../tools/hdfs-mkdir
 ../../tools/hdfs-rm
+../../tools/hdfs-get
 ../../tools/hdfs-cat)
 target_link_libraries(hdfs_tool_tests PRIVATE
gmock_main
@@ -72,5 +74,6 @@ target_link_libraries(hdfs_tool_tests PRIVATE
hdfs_count_lib
hdfs_mkdir_lib
hdfs_rm_lib
+   hdfs_get_lib
hdfs_cat_lib)
 add_test(hdfs_tool_tests hdfs_tool_tests)
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-get-mock.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-get-mock.cc
new file mode 100644
index 000..713564e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-get-mock.cc
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "hdfs-get-mock.h"
+#include "hdfs-tool-tests.h"
+
+namespace hdfs::tools::test {
+GetMock::~GetMock() = default;
+
+void GetMock::SetExpectations(
+    std::function<std::unique_ptr<GetMock>()> test_case,
+    const std::vector<std::string> &args) const {
+  // Get the pointer to the function that defines the test case
+  const auto test_case_func =
+      test_case.target<std::unique_ptr<GetMock> (*)()>();
+  ASSERT_NE(test_case_func, nullptr);
+
+  // Set the expected method calls and their corresponding arguments for each
+  // test case
+  if (*test_case_func == &CallHelp<GetMock>) {
+    EXPECT_CALL(*this, HandleHelp()).Times(1).WillOnce(testing::Return(true));
+    return;
+  }
+
+  if (*test_case_func == &Pass2Paths<GetMock>) {
+    const auto arg1 = args[0];
+    const auto arg2 = args[1];
+    EXPECT_CALL(*this, HandlePath(arg1, arg2))
+        .Times(1)
+        .WillOnce(testing::Return(true));
+  }
+}
+} // namespace hdfs::tools::test
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-get-mock.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-get-mock.h
new file mode 100644
index 000..535f715
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-get-mock.h
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may 

[hadoop] 01/05: Make HDFS get tool cross platform

2022-02-22 Thread gaurava
This is an automated email from the ASF dual-hosted git repository.

gaurava pushed a commit to branch get-x-platform
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit ab82f9aec055a56e7ada68abf9ca53ad54e0880b
Author: Gautham Banasandra 
AuthorDate: Sun Feb 20 11:59:46 2022 +0530

Make HDFS get tool cross platform
---
 .../src/main/native/libhdfspp/tools/CMakeLists.txt |  3 +-
 .../tools/hdfs-copy-to-local/hdfs-copy-to-local.cc | 14 --
 .../tools/hdfs-copy-to-local/hdfs-copy-to-local.h  |  2 +
 .../native/libhdfspp/tools/hdfs-get/CMakeLists.txt | 27 +++
 .../native/libhdfspp/tools/hdfs-get/hdfs-get.cc| 25 +++
 .../native/libhdfspp/tools/hdfs-get/hdfs-get.h | 44 ++
 .../main/native/libhdfspp/tools/hdfs-get/main.cc   | 52 ++
 7 files changed, 161 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt
index aa82c01..bed78b7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt
@@ -60,8 +60,7 @@ add_subdirectory(hdfs-df)
 
 add_subdirectory(hdfs-du)
 
-add_executable(hdfs_get hdfs_get.cc)
-target_link_libraries(hdfs_get tools_common hdfspp_static)
+add_subdirectory(hdfs-get)
 
 add_subdirectory(hdfs-copy-to-local)
 
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-copy-to-local/hdfs-copy-to-local.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-copy-to-local/hdfs-copy-to-local.cc
index 9219b36..7affa1f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-copy-to-local/hdfs-copy-to-local.cc
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-copy-to-local/hdfs-copy-to-local.cc
@@ -64,7 +64,8 @@ bool CopyToLocal::ValidateConstraints() const {
 
 std::string CopyToLocal::GetDescription() const {
   std::stringstream desc;
-  desc << "Usage: hdfs_copyToLocal [OPTION] SRC_FILE DST_FILE" << std::endl
+  desc << "Usage: hdfs_" << GetToolName() << " [OPTION] SRC_FILE DST_FILE"
+   << std::endl
<< std::endl
<< "Copy SRC_FILE from hdfs to DST_FILE on the local file system."
<< std::endl
@@ -72,16 +73,19 @@ std::string CopyToLocal::GetDescription() const {
<< "  -h  display this help and exit" << std::endl
<< std::endl
<< "Examples:" << std::endl
-   << "hdfs_copyToLocal hdfs://localhost.localdomain:8020/dir/file "
+   << "hdfs_" << GetToolName()
+   << " hdfs://localhost.localdomain:8020/dir/file "
   "/home/usr/myfile"
<< std::endl
-   << "hdfs_copyToLocal /dir/file /home/usr/dir/file" << std::endl;
+   << "hdfs_" << GetToolName() << " /dir/file /home/usr/dir/file"
+   << std::endl;
   return desc.str();
 }
 
 bool CopyToLocal::Do() {
   if (!Initialize()) {
-std::cerr << "Unable to initialize HDFS copyToLocal tool" << std::endl;
+std::cerr << "Unable to initialize HDFS " << GetToolName() << " tool"
+  << std::endl;
 return false;
   }
 
@@ -129,4 +133,6 @@ bool CopyToLocal::HandlePath(const std::string &source,
   std::fclose(dst_file);
   return true;
 }
+
+std::string CopyToLocal::GetToolName() const { return "copyToLocal"; }
 } // namespace hdfs::tools
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-copy-to-local/hdfs-copy-to-local.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-copy-to-local/hdfs-copy-to-local.h
index 6eb8cf1..9b0aa52 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-copy-to-local/hdfs-copy-to-local.h
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-copy-to-local/hdfs-copy-to-local.h
@@ -81,6 +81,8 @@ protected:
   [[nodiscard]] virtual bool HandlePath(const std::string &source,
                                         const std::string &target) const;
 
+  [[nodiscard]] virtual std::string GetToolName() const;
+
 private:
   /**
* A boost data-structure containing the description of positional arguments
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-get/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-get/CMakeLists.txt
new file mode 100644
index 000..367bca6
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-get/CMakeLists.txt
@@ -0,0 +1,27 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed 
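
The shape of this refactor is a classic template method: the base tool owns the
shared usage and description text, and the new Get tool (which, per this
commit, derives from CopyToLocal) overrides only the tool-name hook. A minimal
Java sketch of the same shape (names invented for illustration, not the actual
libhdfspp classes):

    // Base class owns the shared description; subclasses supply the name.
    abstract class HdfsTool {
      protected abstract String getToolName();

      String getDescription() {
        return "Usage: hdfs_" + getToolName() + " [OPTION] SRC_FILE DST_FILE";
      }
    }

    class CopyToLocal extends HdfsTool {
      @Override
      protected String getToolName() { return "copyToLocal"; }
    }

    class Get extends CopyToLocal {
      @Override
      protected String getToolName() { return "get"; }
    }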

[hadoop] branch get-x-platform created (now c29a231)

2022-02-22 Thread gaurava
This is an automated email from the ASF dual-hosted git repository.

gaurava pushed a change to branch get-x-platform
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at c29a231  Add documentation

This branch includes the following new commits:

 new ab82f9a  Make HDFS get tool cross platform
 new fdbd121  Add unit tests
 new 95f1a52  Add documentation
 new 76a547f  Remove redundant file
 new c29a231  Add documentation

The 5 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.





[hadoop] branch branch-3.2 updated: HDFS-11041. Unable to unregister FsDatasetState MBean if DataNode is shutdown twice. Contributed by Wei-Chiu Chuang.

2022-02-22 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 551f8d3  HDFS-11041. Unable to unregister FsDatasetState MBean if 
DataNode is shutdown twice. Contributed by Wei-Chiu Chuang.
551f8d3 is described below

commit 551f8d3ddc674d94343eacd63f11ce95564c7138
Author: Ayush Saxena 
AuthorDate: Wed Jun 3 12:47:15 2020 +0530

HDFS-11041. Unable to unregister FsDatasetState MBean if DataNode is 
shutdown twice. Contributed by Wei-Chiu Chuang.

(cherry picked from commit e8cb2ae409bc1d62f23efef485d1c6f1ff21e86c)
(cherry picked from commit fa30224e95d920adaea0ec65c360359044e3f754)
---
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java| 1 +
 .../org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java   | 5 -
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 045c8cd..1979381 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2360,6 +2360,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
 if (mbeanName != null) {
   MBeans.unregister(mbeanName);
+  mbeanName = null;
 }
 
 if (asyncDiskService != null) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index cb7bbdb..d19daf2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -1362,7 +1362,10 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
 
   @Override
   public void shutdown() {
-if (mbeanName != null) MBeans.unregister(mbeanName);
+if (mbeanName != null) {
+  MBeans.unregister(mbeanName);
+  mbeanName = null;
+}
   }
 
   @Override
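
The guard added above makes shutdown() idempotent: once MBeans.unregister runs,
mbeanName is nulled so a second shutdown() skips the unregister instead of
failing. A self-contained Java sketch of the same guard using plain JMX (the
Demo bean and its object name are hypothetical, not Hadoop code):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class IdempotentShutdownExample {

      // Trivial MBean so registration/unregistration actually works.
      public interface DemoMBean { int getValue(); }
      public static class Demo implements DemoMBean {
        public int getValue() { return 42; }
      }

      private final MBeanServer server =
          ManagementFactory.getPlatformMBeanServer();
      private ObjectName mbeanName; // set on start, nulled on first shutdown

      public void start() throws Exception {
        mbeanName = new ObjectName("example:type=Demo");
        server.registerMBean(new Demo(), mbeanName);
      }

      public synchronized void shutdown() throws Exception {
        if (mbeanName != null) {
          server.unregisterMBean(mbeanName);
          mbeanName = null; // a second shutdown() now skips the unregister
        }
      }

      public static void main(String[] args) throws Exception {
        IdempotentShutdownExample s = new IdempotentShutdownExample();
        s.start();
        s.shutdown();
        s.shutdown(); // no InstanceNotFoundException: the null check short-circuits
      }
    }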




[hadoop] branch branch-3.3 updated: HDFS-11041. Unable to unregister FsDatasetState MBean if DataNode is shutdown twice. Contributed by Wei-Chiu Chuang.

2022-02-22 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new fa30224  HDFS-11041. Unable to unregister FsDatasetState MBean if 
DataNode is shutdown twice. Contributed by Wei-Chiu Chuang.
fa30224 is described below

commit fa30224e95d920adaea0ec65c360359044e3f754
Author: Ayush Saxena 
AuthorDate: Wed Jun 3 12:47:15 2020 +0530

HDFS-11041. Unable to unregister FsDatasetState MBean if DataNode is 
shutdown twice. Contributed by Wei-Chiu Chuang.

(cherry picked from commit e8cb2ae409bc1d62f23efef485d1c6f1ff21e86c)
---
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java| 1 +
 .../org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java   | 5 -
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 2ab4b83..d263d7d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2353,6 +2353,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
 if (mbeanName != null) {
   MBeans.unregister(mbeanName);
+  mbeanName = null;
 }
 
 if (asyncDiskService != null) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 113da58..417ad3c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -1367,7 +1367,10 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
 
   @Override
   public void shutdown() {
-if (mbeanName != null) MBeans.unregister(mbeanName);
+if (mbeanName != null) {
+  MBeans.unregister(mbeanName);
+  mbeanName = null;
+}
   }
 
   @Override




[hadoop] branch branch-2.10 updated: HDFS-11041. Unable to unregister FsDatasetState MBean if DataNode is shutdown twice. Contributed by Wei-Chiu Chuang.

2022-02-22 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new f36d61a  HDFS-11041. Unable to unregister FsDatasetState MBean if 
DataNode is shutdown twice. Contributed by Wei-Chiu Chuang.
f36d61a is described below

commit f36d61a0c2ae732ea4e6b1e8762a5082b0d1cbf7
Author: Ayush Saxena 
AuthorDate: Wed Jun 3 12:47:15 2020 +0530

HDFS-11041. Unable to unregister FsDatasetState MBean if DataNode is 
shutdown twice. Contributed by Wei-Chiu Chuang.

(cherry picked from commit e8cb2ae409bc1d62f23efef485d1c6f1ff21e86c)
(cherry picked from commit fa30224e95d920adaea0ec65c360359044e3f754)
(cherry picked from commit 551f8d3ddc674d94343eacd63f11ce95564c7138)
---
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java| 1 +
 .../org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java   | 5 -
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 9977b96..81b0d67 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2410,6 +2410,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
 if (mbeanName != null) {
   MBeans.unregister(mbeanName);
+  mbeanName = null;
 }
 
 if (asyncDiskService != null) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index a36a9a2..b4526fb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -1318,7 +1318,10 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
 
   @Override
   public void shutdown() {
-if (mbeanName != null) MBeans.unregister(mbeanName);
+if (mbeanName != null) {
+  MBeans.unregister(mbeanName);
+  mbeanName = null;
+}
   }
 
   @Override




[hadoop] branch HADOOP-18127 created (now 3682078)

2022-02-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a change to branch HADOOP-18127
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at 3682078  HADOOP-13055. Implement linkMergeSlash and linkFallback for 
ViewFileSystem

This branch includes the following new commits:

 new 0bcc3c7  HADOOP-13722. Code cleanup -- ViewFileSystem and InodeTree. 
Contributed by Manoj Govindassamy.
 new aa8eb78  HADOOP-12077. Provide a multi-URI replication Inode for 
ViewFs. Contributed by Gera Shegalov
 new 3682078  HADOOP-13055. Implement linkMergeSlash and linkFallback for 
ViewFileSystem

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.





[hadoop] 01/03: HADOOP-13722. Code cleanup -- ViewFileSystem and InodeTree. Contributed by Manoj Govindassamy.

2022-02-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch HADOOP-18127
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 0bcc3c7eea0432c131f7da4f847aa77fd61f5a18
Author: Andrew Wang 
AuthorDate: Fri Feb 18 18:34:11 2022 -0800

HADOOP-13722. Code cleanup -- ViewFileSystem and InodeTree. Contributed by 
Manoj Govindassamy.

(cherry picked from commit 0f4afc81009129bbee89d5b6cf22c8dda612d223)
---
 .../org/apache/hadoop/fs/viewfs/InodeTree.java | 198 ++---
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java|  85 +
 .../apache/hadoop/fs/viewfs/TestViewFsConfig.java  |  35 ++--
 3 files changed, 146 insertions(+), 172 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index 779cec8..c9bdf63 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -37,47 +37,45 @@ import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
-
 /**
  * InodeTree implements a mount-table as a tree of inodes.
  * It is used to implement ViewFs and ViewFileSystem.
  * In order to use it the caller must subclass it and implement
  * the abstract methods {@link #getTargetFileSystem(INodeDir)}, etc.
- * 
+ *
  * The mountable is initialized from the config variables as 
  * specified in {@link ViewFs}
  *
  * @param <T> is AbstractFileSystem or FileSystem
- * 
- * The three main methods are
- * {@link #InodeTreel(Configuration)} // constructor
+ *
+ * The two main methods are
  * {@link #InodeTree(Configuration, String)} // constructor
  * {@link #resolve(String, boolean)} 
  */
 
 @InterfaceAudience.Private
-@InterfaceStability.Unstable 
+@InterfaceStability.Unstable
 abstract class InodeTree<T> {
-  static enum ResultKind {isInternalDir, isExternalDir;};
+  enum ResultKind {
+    INTERNAL_DIR,
+    EXTERNAL_DIR
+  }
+
   static final Path SlashPath = new Path("/");
-  
-  final INodeDir<T> root; // the root of the mount table
-  
-  final String homedirPrefix; // the homedir config value for this mount table
-  
-  List<MountPoint<T>> mountPoints = new ArrayList<MountPoint<T>>();
-  
-  
+  private final INodeDir<T> root; // the root of the mount table
+  private final String homedirPrefix; // the homedir for this mount table
+  private List<MountPoint<T>> mountPoints = new ArrayList<MountPoint<T>>();
+
   static class MountPoint<T> {
     String src;
     INodeLink<T> target;
+
     MountPoint(String srcPath, INodeLink<T> mountLink) {
       src = srcPath;
       target = mountLink;
     }
-
   }
-  
+
   /**
    * Breaks file path into component names.
    * @param path
@@ -85,18 +83,19 @@ abstract class InodeTree<T> {
    */
   static String[] breakIntoPathComponents(final String path) {
     return path == null ? null : path.split(Path.SEPARATOR);
-  } 
-  
+  }
+
   /**
    * Internal class for inode tree
    * @param <T>
    */
   abstract static class INode<T> {
     final String fullPath; // the full path to the root
+
     public INode(String pathToNode, UserGroupInformation aUgi) {
       fullPath = pathToNode;
     }
-  };
+  }
 
   /**
    * Internal class to represent an internal dir of the mount table
@@ -106,37 +105,28 @@ abstract class InodeTree<T> {
     final Map<String, INode<T>> children = new HashMap<String, INode<T>>();
     T InodeDirFs =  null; // file system of this internal directory of mountT
     boolean isRoot = false;
-
+
     INodeDir(final String pathToNode, final UserGroupInformation aUgi) {
       super(pathToNode, aUgi);
     }
 
-    INode<T> resolve(final String pathComponent) throws FileNotFoundException {
-      final INode<T> result = resolveInternal(pathComponent);
-      if (result == null) {
-        throw new FileNotFoundException();
-      }
-      return result;
-    }
-
     INode<T> resolveInternal(final String pathComponent) {
       return children.get(pathComponent);
     }
-
+
     INodeDir<T> addDir(final String pathComponent,
-        final UserGroupInformation aUgi)
-      throws FileAlreadyExistsException {
+        final UserGroupInformation aUgi) throws FileAlreadyExistsException {
       if (children.containsKey(pathComponent)) {
         throw new 

[hadoop] 03/03: HADOOP-13055. Implement linkMergeSlash and linkFallback for ViewFileSystem

2022-02-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch HADOOP-18127
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 3682078fb872604576180610cc2fac67d04a8692
Author: Manoj Govindassamy 
AuthorDate: Fri Oct 13 17:43:13 2017 -0700

HADOOP-13055. Implement linkMergeSlash and linkFallback for ViewFileSystem

(cherry picked from commit 133d7ca76e3d4b60292d57429d4259e80bec650a)
---
 .../org/apache/hadoop/fs/viewfs/ConfigUtil.java|  68 +++-
 .../org/apache/hadoop/fs/viewfs/Constants.java |  16 +-
 .../org/apache/hadoop/fs/viewfs/InodeTree.java | 351 ++---
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java|  13 +-
 .../java/org/apache/hadoop/fs/viewfs/ViewFs.java   |  14 +-
 .../hadoop/fs/viewfs/ViewFileSystemBaseTest.java   |   4 +-
 .../hadoop-hdfs/src/site/markdown/ViewFs.md|  44 ++-
 .../fs/viewfs/TestViewFileSystemLinkFallback.java  | 264 
 .../viewfs/TestViewFileSystemLinkMergeSlash.java   | 234 ++
 9 files changed, 940 insertions(+), 68 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
index 8acd41f..5867f62 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs.viewfs;
 
 import java.net.URI;
+import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
@@ -68,7 +69,72 @@ public class ConfigUtil {
 addLink( conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, 
 src, target);   
   }
-  
+
+  /**
+   * Add a LinkMergeSlash to the config for the specified mount table.
+   * @param conf
+   * @param mountTableName
+   * @param target
+   */
+  public static void addLinkMergeSlash(Configuration conf,
+  final String mountTableName, final URI target) {
+conf.set(getConfigViewFsPrefix(mountTableName) + "." +
+Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH, target.toString());
+  }
+
+  /**
+   * Add a LinkMergeSlash to the config for the default mount table.
+   * @param conf
+   * @param target
+   */
+  public static void addLinkMergeSlash(Configuration conf, final URI target) {
+addLinkMergeSlash(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE,
+target);
+  }
+
+  /**
+   * Add a LinkFallback to the config for the specified mount table.
+   * @param conf
+   * @param mountTableName
+   * @param target
+   */
+  public static void addLinkFallback(Configuration conf,
+  final String mountTableName, final URI target) {
+conf.set(getConfigViewFsPrefix(mountTableName) + "." +
+Constants.CONFIG_VIEWFS_LINK_FALLBACK, target.toString());
+  }
+
+  /**
+   * Add a LinkFallback to the config for the default mount table.
+   * @param conf
+   * @param target
+   */
+  public static void addLinkFallback(Configuration conf, final URI target) {
+addLinkFallback(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE,
+target);
+  }
+
+  /**
+   * Add a LinkMerge to the config for the specified mount table.
+   * @param conf
+   * @param mountTableName
+   * @param targets
+   */
+  public static void addLinkMerge(Configuration conf,
+  final String mountTableName, final URI[] targets) {
+conf.set(getConfigViewFsPrefix(mountTableName) + "." +
+Constants.CONFIG_VIEWFS_LINK_MERGE, Arrays.toString(targets));
+  }
+
+  /**
+   * Add a LinkMerge to the config for the default mount table.
+   * @param conf
+   * @param targets
+   */
+  public static void addLinkMerge(Configuration conf, final URI[] targets) {
+addLinkMerge(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, targets);
+  }
+
   /**
*
* @param conf
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
index 3f9aae2..7a0a6661 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
@@ -51,12 +51,17 @@ public interface Constants {
   /**
* Config variable for specifying a simple link
*/
-  public static final String CONFIG_VIEWFS_LINK = "link";
-  
+  String CONFIG_VIEWFS_LINK = "link";
+
+  /**
+   * Config variable for specifying a fallback for link mount points.
+   */
+  String CONFIG_VIEWFS_LINK_FALLBACK = "linkFallback";
+
   /**
* Config variable for specifying a merge link
*/
-  public static final String CONFIG_VIEWFS_LINK_MERGE = "linkMerge";
+  String CONFIG_VIEWFS_LINK_MERGE = "linkMerge";
 
   /**

[hadoop] 02/03: HADOOP-12077. Provide a multi-URI replication Inode for ViewFs. Contributed by Gera Shegalov

2022-02-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch HADOOP-18127
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit aa8eb78e6b532a29f25dd1104e539617b2bce916
Author: Chris Douglas 
AuthorDate: Tue Sep 5 23:30:18 2017 -0700

HADOOP-12077. Provide a multi-URI replication Inode for ViewFs. Contributed 
by Gera Shegalov

(cherry picked from commit 1f3bc63e6772be81bc9a6a7d93ed81d2a9e066c0)
---
 .../org/apache/hadoop/fs/viewfs/ConfigUtil.java|  27 +
 .../org/apache/hadoop/fs/viewfs/Constants.java |   8 +-
 .../org/apache/hadoop/fs/viewfs/InodeTree.java |  62 +-
 .../org/apache/hadoop/fs/viewfs/NflyFSystem.java   | 951 +
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java|  34 +-
 .../java/org/apache/hadoop/fs/viewfs/ViewFs.java   |   7 +-
 .../viewfs/TestViewFileSystemLocalFileSystem.java  |  77 +-
 .../apache/hadoop/fs/viewfs/TestViewFsConfig.java  |  10 +-
 .../hadoop/fs/viewfs/TestViewFileSystemHdfs.java   | 147 +++-
 9 files changed, 1270 insertions(+), 53 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
index bb941c7..8acd41f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.viewfs;
 import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * Utilities for config variables of the viewFs See {@link ViewFs}
@@ -69,6 +70,32 @@ public class ConfigUtil {
   }
   
   /**
+   *
+   * @param conf
+   * @param mountTableName
+   * @param src
+   * @param settings
+   * @param targets
+   */
+  public static void addLinkNfly(Configuration conf, String mountTableName,
+  String src, String settings, final URI ... targets) {
+
+settings = settings == null
+? "minReplication=2,repairOnRead=true"
+: settings;
+
+conf.set(getConfigViewFsPrefix(mountTableName) + "." +
+Constants.CONFIG_VIEWFS_LINK_NFLY + "." + settings + "." + src,
+StringUtils.uriToString(targets));
+  }
+
+  public static void addLinkNfly(final Configuration conf, final String src,
+  final URI ... targets) {
+addLinkNfly(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, src, null,
+targets);
+  }
+
+  /**
* Add config variable for homedir for default mount table
* @param conf - add to this conf
* @param homedir - the home dir path starting with slash
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
index 0c0e8a3..3f9aae2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
@@ -57,7 +57,13 @@ public interface Constants {
* Config variable for specifying a merge link
*/
   public static final String CONFIG_VIEWFS_LINK_MERGE = "linkMerge";
-  
+
+  /**
+   * Config variable for specifying an nfly link. Nfly writes to multiple
+   * locations, and allows reads from the closest one.
+   */
+  String CONFIG_VIEWFS_LINK_NFLY = "linkNfly";
+
   /**
* Config variable for specifying a merge of the root of the mount-table
*  with the root of another file system. 
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index c9bdf63..199ccc6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -134,6 +134,12 @@ abstract class InodeTree {
 }
   }
 
+  enum LinkType {
+SINGLE,
+MERGE,
+NFLY
+  }
+
   /**
* An internal class to represent a mount link.
* A mount link can be single dir link or a merge dir link.
@@ -147,7 +153,6 @@ abstract class InodeTree {
* is changed later it is then ignored (a dir with null entries)
*/
   static class INodeLink<T> extends INode<T> {
-    final boolean isMergeLink; // true if MergeLink
     final URI[] targetDirLinkList;
     private T targetFileSystem;   // file system object created from the link.
 // Function to initialize file system. Only applicable for simple links
@@ -155,14 +160,13 @@ abstract class InodeTree {
 private final Object lock = new Object();
 
 /**
- * Construct a mergeLink.
+ * Construct a mergeLink or nfly.
  */
 

[hadoop] branch trunk updated: YARN-10590. Consider legacy auto queue creation absolute resource template to avoid rounding errors. Contributed by Andras Gyori

2022-02-22 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3653754  YARN-10590. Consider legacy auto queue creation absolute 
resource template to avoid rounding errors. Contributed by Andras Gyori
3653754 is described below

commit 365375412fe5eea82549630ee8c5598502b95caf
Author: Szilard Nemeth 
AuthorDate: Tue Feb 22 12:26:35 2022 +0100

YARN-10590. Consider legacy auto queue creation absolute resource template 
to avoid rounding errors. Contributed by Andras Gyori
---
 .../scheduler/capacity/AutoCreatedLeafQueue.java   | 23 +---
 .../capacity/AutoCreatedLeafQueueConfig.java   | 15 
 .../scheduler/capacity/ManagedParentQueue.java | 41 ++
 .../GuaranteedOrZeroCapacityOverTimePolicy.java| 10 +++---
 .../TestAbsoluteResourceWithAutoQueue.java |  8 ++---
 .../TestCapacitySchedulerAutoCreatedQueueBase.java | 28 +--
 6 files changed, 83 insertions(+), 42 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueue.java
index 384a652..910d8de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueue.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueResourceQuotas;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
 
@@ -30,6 +31,8 @@ import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
 
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AbstractCSQueue.CapacityConfigType.ABSOLUTE_RESOURCE;
+
 /**
  * Leaf queues which are auto created by an underlying implementation of
  * AbstractManagedParentQueue. Eg: PlanQueue for reservations or
@@ -81,14 +84,14 @@ public class AutoCreatedLeafQueue extends AbstractAutoCreatedLeafQueue {
   QueueCapacities capacities = leafQueueTemplate.getQueueCapacities();
 
   //reset capacities for the leaf queue
-  mergeCapacities(capacities);
+  mergeCapacities(capacities, leafQueueTemplate.getResourceQuotas());
 
 } finally {
   writeLock.unlock();
 }
   }
 
-  public void mergeCapacities(QueueCapacities capacities) {
+  public void mergeCapacities(QueueCapacities capacities, QueueResourceQuotas resourceQuotas) {
 for ( String nodeLabel : capacities.getExistingNodeLabels()) {
   queueCapacities.setCapacity(nodeLabel,
   capacities.getCapacity(nodeLabel));
@@ -101,9 +104,19 @@ public class AutoCreatedLeafQueue extends AbstractAutoCreatedLeafQueue {
 
   Resource resourceByLabel = labelManager.getResourceByLabel(nodeLabel,
   queueContext.getClusterResource());
-  getQueueResourceQuotas().setEffectiveMinResource(nodeLabel,
-  Resources.multiply(resourceByLabel,
-  queueCapacities.getAbsoluteCapacity(nodeLabel)));
+      // Update effective resource from template due to rounding errors.
+      // However, we need to consider deactivation as well, in which case we fall back to
+      // Percentage calculation (as absolute capacity will be 0, resource will be zero as well).
+      if (getCapacityConfigType().equals(ABSOLUTE_RESOURCE)
+          && queueCapacities.getAbsoluteCapacity(nodeLabel) > 0) {
+        getQueueResourceQuotas().setEffectiveMinResource(nodeLabel,
+            resourceQuotas.getConfiguredMinResource(nodeLabel));
+      } else {
+        getQueueResourceQuotas().setEffectiveMinResource(nodeLabel,
+            Resources.multiply(resourceByLabel,
+                queueCapacities.getAbsoluteCapacity(nodeLabel)));
+      }
+
   getQueueResourceQuotas().setEffectiveMaxResource(nodeLabel,
   Resources.multiply(resourceByLabel, queueCapacities
   .getAbsoluteMaximumCapacity(nodeLabel)));
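
For a concrete sense of the rounding error the branch above avoids: a queue
minimum stored as a float percentage does not multiply back to the configured
absolute resource exactly. A self-contained sketch with made-up numbers (not
YARN code):

    public class RoundingExample {
      public static void main(String[] args) {
        long clusterMemoryMb = 300_000;
        long configuredMinMb = 100_000; // from the absolute-resource template

        // Percentage round trip, as the percentage-based path computes it.
        float absoluteCapacity = (float) configuredMinMb / clusterMemoryMb;
        double derived = (double) clusterMemoryMb * absoluteCapacity;

        // Prints 100000.00298023224 vs 100000: the drift that taking the
        // template's configured minimum resource directly sidesteps.
        System.out.println(derived + " vs " + configuredMinMb);
      }
    }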
diff --git 

[hadoop] branch trunk updated: YARN-10944. AbstractCSQueue: Eliminate code duplication in overloaded versions of setMaxCapacity. Contributed by Andras Gyori

2022-02-22 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0463498  YARN-10944. AbstractCSQueue: Eliminate code duplication in 
overloaded versions of setMaxCapacity. Contributed by Andras Gyori
0463498 is described below

commit 0463498adcd3b62b83e6383cae556093d4270d66
Author: Szilard Nemeth 
AuthorDate: Tue Feb 22 11:55:34 2022 +0100

YARN-10944. AbstractCSQueue: Eliminate code duplication in overloaded 
versions of setMaxCapacity. Contributed by Andras Gyori
---
 .../scheduler/capacity/AbstractCSQueue.java| 32 --
 1 file changed, 11 insertions(+), 21 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 4eb7b68..020c601 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -74,6 +74,7 @@ import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import static org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.NO_LABEL;
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.DOT;
 
 public abstract class AbstractCSQueue implements CSQueue {
@@ -279,33 +280,23 @@ public abstract class AbstractCSQueue implements CSQueue {
   }
 
   /**
-   * Set maximum capacity - used only for testing.
+   * Set maximum capacity for empty node label.
* @param maximumCapacity new max capacity
*/
   @VisibleForTesting
   void setMaxCapacity(float maximumCapacity) {
-writeLock.lock();
-try {
-  // Sanity check
-  CSQueueUtils.checkMaxCapacity(this.queuePath,
-  queueCapacities.getCapacity(), maximumCapacity);
-  float absMaxCapacity = CSQueueUtils.computeAbsoluteMaximumCapacity(
-  maximumCapacity, parent);
-  CSQueueUtils.checkAbsoluteCapacity(this.queuePath,
-  queueCapacities.getAbsoluteCapacity(), absMaxCapacity);
-
-  queueCapacities.setMaximumCapacity(maximumCapacity);
-  queueCapacities.setAbsoluteMaximumCapacity(absMaxCapacity);
-} finally {
-  writeLock.unlock();
-}
+internalSetMaximumCapacity(maximumCapacity, NO_LABEL);
   }
 
   /**
-   * Set maximum capacity
+   * Set maximum capacity.
* @param maximumCapacity new max capacity
*/
   void setMaxCapacity(String nodeLabel, float maximumCapacity) {
+internalSetMaximumCapacity(maximumCapacity, nodeLabel);
+  }
+
+  private void internalSetMaximumCapacity(float maximumCapacity, String nodeLabel) {
 writeLock.lock();
 try {
   // Sanity check
@@ -323,7 +314,6 @@ public abstract class AbstractCSQueue implements CSQueue {
 }
   }
 
-
   @Override
   public String getDefaultNodeLabelExpression() {
 return this.queueNodeLabelsSettings.getDefaultLabelExpression();
@@ -917,7 +907,7 @@ public abstract class AbstractCSQueue implements CSQueue {
   }
 
   private static String ensurePartition(String partition) {
-return Optional.ofNullable(partition).orElse(RMNodeLabelsManager.NO_LABEL);
+return Optional.ofNullable(partition).orElse(NO_LABEL);
   }
 
   @FunctionalInterface
@@ -1016,8 +1006,8 @@ public abstract class AbstractCSQueue implements CSQueue {
 
 // Add NO_LABEL also to this list as NO_LABEL also can be granted with
 // resource in many general cases.
-if (!nodeLabels.contains(RMNodeLabelsManager.NO_LABEL)) {
-  nodeLabels.add(RMNodeLabelsManager.NO_LABEL);
+if (!nodeLabels.contains(NO_LABEL)) {
+  nodeLabels.add(NO_LABEL);
 }
 return nodeLabels;
   }




[hadoop] 01/01: HADOOP-11867. Add a high-performance vectored read API. (#3904)

2022-02-22 Thread mthakur
This is an automated email from the ASF dual-hosted git repository.

mthakur pushed a commit to branch feature-vectored-io
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 87682c976f0bfdd2ee8194513f751fa96be1df07
Author: Mukund Thakur 
AuthorDate: Tue Feb 1 19:52:38 2022 +0530

HADOOP-11867. Add a high-performance vectored read API. (#3904)

part of HADOOP-18103.
Add support for multiple ranged vectored read api in PositionedReadable.
The default iterates through the ranges to read each synchronously,
but the intent is that FSDataInputStream subclasses can make more
efficient readers especially in object stores implementation.

Also added implementation in S3A where smaller ranges are merged and
sliced byte buffers are returned to the readers. All the merged ranged are
fetched from S3 asynchronously.


Contributed By: Owen O'Malley and Mukund Thakur
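
As a usage sketch of the new API (assuming the FileRangeImpl(offset, length)
constructor and the readVectored(ranges, allocate) default that this patch
introduces; the input path is a placeholder):

    import java.nio.ByteBuffer;
    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileRange;
    import org.apache.hadoop.fs.FileRangeImpl;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class VectoredReadDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path path = new Path("/tmp/demo.bin"); // placeholder input file

        // Two ranges; implementations may merge or reorder them.
        List<FileRange> ranges = Arrays.asList(
            new FileRangeImpl(0, 4096),
            new FileRangeImpl(1 << 20, 4096));

        try (FSDataInputStream in = fs.open(path)) {
          in.readVectored(ranges, ByteBuffer::allocate);
          for (FileRange range : ranges) {
            // Each range's future completes as its data arrives.
            ByteBuffer data = range.getData().join();
            System.out.println("read " + data.remaining() + " bytes");
          }
        }
      }
    }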
---
 dev-support/Jenkinsfile|   2 +-
 .../apache/hadoop/fs/BufferedFSInputStream.java|  27 +-
 .../org/apache/hadoop/fs/ChecksumFileSystem.java   | 208 +---
 .../org/apache/hadoop/fs/FSDataInputStream.java|  22 +-
 .../main/java/org/apache/hadoop/fs/FileRange.java  |  55 +++
 .../java/org/apache/hadoop/fs/FileRangeImpl.java   |  69 
 .../org/apache/hadoop/fs/PositionedReadable.java   |  43 ++-
 .../org/apache/hadoop/fs/RawLocalFileSystem.java   | 108 +-
 .../apache/hadoop/fs/impl/CombinedFileRange.java   |  71 
 .../apache/hadoop/fs/impl/VectoredReadUtils.java   | 277 +++
 .../site/markdown/filesystem/fsdatainputstream.md  |  31 ++
 .../contract/AbstractContractVectoredReadTest.java | 375 +
 .../localfs/TestLocalFSContractVectoredRead.java   |  35 ++
 .../rawlocal/TestRawLocalContractVectoredRead.java |  35 ++
 .../hadoop/fs/impl/TestVectoredReadUtils.java  | 344 +++
 .../java/org/apache/hadoop/test/MoreAsserts.java   |  37 +-
 hadoop-common-project/pom.xml  |   1 -
 hadoop-project/pom.xml |  11 +
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java|   3 +-
 .../org/apache/hadoop/fs/s3a/S3AInputStream.java   | 258 +-
 .../contract/s3a/ITestS3AContractVectoredRead.java |  54 +++
 .../hadoop/fs/s3a/TestS3AInputStreamRetry.java |   3 +-
 .../hadoop-aws/src/test/resources/log4j.properties |   2 +-
 hadoop-tools/hadoop-benchmark/pom.xml  |  94 ++
 .../hadoop-benchmark/src/main/assembly/uber.xml|  33 ++
 .../hadoop-benchmark/src/main/findbugs/exclude.xml |  22 ++
 .../hadoop/benchmark/VectoredReadBenchmark.java| 245 ++
 .../org/apache/hadoop/benchmark/package-info.java  |  22 ++
 hadoop-tools/pom.xml   |   1 +
 pom.xml|   1 +
 30 files changed, 2422 insertions(+), 67 deletions(-)

diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 0ec32e3..3f6331b 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -47,7 +47,7 @@ pipeline {
 
 options {
 buildDiscarder(logRotator(numToKeepStr: '5'))
-timeout (time: 24, unit: 'HOURS')
+timeout (time: 48, unit: 'HOURS')
 timestamps()
 checkoutToSubdirectory('src')
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
index 59345f5..7f31712 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -22,6 +22,9 @@ import java.io.EOFException;
 import java.io.FileDescriptor;
 import java.io.IOException;
 import java.util.StringJoiner;
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.function.IntFunction;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -158,8 +161,24 @@ public class BufferedFSInputStream extends BufferedInputStream
   @Override
   public String toString() {
 return new StringJoiner(", ",
-BufferedFSInputStream.class.getSimpleName() + "[", "]")
-.add("in=" + in)
-.toString();
+BufferedFSInputStream.class.getSimpleName() + "[", "]")
+.add("in=" + in)
+.toString();
+  }
+
+  @Override
+  public int minSeekForVectorReads() {
+return ((PositionedReadable) in).minSeekForVectorReads();
+  }
+
+  @Override
+  public int maxReadSizeForVectorReads() {
+return ((PositionedReadable) 

[hadoop] branch feature-vectored-io updated (ac08a25 -> 87682c9)

2022-02-22 Thread mthakur
This is an automated email from the ASF dual-hosted git repository.

mthakur pushed a change to branch feature-vectored-io
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


omit ac08a25  HADOOP-11867. Add a high-performance vectored read API. 
(#3904)
 add fcb1076  HDFS-16426. Fix nextBlockReportTime when trigger full block 
report force (#3887)
 add e355646  HDFS-16399. Reconfig cache report parameters for datanode 
(#3841)
 add db2c320  HDFS-16423. Balancer should not get blocks on stale storages 
(#3883)
 add 7c97c0f  HADOOP-18084. ABFS: Add testfilePath while verifying test 
contents are read correctly (#3903)
 add dae33cf  YARN-11065. Bump follow-redirects from 1.13.3 to 1.14.7 in 
hadoop-yarn-ui  (#3890)
 add 15b820c  HDFS-16402. Improve HeartbeatManager logic to avoid incorrect 
stats. (#3839). Contributed by tomscut.
 add 5ef335d  HDFS-16430. Add validation to maximum blocks in EC group when 
adding an EC policy (#3899). Contributed by daimin.
 add 795a5ef  HADOOP-17593. hadoop-huaweicloud and hadoop-cloud-storage to 
remove log4j as transitive dependency
 add b795f6f  HADOOP-18094. Disable S3A auditing by default.
 add 3ed3c74  YARN-11015. Decouple queue capacity with ability to run 
OPPORTUNISTIC container (#3779)
 add d699389  HDFS-16403. Improve FUSE IO performance by supporting FUSE 
parameter max_background (#3842)
 add 43153e8  HDFS-16428. Source path with storagePolicy cause wrong 
typeConsumed while rename (#3898). Contributed by lei w.
 add 94b884a  HDFS-16262. Async refresh of cached locations in 
DFSInputStream (#3527)
 add 8c7c49d  HDFS-16401.Remove the worthless 
DatasetVolumeChecker#numAsyncDatasetChecks. (#3838)
 add 0d17b62  HADOOP-18093. Better exception handling for 
testFileStatusOnMountLink() in ViewFsBaseTest.java (#3918). Contributed by Xing 
Lin.
 add 4b26635  YARN-11034. Add enhanced headroom in AllocateResponse (#3766)
 add 4faac58  HADOOP-18089. Test coverage for Async profiler servlets 
(#3913)
 add c2ff390  HDFS-16398. Reconfig block report parameters for datanode 
(#3831)
 add 6136d63  HDFS-16427. Add debug log for 
BlockManager#chooseExcessRedundancyStriped (#3888)
 add e17c96a  HDFS-16429. Add DataSetLockManager to manage fine-grain locks 
for FsDataSetImpl. (#3900). Contributed by limingxiang.
 add 1c01944  YARN-11068. Exclude transitive log4j2 dependency coming from 
solr 8. (#3936)
 add 02f6bad  Revert "YARN-11068. Exclude transitive log4j2 dependency 
coming from solr 8. (#3936)"
 add 9cb535c  YARN-10561. Upgrade node.js to 12.22.1 and yarn to 1.22.5 in 
YARN application catalog webapp (#2591)
 add 39cad5f  HDFS-16169. Fix TestBlockTokenWithDFSStriped#testEnd2End 
failure (#3850)
 add bd50b91  HDFS-16444. Show start time of JournalNode on Web (#3943)
 add 089e06d  HDFS-16443. Fix edge case where DatanodeAdminDefaultMonitor 
doubly enqueues a DatanodeDescriptor on exception (#3942)
 add e8f767f  YARN-11026. Make default AppPlacementAllocator configurable 
in AppSch… (#3741)
 add aeae571  Revert "HADOOP-18024. SocketChannel is not closed when 
IOException happens in Server$Listener.doAccept (#3719)"
 add 87abc43  YARN-10822. Containers going from New to Scheduled transition 
for kil… (#3632)
 add ec2fd01  YARN-10459. containerLaunchedOnNode method not need to hold 
scheduler… (#3195)
 add ed44662  HDFS-16445. Make HDFS count, mkdir, rm cross platform (#3945)
 add b5b07af  HDFS-16435. Remove no need TODO comment for 
ObserverReadProxyProvider (#3912). Contributed by tomscut.
 add 41c86b6  HADOOP-18101. Bump aliyun-sdk-oss to 3.13.2 and jdom2 to 
2.0.6.1 (#3951)
 add 3684c7f  HADOOP-18100: Change scope of inner classes in InodeTree to 
make them accessible outside package
 add 5e7ce26e HADOOP-18085. S3 SDK Upgrade causes AccessPoint ARN endpoint 
mistranslation (#3902)
 add 125e3b6  HDFS-16437 ReverseXML processor doesn't accept XML files 
without the … (#3926)
 add b39b334  HADOOP-18098. Basic verification for the release candidate 
vote (#3944)
 add 3e7a7c3  HDFS-16411 RBF: RouterId is NULL when disable 
RourterRpcServer (#3878)
 add bf0cefb  HDFS-16406. ReadsFromLocalClient counts short-circuit reads 
(#3847)
 add a08e69d  HADOOP-18114. Documentation correction in assumed_roles.md  
(#3949)
 add 390967f  RBF: Add search box in Router's tab-mounttable web page 
(#3968)
 add efdec92  HADOOP-18091. S3A auditing leaks memory through ThreadLocal 
references (#3930)
 add c777142  RBF: Registry HDFS Router's rpcserver & rpcclient metrics for 
PrometheusSink. (#3965)
 add fe583c4  HADOOP-18096. Distcp: Sync moves filtered file to home 
directory rather than deleting. (#3940). Contributed by Ayush Saxena.
 add ca8ba24  HADOOP-18110. ViewFileSystem: Add Support for Localized Trash 
Root
 add 0e74f1e  Fix thread safety of EC decoding during concurrent preads 
(#3881)
 add