[hadoop] branch HADOOP-18127 created (now 3682078)

2022-02-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a change to branch HADOOP-18127
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at 3682078  HADOOP-13055. Implement linkMergeSlash and linkFallback for 
ViewFileSystem

This branch includes the following new commits:

 new 0bcc3c7  HADOOP-13722. Code cleanup -- ViewFileSystem and InodeTree. 
Contributed by Manoj Govindassamy.
 new aa8eb78  HADOOP-12077. Provide a multi-URI replication Inode for 
ViewFs. Contributed by Gera Shegalov
 new 3682078  HADOOP-13055. Implement linkMergeSlash and linkFallback for 
ViewFileSystem

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.





[hadoop] 01/03: HADOOP-13722. Code cleanup -- ViewFileSystem and InodeTree. Contributed by Manoj Govindassamy.

2022-02-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch HADOOP-18127
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 0bcc3c7eea0432c131f7da4f847aa77fd61f5a18
Author: Andrew Wang 
AuthorDate: Fri Feb 18 18:34:11 2022 -0800

HADOOP-13722. Code cleanup -- ViewFileSystem and InodeTree. Contributed by 
Manoj Govindassamy.

(cherry picked from commit 0f4afc81009129bbee89d5b6cf22c8dda612d223)
---
 .../org/apache/hadoop/fs/viewfs/InodeTree.java | 198 ++---
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java|  85 +
 .../apache/hadoop/fs/viewfs/TestViewFsConfig.java  |  35 ++--
 3 files changed, 146 insertions(+), 172 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index 779cec8..c9bdf63 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -37,47 +37,45 @@ import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
-
 /**
  * InodeTree implements a mount-table as a tree of inodes.
  * It is used to implement ViewFs and ViewFileSystem.
  * In order to use it the caller must subclass it and implement
  * the abstract methods {@link #getTargetFileSystem(INodeDir)}, etc.
- * 
+ *
  * The mountable is initialized from the config variables as 
  * specified in {@link ViewFs}
  *
  * @param  is AbstractFileSystem or FileSystem
- * 
- * The three main methods are
- * {@link #InodeTreel(Configuration)} // constructor
+ *
+ * The two main methods are
  * {@link #InodeTree(Configuration, String)} // constructor
  * {@link #resolve(String, boolean)} 
  */
 
 @InterfaceAudience.Private
-@InterfaceStability.Unstable 
+@InterfaceStability.Unstable
 abstract class InodeTree {
-  static enum ResultKind {isInternalDir, isExternalDir;};
+  enum ResultKind {
+INTERNAL_DIR,
+EXTERNAL_DIR
+  }
+
   static final Path SlashPath = new Path("/");
-  
-  final INodeDir root; // the root of the mount table
-  
-  final String homedirPrefix; // the homedir config value for this mount table
-  
-  List> mountPoints = new ArrayList>();
-  
-  
+  private final INodeDir root; // the root of the mount table
+  private final String homedirPrefix; // the homedir for this mount table
+  private List> mountPoints = new ArrayList>();
+
   static class MountPoint {
 String src;
 INodeLink target;
+
 MountPoint(String srcPath, INodeLink mountLink) {
   src = srcPath;
   target = mountLink;
 }
-
   }
-  
+
   /**
* Breaks file path into component names.
* @param path
@@ -85,18 +83,19 @@ abstract class InodeTree {
*/
   static String[] breakIntoPathComponents(final String path) {
 return path == null ? null : path.split(Path.SEPARATOR);
-  } 
-  
+  }
+
   /**
* Internal class for inode tree
* @param 
*/
   abstract static class INode {
 final String fullPath; // the full path to the root
+
 public INode(String pathToNode, UserGroupInformation aUgi) {
   fullPath = pathToNode;
 }
-  };
+  }
 
   /**
* Internal class to represent an internal dir of the mount table
@@ -106,37 +105,28 @@ abstract class InodeTree {
 final Map> children = new HashMap>();
 T InodeDirFs =  null; // file system of this internal directory of mountT
 boolean isRoot = false;
-
+
 INodeDir(final String pathToNode, final UserGroupInformation aUgi) {
   super(pathToNode, aUgi);
 }
 
-INode resolve(final String pathComponent) throws FileNotFoundException {
-  final INode result = resolveInternal(pathComponent);
-  if (result == null) {
-throw new FileNotFoundException();
-  }
-  return result;
-}
-
 INode resolveInternal(final String pathComponent) {
   return children.get(pathComponent);
 }
-
+
 INodeDir addDir(final String pathComponent,
-final UserGroupInformation aUgi)
-  throws FileAlreadyExistsException {
+final UserGroupInformation aUgi) throws FileAlreadyExistsException {
   if (children.
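
For reference, the mount-table resolution logic touched above hinges on breakIntoPathComponents(), which simply splits the path on Path.SEPARATOR ("/"). A minimal standalone sketch of that behavior (plain Java, no Hadoop classes; the class name and the sample path "/user/data" are only for illustration):

public class PathSplitSketch {
  // Mirrors InodeTree.breakIntoPathComponents: null passes through,
  // otherwise the path is split on "/" (the value of Path.SEPARATOR).
  static String[] breakIntoPathComponents(String path) {
    return path == null ? null : path.split("/");
  }

  public static void main(String[] args) {
    // An absolute path yields a leading empty component:
    // "/user/data" -> ["", "user", "data"]
    for (String component : breakIntoPathComponents("/user/data")) {
      System.out.println("[" + component + "]");
    }
  }
}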

[hadoop] 03/03: HADOOP-13055. Implement linkMergeSlash and linkFallback for ViewFileSystem

2022-02-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch HADOOP-18127
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 3682078fb872604576180610cc2fac67d04a8692
Author: Manoj Govindassamy 
AuthorDate: Fri Oct 13 17:43:13 2017 -0700

HADOOP-13055. Implement linkMergeSlash and linkFallback for ViewFileSystem

(cherry picked from commit 133d7ca76e3d4b60292d57429d4259e80bec650a)
---
 .../org/apache/hadoop/fs/viewfs/ConfigUtil.java|  68 +++-
 .../org/apache/hadoop/fs/viewfs/Constants.java |  16 +-
 .../org/apache/hadoop/fs/viewfs/InodeTree.java | 351 ++---
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java|  13 +-
 .../java/org/apache/hadoop/fs/viewfs/ViewFs.java   |  14 +-
 .../hadoop/fs/viewfs/ViewFileSystemBaseTest.java   |   4 +-
 .../hadoop-hdfs/src/site/markdown/ViewFs.md|  44 ++-
 .../fs/viewfs/TestViewFileSystemLinkFallback.java  | 264 
 .../viewfs/TestViewFileSystemLinkMergeSlash.java   | 234 ++
 9 files changed, 940 insertions(+), 68 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
index 8acd41f..5867f62 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs.viewfs;
 
 import java.net.URI;
+import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
@@ -68,7 +69,72 @@ public class ConfigUtil {
 addLink( conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, 
 src, target);   
   }
-  
+
+  /**
+   * Add a LinkMergeSlash to the config for the specified mount table.
+   * @param conf
+   * @param mountTableName
+   * @param target
+   */
+  public static void addLinkMergeSlash(Configuration conf,
+  final String mountTableName, final URI target) {
+conf.set(getConfigViewFsPrefix(mountTableName) + "." +
+Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH, target.toString());
+  }
+
+  /**
+   * Add a LinkMergeSlash to the config for the default mount table.
+   * @param conf
+   * @param target
+   */
+  public static void addLinkMergeSlash(Configuration conf, final URI target) {
+addLinkMergeSlash(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE,
+target);
+  }
+
+  /**
+   * Add a LinkFallback to the config for the specified mount table.
+   * @param conf
+   * @param mountTableName
+   * @param target
+   */
+  public static void addLinkFallback(Configuration conf,
+  final String mountTableName, final URI target) {
+conf.set(getConfigViewFsPrefix(mountTableName) + "." +
+Constants.CONFIG_VIEWFS_LINK_FALLBACK, target.toString());
+  }
+
+  /**
+   * Add a LinkFallback to the config for the default mount table.
+   * @param conf
+   * @param target
+   */
+  public static void addLinkFallback(Configuration conf, final URI target) {
+addLinkFallback(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE,
+target);
+  }
+
+  /**
+   * Add a LinkMerge to the config for the specified mount table.
+   * @param conf
+   * @param mountTableName
+   * @param targets
+   */
+  public static void addLinkMerge(Configuration conf,
+  final String mountTableName, final URI[] targets) {
+conf.set(getConfigViewFsPrefix(mountTableName) + "." +
+Constants.CONFIG_VIEWFS_LINK_MERGE, Arrays.toString(targets));
+  }
+
+  /**
+   * Add a LinkMerge to the config for the default mount table.
+   * @param conf
+   * @param targets
+   */
+  public static void addLinkMerge(Configuration conf, final URI[] targets) {
+addLinkMerge(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, targets);
+  }
+
   /**
*
* @param conf
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
index 3f9aae2..7a0a6661 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
@@ -51,12 +51,17 @@ public interface Constants {
   /**
* Config variable for specifying a simple link
*/
-  public static final String CONFIG_VIEWFS_LINK = "link";
-  
+  String CONFIG_VIEWFS_LINK = "link";
+
+  /**
+   * Config variable for specifying a fallback for link mount points.
+   */
+  String CONFIG_VIEWFS_LINK_FALLBACK = "linkFallback";
+
   /**
* Config variable for specifying a merge link
*/
-  public static final String CONFIG_VIEWFS_LINK_MERGE = 
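
Taken together, the new ConfigUtil helpers in this commit give a programmatic way to wire up the new link types. A minimal sketch of how a client might use them against the default mount table (the class name and the hdfs://nn1, hdfs://nn2 URIs are made up for illustration; only the ConfigUtil method names come from the patch):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

public class ViewFsLinkConfigSketch {
  public static void main(String[] args) throws Exception {
    // linkMergeSlash: merge the root of a single target file system with "/"
    // of the mount table (used instead of individual links).
    Configuration mergeSlashConf = new Configuration();
    ConfigUtil.addLinkMergeSlash(mergeSlashConf, new URI("hdfs://nn1/"));

    // linkFallback: with regular links in place, paths that match no mount
    // point are routed to a fallback file system.
    Configuration fallbackConf = new Configuration();
    ConfigUtil.addLinkFallback(fallbackConf, new URI("hdfs://nn2/fallback"));
  }
}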

[hadoop] 02/03: HADOOP-12077. Provide a multi-URI replication Inode for ViewFs. Contributed by Gera Shegalov

2022-02-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch HADOOP-18127
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit aa8eb78e6b532a29f25dd1104e539617b2bce916
Author: Chris Douglas 
AuthorDate: Tue Sep 5 23:30:18 2017 -0700

HADOOP-12077. Provide a multi-URI replication Inode for ViewFs. Contributed 
by Gera Shegalov

(cherry picked from commit 1f3bc63e6772be81bc9a6a7d93ed81d2a9e066c0)
---
 .../org/apache/hadoop/fs/viewfs/ConfigUtil.java|  27 +
 .../org/apache/hadoop/fs/viewfs/Constants.java |   8 +-
 .../org/apache/hadoop/fs/viewfs/InodeTree.java |  62 +-
 .../org/apache/hadoop/fs/viewfs/NflyFSystem.java   | 951 +
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java|  34 +-
 .../java/org/apache/hadoop/fs/viewfs/ViewFs.java   |   7 +-
 .../viewfs/TestViewFileSystemLocalFileSystem.java  |  77 +-
 .../apache/hadoop/fs/viewfs/TestViewFsConfig.java  |  10 +-
 .../hadoop/fs/viewfs/TestViewFileSystemHdfs.java   | 147 +++-
 9 files changed, 1270 insertions(+), 53 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
index bb941c7..8acd41f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.viewfs;
 import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * Utilities for config variables of the viewFs See {@link ViewFs}
@@ -69,6 +70,32 @@ public class ConfigUtil {
   }
   
   /**
+   *
+   * @param conf
+   * @param mountTableName
+   * @param src
+   * @param settings
+   * @param targets
+   */
+  public static void addLinkNfly(Configuration conf, String mountTableName,
+  String src, String settings, final URI ... targets) {
+
+settings = settings == null
+? "minReplication=2,repairOnRead=true"
+: settings;
+
+conf.set(getConfigViewFsPrefix(mountTableName) + "." +
+Constants.CONFIG_VIEWFS_LINK_NFLY + "." + settings + "." + src,
+StringUtils.uriToString(targets));
+  }
+
+  public static void addLinkNfly(final Configuration conf, final String src,
+  final URI ... targets) {
+addLinkNfly(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, src, null,
+targets);
+  }
+
+  /**
* Add config variable for homedir for default mount table
* @param conf - add to this conf
* @param homedir - the home dir path starting with slash
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
index 0c0e8a3..3f9aae2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
@@ -57,7 +57,13 @@ public interface Constants {
* Config variable for specifying a merge link
*/
   public static final String CONFIG_VIEWFS_LINK_MERGE = "linkMerge";
-  
+
+  /**
+   * Config variable for specifying an nfly link. Nfly writes to multiple
+   * locations, and allows reads from the closest one.
+   */
+  String CONFIG_VIEWFS_LINK_NFLY = "linkNfly";
+
   /**
* Config variable for specifying a merge of the root of the mount-table
*  with the root of another file system. 
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index c9bdf63..199ccc6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -134,6 +134,12 @@ abstract class InodeTree {
 }
   }
 
+  enum LinkType {
+SINGLE,
+MERGE,
+NFLY
+  }
+
   /**
* An internal class to represent a mount link.
* A mount link can be single dir link or a merge dir link.
@@ -147,7 +153,6 @@ abstract class InodeTree {
* is changed later it is then ignored (a dir with null entries)
*/
   static class INodeLink extends INode {
-final boolean isMergeLink; // true if MergeLink
 final URI[] targetDirLinkList;
 private T targetFileSystem;   // file system object created from the link.
 // Function to initialize file system. Only applicable for simple links
@@ -155,14 +160,13 @@ abstract class InodeTree {
 private final Object lock = new Object();
 
 /**
- * Construct a mergeLink.
+ 
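
The addLinkNfly() helper added above takes a settings string plus a list of target URIs, and falls back to "minReplication=2,repairOnRead=true" when settings is null. A minimal sketch of configuring an nfly mount point with the short overload (the /data mount path and cluster URIs are hypothetical):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

public class NflyLinkSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Replicate writes to /data across two clusters; reads go to the closest.
    // The short overload passes null settings, i.e. the defaults above.
    ConfigUtil.addLinkNfly(conf, "/data",
        new URI("hdfs://clusterA/data"),
        new URI("hdfs://clusterB/data"));
  }
}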

[hadoop] branch branch-3.2 updated: HADOOP-18109. Ensure that default permissions of directories under internal ViewFS directories are the same as directories on target filesystems. Contributed by Chentao Yu.

2022-02-15 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 5066722  HADOOP-18109. Ensure that default permissions of directories 
under internal ViewFS directories are the same as directories on target 
filesystems. Contributed by Chentao Yu. (3953)
5066722 is described below

commit 5066722eb70b82a532b55ce98a2623852fd195c8
Author: Chentao Yu 
AuthorDate: Thu Apr 15 17:46:40 2021 -0700

HADOOP-18109. Ensure that default permissions of directories under internal 
ViewFS directories are the same as directories on target filesystems. 
Contributed by Chentao Yu. (3953)

(cherry picked from commit 19d90e62fb28539f8c79bbb24f703301489825a6)
---
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java   |  5 -
 .../hadoop/fs/viewfs/TestViewFileSystemHdfs.java  | 19 +++
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index a430727..0ff2e73 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -1496,11 +1496,6 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
-public boolean mkdirs(Path dir) throws IOException {
-  return mkdirs(dir, null);
-}
-
-@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index fcb5257..fdc7464 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -479,4 +479,23 @@ public class TestViewFileSystemHdfs extends 
ViewFileSystemBaseTest {
 assertEquals("The owner did not match ", owner, 
userUgi.getShortUserName());
 otherfs.delete(user1Path, false);
   }
+
+  @Test
+  public void testInternalDirectoryPermissions() throws IOException {
+LOG.info("Starting testInternalDirectoryPermissions!");
+Configuration localConf = new Configuration(conf);
+ConfigUtil.addLinkFallback(
+localConf, new Path(targetTestRoot, "fallbackDir").toUri());
+FileSystem fs = FileSystem.get(FsConstants.VIEWFS_URI, localConf);
+// check that the default permissions on a sub-folder of an internal
+// directory are the same as those created on non-internal directories.
+Path subDirOfInternalDir = new Path("/internalDir/dir1");
+fs.mkdirs(subDirOfInternalDir);
+
+Path subDirOfRealDir = new Path("/internalDir/linkToDir2/dir1");
+fs.mkdirs(subDirOfRealDir);
+
+assertEquals(fs.getFileStatus(subDirOfInternalDir).getPermission(),
+fs.getFileStatus(subDirOfRealDir).getPermission());
+  }
 }
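
The test above relies on ConfigUtil.addLinkFallback(), which (per the HADOOP-13055 change earlier in this thread) writes a single property under the mount-table prefix. As a rough equivalent for the default mount table, one could set the raw key directly; the key spelling assumes the standard fs.viewfs.mounttable prefix and the target URI is illustrative:

import org.apache.hadoop.conf.Configuration;

public class LinkFallbackKeySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Roughly what ConfigUtil.addLinkFallback(conf, target) does for the
    // "default" mount table: unresolved paths fall back to this file system.
    conf.set("fs.viewfs.mounttable.default.linkFallback",
        "hdfs://nn1/fallbackDir");
  }
}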




[hadoop] branch branch-3.3 updated: HADOOP-18109. Ensure that default permissions of directories under internal ViewFS directories are the same as directories on target filesystems. Contributed by Chentao Yu.

2022-02-15 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new d14a7c6  HADOOP-18109. Ensure that default permissions of directories 
under internal ViewFS directories are the same as directories on target 
filesystems. Contributed by Chentao Yu. (3953)
d14a7c6 is described below

commit d14a7c6ee5e881235d18d4cf9241197a1817d745
Author: Chentao Yu 
AuthorDate: Thu Apr 15 17:46:40 2021 -0700

HADOOP-18109. Ensure that default permissions of directories under internal 
ViewFS directories are the same as directories on target filesystems. 
Contributed by Chentao Yu. (3953)

(cherry picked from commit 19d90e62fb28539f8c79bbb24f703301489825a6)
---
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java   |  5 -
 .../hadoop/fs/viewfs/TestViewFileSystemHdfs.java  | 19 +++
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 7503edd..8f333d1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -1580,11 +1580,6 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
-public boolean mkdirs(Path dir) throws IOException {
-  return mkdirs(dir, null);
-}
-
-@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index fcb5257..fdc7464 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -479,4 +479,23 @@ public class TestViewFileSystemHdfs extends 
ViewFileSystemBaseTest {
 assertEquals("The owner did not match ", owner, 
userUgi.getShortUserName());
 otherfs.delete(user1Path, false);
   }
+
+  @Test
+  public void testInternalDirectoryPermissions() throws IOException {
+LOG.info("Starting testInternalDirectoryPermissions!");
+Configuration localConf = new Configuration(conf);
+ConfigUtil.addLinkFallback(
+localConf, new Path(targetTestRoot, "fallbackDir").toUri());
+FileSystem fs = FileSystem.get(FsConstants.VIEWFS_URI, localConf);
+// check that the default permissions on a sub-folder of an internal
+// directory are the same as those created on non-internal directories.
+Path subDirOfInternalDir = new Path("/internalDir/dir1");
+fs.mkdirs(subDirOfInternalDir);
+
+Path subDirOfRealDir = new Path("/internalDir/linkToDir2/dir1");
+fs.mkdirs(subDirOfRealDir);
+
+assertEquals(fs.getFileStatus(subDirOfInternalDir).getPermission(),
+fs.getFileStatus(subDirOfRealDir).getPermission());
+  }
 }




[hadoop] branch trunk updated: HADOOP-18109. Ensure that default permissions of directories under internal ViewFS directories are the same as directories on target filesystems. Contributed by Chentao Yu.

2022-02-15 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 19d90e6  HADOOP-18109. Ensure that default permissions of directories 
under internal ViewFS directories are the same as directories on target 
filesystems. Contributed by Chentao Yu. (3953)
19d90e6 is described below

commit 19d90e62fb28539f8c79bbb24f703301489825a6
Author: Chentao Yu 
AuthorDate: Thu Apr 15 17:46:40 2021 -0700

HADOOP-18109. Ensure that default permissions of directories under internal 
ViewFS directories are the same as directories on target filesystems. 
Contributed by Chentao Yu. (3953)
---
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java   |  5 -
 .../hadoop/fs/viewfs/TestViewFileSystemHdfs.java  | 19 +++
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 5ff3c2b..8c3cdb8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -1666,11 +1666,6 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
-public boolean mkdirs(Path dir) throws IOException {
-  return mkdirs(dir, null);
-}
-
-@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index fcb5257..fdc7464 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -479,4 +479,23 @@ public class TestViewFileSystemHdfs extends 
ViewFileSystemBaseTest {
 assertEquals("The owner did not match ", owner, 
userUgi.getShortUserName());
 otherfs.delete(user1Path, false);
   }
+
+  @Test
+  public void testInternalDirectoryPermissions() throws IOException {
+LOG.info("Starting testInternalDirectoryPermissions!");
+Configuration localConf = new Configuration(conf);
+ConfigUtil.addLinkFallback(
+localConf, new Path(targetTestRoot, "fallbackDir").toUri());
+FileSystem fs = FileSystem.get(FsConstants.VIEWFS_URI, localConf);
+// check that the default permissions on a sub-folder of an internal
+// directory are the same as those created on non-internal directories.
+Path subDirOfInternalDir = new Path("/internalDir/dir1");
+fs.mkdirs(subDirOfInternalDir);
+
+Path subDirOfRealDir = new Path("/internalDir/linkToDir2/dir1");
+fs.mkdirs(subDirOfRealDir);
+
+assertEquals(fs.getFileStatus(subDirOfInternalDir).getPermission(),
+fs.getFileStatus(subDirOfRealDir).getPermission());
+  }
 }




[hadoop] branch branch-2.10 updated: HDFS-10650. DFSClient#mkdirs and DFSClient#primitiveMkdir should use default directory permission. Contributed by John Zhuge.

2022-02-15 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new cb5af00  HDFS-10650. DFSClient#mkdirs and DFSClient#primitiveMkdir 
should use default directory permission. Contributed by John Zhuge.
cb5af00 is described below

commit cb5af0012ed94bb8ab63cfdcdfcd1ab17f6660bb
Author: Xiao Chen 
AuthorDate: Thu Jul 28 13:15:02 2016 -0700

HDFS-10650. DFSClient#mkdirs and DFSClient#primitiveMkdir should use 
default directory permission. Contributed by John Zhuge.
---
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java| 14 ++
 .../java/org/apache/hadoop/security/TestPermission.java| 10 ++
 2 files changed, 20 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index ad4e499..32553fb 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1209,6 +1209,13 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 return permission.applyUMask(dfsClientConf.getUMask());
   }
 
+  private FsPermission applyUMaskDir(FsPermission permission) {
+if (permission == null) {
+  permission = FsPermission.getDirDefault();
+}
+return permission.applyUMask(dfsClientConf.getUMask());
+  }
+
   /**
* Same as {@link #create(String, FsPermission, EnumSet, boolean, short, 
long,
* Progressable, int, ChecksumOpt)} with the addition of favoredNodes that is
@@ -2458,7 +2465,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
*
* @param src The path of the directory being created
* @param permission The permission of the directory being created.
-   * If permission == null, use {@link FsPermission#getDefault()}.
+   * If permission == null, use {@link FsPermission#getDirDefault()}.
* @param createParent create missing parent directory if true
*
* @return True if the operation success.
@@ -2467,7 +2474,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
*/
   public boolean mkdirs(String src, FsPermission permission,
   boolean createParent) throws IOException {
-final FsPermission masked = applyUMask(permission);
+final FsPermission masked = applyUMaskDir(permission);
 return primitiveMkdir(src, masked, createParent);
   }
 
@@ -2488,9 +2495,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   boolean createParent) throws IOException {
 checkOpen();
 if (absPermission == null) {
-  absPermission = applyUMask(null);
+  absPermission = applyUMaskDir(null);
 }
-
 LOG.debug("{}: masked={}", src, absPermission);
 try (TraceScope ignored = tracer.newScope("mkdir")) {
   return namenode.mkdirs(src, absPermission, createParent);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 5e4f693..d3a4956 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -53,6 +53,7 @@ public class TestPermission {
   final private static Path ROOT_PATH = new Path("/data");
   final private static Path CHILD_DIR1 = new Path(ROOT_PATH, "child1");
   final private static Path CHILD_DIR2 = new Path(ROOT_PATH, "child2");
+  final private static Path CHILD_DIR3 = new Path(ROOT_PATH, "child3");
   final private static Path CHILD_FILE1 = new Path(ROOT_PATH, "file1");
   final private static Path CHILD_FILE2 = new Path(ROOT_PATH, "file2");
   final private static Path CHILD_FILE3 = new Path(ROOT_PATH, "file3");
@@ -237,6 +238,9 @@ public class TestPermission {
   
   // following dir/file creations are legal
   nnfs.mkdirs(CHILD_DIR1);
+  status = nnfs.getFileStatus(CHILD_DIR1);
+  assertThat("Expect 755 = 777 (default dir) - 022 (default umask)",
+  status.getPermission().toString(), is("rwxr-xr-x"));
   out = nnfs.create(CHILD_FILE1);
   status = nnfs.getFileStatus(CHILD_FILE1);
   assertTrue(status.getPermission().toString().equals("rw-r--r--"));
@@ -248,6 +252,12 @@ public class TestPermission {
   status = nnfs.getFileStatus(CHILD_FILE1);
   assertTrue(status.getPermission().toString().equals("rwx--"));
 
+  // mkdirs wi
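
The assertion added above ("Expect 755 = 777 (default dir) - 022 (default umask)") follows from directories now being masked from FsPermission.getDirDefault() instead of the file default. A small sketch of that arithmetic (the umask 022 is only the illustrative default; real clients take it from fs.permissions.umask-mode, and the class name is made up):

import org.apache.hadoop.fs.permission.FsPermission;

public class DirDefaultPermissionSketch {
  public static void main(String[] args) {
    // getDirDefault() is 0777; masking with 022 leaves 0755 (rwxr-xr-x).
    FsPermission masked = FsPermission.getDirDefault()
        .applyUMask(new FsPermission((short) 022));
    System.out.println(masked);  // prints rwxr-xr-x
  }
}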

[hadoop] branch branch-2.10 updated: HADOOP-17999. No-op implementation of setWriteChecksum and setVerifyChecksum in ViewFileSystem. Contributed by Abhishek Das. (#3639)

2021-11-16 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new f9463e5  HADOOP-17999. No-op implementation of setWriteChecksum and 
setVerifyChecksum in ViewFileSystem. Contributed by Abhishek Das. (#3639)
f9463e5 is described below

commit f9463e511191cd100e9ed3f3b10bbad1d3989816
Author: Abhishek Das 
AuthorDate: Tue Nov 16 17:56:30 2021 -0800

HADOOP-17999. No-op implementation of setWriteChecksum and 
setVerifyChecksum in ViewFileSystem. Contributed by Abhishek Das. (#3639)

(cherry picked from commit 54a1d78e16533e286455de62a545ee75cbc1eff5)
---
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java| 20 +++---
 .../fs/viewfs/TestViewFileSystemDelegation.java| 12 --
 .../hadoop/fs/viewfs/ViewFileSystemBaseTest.java   | 43 ++
 3 files changed, 49 insertions(+), 26 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 5ee706d..c832cae 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -776,13 +776,9 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
-  public void setVerifyChecksum(final boolean verifyChecksum) { 
-List> mountPoints = 
-fsState.getMountPoints();
-Map fsMap = initializeMountedFileSystems(mountPoints);
-for (InodeTree.MountPoint mount : mountPoints) {
-  fsMap.get(mount.src).setVerifyChecksum(verifyChecksum);
-}
+  public void setVerifyChecksum(final boolean verifyChecksum) {
+// This is a file system level operations, however ViewFileSystem
+// points to many file systems. Noop for ViewFileSystem.
   }
 
   /**
@@ -874,13 +870,9 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
-  public void setWriteChecksum(final boolean writeChecksum) { 
-List> mountPoints = 
-fsState.getMountPoints();
-Map fsMap = initializeMountedFileSystems(mountPoints);
-for (InodeTree.MountPoint mount : mountPoints) {
-  fsMap.get(mount.src).setWriteChecksum(writeChecksum);
-}
+  public void setWriteChecksum(final boolean writeChecksum) {
+// This is a file system level operations, however ViewFileSystem
+// points to many file systems. Noop for ViewFileSystem.
   }
 
   @Override
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
index d8c39f7..3a60d6e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
@@ -83,12 +83,6 @@ public class TestViewFileSystemDelegation { //extends 
ViewFileSystemTestSetup {
 assertEquals(new URI("fs2:/").getAuthority(), fs2.getUri().getAuthority());
   }
   
-  @Test
-  public void testVerifyChecksum() throws Exception {
-checkVerifyChecksum(false);
-checkVerifyChecksum(true);
-  }
-
   /**
* Tests that ViewFileSystem dispatches calls for every ACL method through 
the
* mount table to the correct underlying FileSystem with all Path arguments
@@ -144,12 +138,6 @@ public class TestViewFileSystemDelegation { //extends 
ViewFileSystemTestSetup {
 verify(mockFs2).getAclStatus(mockFsPath2);
   }
 
-  void checkVerifyChecksum(boolean flag) {
-viewFs.setVerifyChecksum(flag);
-assertEquals(flag, fs1.getVerifyChecksum());
-assertEquals(flag, fs2.getVerifyChecksum());
-  }
-
   static class FakeFileSystem extends LocalFileSystem {
 boolean verifyChecksum = true;
 URI uri;
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index d24d92a..1f01ea3 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -1129,4 +1129,47 @@ abstract public class ViewFileSystemBaseTest {
 // viewfs inner cache is disabled
 assertEquals(cacheSize + 2, TestFileUtil.getCacheSize());
   }
+
+  @Test
+  public void testTargetFileSystemLazyInitializationForChecksumMethods()
+  throws Exception {
+final String clusterName = "

[hadoop] branch branch-3.2 updated: HADOOP-17999. No-op implementation of setWriteChecksum and setVerifyChecksum in ViewFileSystem. Contributed by Abhishek Das. (#3639)

2021-11-16 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 0e02f03  HADOOP-17999. No-op implementation of setWriteChecksum and 
setVerifyChecksum in ViewFileSystem. Contributed by Abhishek Das. (#3639)
0e02f03 is described below

commit 0e02f03eae3d74f588ef584119d36a755f9c9e35
Author: Abhishek Das 
AuthorDate: Tue Nov 16 17:56:30 2021 -0800

HADOOP-17999. No-op implementation of setWriteChecksum and 
setVerifyChecksum in ViewFileSystem. Contributed by Abhishek Das. (#3639)

(cherry picked from commit 54a1d78e16533e286455de62a545ee75cbc1eff5)
---
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java| 20 +++---
 .../fs/viewfs/TestViewFileSystemDelegation.java| 12 --
 .../hadoop/fs/viewfs/ViewFileSystemBaseTest.java   | 43 ++
 3 files changed, 49 insertions(+), 26 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index a8cadc8..a430727 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -862,13 +862,9 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
-  public void setVerifyChecksum(final boolean verifyChecksum) { 
-List> mountPoints = 
-fsState.getMountPoints();
-Map fsMap = initializeMountedFileSystems(mountPoints);
-for (InodeTree.MountPoint mount : mountPoints) {
-  fsMap.get(mount.src).setVerifyChecksum(verifyChecksum);
-}
+  public void setVerifyChecksum(final boolean verifyChecksum) {
+// This is a file system level operations, however ViewFileSystem
+// points to many file systems. Noop for ViewFileSystem.
   }
 
   /**
@@ -964,13 +960,9 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
-  public void setWriteChecksum(final boolean writeChecksum) { 
-List> mountPoints = 
-fsState.getMountPoints();
-Map fsMap = initializeMountedFileSystems(mountPoints);
-for (InodeTree.MountPoint mount : mountPoints) {
-  fsMap.get(mount.src).setWriteChecksum(writeChecksum);
-}
+  public void setWriteChecksum(final boolean writeChecksum) {
+// This is a file system level operations, however ViewFileSystem
+// points to many file systems. Noop for ViewFileSystem.
   }
 
   @Override
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
index d8c39f7..3a60d6e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
@@ -83,12 +83,6 @@ public class TestViewFileSystemDelegation { //extends 
ViewFileSystemTestSetup {
 assertEquals(new URI("fs2:/").getAuthority(), fs2.getUri().getAuthority());
   }
   
-  @Test
-  public void testVerifyChecksum() throws Exception {
-checkVerifyChecksum(false);
-checkVerifyChecksum(true);
-  }
-
   /**
* Tests that ViewFileSystem dispatches calls for every ACL method through 
the
* mount table to the correct underlying FileSystem with all Path arguments
@@ -144,12 +138,6 @@ public class TestViewFileSystemDelegation { //extends 
ViewFileSystemTestSetup {
 verify(mockFs2).getAclStatus(mockFsPath2);
   }
 
-  void checkVerifyChecksum(boolean flag) {
-viewFs.setVerifyChecksum(flag);
-assertEquals(flag, fs1.getVerifyChecksum());
-assertEquals(flag, fs2.getVerifyChecksum());
-  }
-
   static class FakeFileSystem extends LocalFileSystem {
 boolean verifyChecksum = true;
 URI uri;
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index af7f415..8d82ae6 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -1474,4 +1474,47 @@ abstract public class ViewFileSystemBaseTest {
 // viewfs inner cache is disabled
 assertEquals(cacheSize + 2, TestFileUtil.getCacheSize());
   }
+
+  @Test
+  public void testTargetFileSystemLazyInitializationForChecksumMethods()
+  throws Exception {
+final String clusterName = "

[hadoop] branch branch-3.3 updated: HADOOP-17999. No-op implementation of setWriteChecksum and setVerifyChecksum in ViewFileSystem. Contributed by Abhishek Das. (#3639)

2021-11-16 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new f456dc1  HADOOP-17999. No-op implementation of setWriteChecksum and 
setVerifyChecksum in ViewFileSystem. Contributed by Abhishek Das. (#3639)
f456dc1 is described below

commit f456dc1837b42e4fbe9435644ed024235dc241bb
Author: Abhishek Das 
AuthorDate: Tue Nov 16 17:56:30 2021 -0800

HADOOP-17999. No-op implementation of setWriteChecksum and 
setVerifyChecksum in ViewFileSystem. Contributed by Abhishek Das. (#3639)

(cherry picked from commit 54a1d78e16533e286455de62a545ee75cbc1eff5)
---
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java| 20 +++---
 .../fs/viewfs/TestViewFileSystemDelegation.java| 12 --
 .../hadoop/fs/viewfs/ViewFileSystemBaseTest.java   | 43 ++
 3 files changed, 49 insertions(+), 26 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index e81af2a..7503edd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -917,13 +917,9 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
-  public void setVerifyChecksum(final boolean verifyChecksum) { 
-List> mountPoints = 
-fsState.getMountPoints();
-Map fsMap = initializeMountedFileSystems(mountPoints);
-for (InodeTree.MountPoint mount : mountPoints) {
-  fsMap.get(mount.src).setVerifyChecksum(verifyChecksum);
-}
+  public void setVerifyChecksum(final boolean verifyChecksum) {
+// This is a file system level operations, however ViewFileSystem
+// points to many file systems. Noop for ViewFileSystem.
   }
 
   /**
@@ -1019,13 +1015,9 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
-  public void setWriteChecksum(final boolean writeChecksum) { 
-List> mountPoints = 
-fsState.getMountPoints();
-Map fsMap = initializeMountedFileSystems(mountPoints);
-for (InodeTree.MountPoint mount : mountPoints) {
-  fsMap.get(mount.src).setWriteChecksum(writeChecksum);
-}
+  public void setWriteChecksum(final boolean writeChecksum) {
+// This is a file system level operations, however ViewFileSystem
+// points to many file systems. Noop for ViewFileSystem.
   }
 
   @Override
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
index d8c39f7..3a60d6e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
@@ -83,12 +83,6 @@ public class TestViewFileSystemDelegation { //extends 
ViewFileSystemTestSetup {
 assertEquals(new URI("fs2:/").getAuthority(), fs2.getUri().getAuthority());
   }
   
-  @Test
-  public void testVerifyChecksum() throws Exception {
-checkVerifyChecksum(false);
-checkVerifyChecksum(true);
-  }
-
   /**
* Tests that ViewFileSystem dispatches calls for every ACL method through 
the
* mount table to the correct underlying FileSystem with all Path arguments
@@ -144,12 +138,6 @@ public class TestViewFileSystemDelegation { //extends 
ViewFileSystemTestSetup {
 verify(mockFs2).getAclStatus(mockFsPath2);
   }
 
-  void checkVerifyChecksum(boolean flag) {
-viewFs.setVerifyChecksum(flag);
-assertEquals(flag, fs1.getVerifyChecksum());
-assertEquals(flag, fs2.getVerifyChecksum());
-  }
-
   static class FakeFileSystem extends LocalFileSystem {
 boolean verifyChecksum = true;
 URI uri;
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index 5890ecb..e4e7b0e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -1474,4 +1474,47 @@ abstract public class ViewFileSystemBaseTest {
 // viewfs inner cache is disabled
 assertEquals(cacheSize + 2, TestFileUtil.getCacheSize());
   }
+
+  @Test
+  public void testTargetFileSystemLazyInitializationForChecksumMethods()
+  throws Exception {
+final String clusterName = "

[hadoop] branch trunk updated: HADOOP-17999. No-op implementation of setWriteChecksum and setVerifyChecksum in ViewFileSystem. Contributed by Abhishek Das. (#3639)

2021-11-16 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 54a1d78  HADOOP-17999. No-op implementation of setWriteChecksum and 
setVerifyChecksum in ViewFileSystem. Contributed by Abhishek Das. (#3639)
54a1d78 is described below

commit 54a1d78e16533e286455de62a545ee75cbc1eff5
Author: Abhishek Das 
AuthorDate: Tue Nov 16 17:56:30 2021 -0800

HADOOP-17999. No-op implementation of setWriteChecksum and 
setVerifyChecksum in ViewFileSystem. Contributed by Abhishek Das. (#3639)
---
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java| 20 +++---
 .../fs/viewfs/TestViewFileSystemDelegation.java| 12 --
 .../hadoop/fs/viewfs/ViewFileSystemBaseTest.java   | 43 ++
 3 files changed, 49 insertions(+), 26 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index ce918a1..538f03a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -917,13 +917,9 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
-  public void setVerifyChecksum(final boolean verifyChecksum) { 
-List> mountPoints = 
-fsState.getMountPoints();
-Map fsMap = initializeMountedFileSystems(mountPoints);
-for (InodeTree.MountPoint mount : mountPoints) {
-  fsMap.get(mount.src).setVerifyChecksum(verifyChecksum);
-}
+  public void setVerifyChecksum(final boolean verifyChecksum) {
+// This is a file system level operations, however ViewFileSystem
+// points to many file systems. Noop for ViewFileSystem.
   }
 
   /**
@@ -1019,13 +1015,9 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
-  public void setWriteChecksum(final boolean writeChecksum) { 
-List> mountPoints = 
-fsState.getMountPoints();
-Map fsMap = initializeMountedFileSystems(mountPoints);
-for (InodeTree.MountPoint mount : mountPoints) {
-  fsMap.get(mount.src).setWriteChecksum(writeChecksum);
-}
+  public void setWriteChecksum(final boolean writeChecksum) {
+// This is a file system level operations, however ViewFileSystem
+// points to many file systems. Noop for ViewFileSystem.
   }
 
   @Override
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
index d8c39f7..3a60d6e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemDelegation.java
@@ -83,12 +83,6 @@ public class TestViewFileSystemDelegation { //extends 
ViewFileSystemTestSetup {
 assertEquals(new URI("fs2:/").getAuthority(), fs2.getUri().getAuthority());
   }
   
-  @Test
-  public void testVerifyChecksum() throws Exception {
-checkVerifyChecksum(false);
-checkVerifyChecksum(true);
-  }
-
   /**
* Tests that ViewFileSystem dispatches calls for every ACL method through 
the
* mount table to the correct underlying FileSystem with all Path arguments
@@ -144,12 +138,6 @@ public class TestViewFileSystemDelegation { //extends 
ViewFileSystemTestSetup {
 verify(mockFs2).getAclStatus(mockFsPath2);
   }
 
-  void checkVerifyChecksum(boolean flag) {
-viewFs.setVerifyChecksum(flag);
-assertEquals(flag, fs1.getVerifyChecksum());
-assertEquals(flag, fs2.getVerifyChecksum());
-  }
-
   static class FakeFileSystem extends LocalFileSystem {
 boolean verifyChecksum = true;
 URI uri;
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index be50f45..434eff0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -1472,4 +1472,47 @@ abstract public class ViewFileSystemBaseTest {
 // viewfs inner cache is disabled
 assertEquals(cacheSize + 2, TestFileUtil.getCacheSize());
   }
+
+  @Test
+  public void testTargetFileSystemLazyInitializationForChecksumMethods()
+  throws Exception {
+final String clusterName = "cluster" + new Random().nextInt();
+Configuration config = new Configuration(con
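
The practical effect of this change is that toggling the checksum flags on a viewfs:// file system no longer forces every mount target to be initialized, which is what the new testTargetFileSystemLazyInitializationForChecksumMethods test above checks. A minimal usage sketch, assuming mount table entries (fs.viewfs.mounttable.*) are already present in the configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;

public class ViewFsChecksumNoopSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Mount table entries are assumed to be set elsewhere in conf.
    FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
    viewFs.setVerifyChecksum(true);   // no-op for ViewFileSystem after this patch
    viewFs.setWriteChecksum(false);   // no-op for ViewFileSystem after this patch
  }
}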

[hadoop] branch branch-2.10 updated: HDFS-7612: Fix default cache directory in TestOfflineEditsViewer.testStored. Contributed by Michael Kuchenbecker (#3571)

2021-10-21 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 79d1818  HDFS-7612: Fix default cache directory in 
TestOfflineEditsViewer.testStored. Contributed by Michael Kuchenbecker (#3571)
79d1818 is described below

commit 79d181887901edcba4b2f2fdfe4c14882698f672
Author: Michael Kuchenbecker 
AuthorDate: Wed Oct 20 14:38:09 2021 -0700

HDFS-7612: Fix default cache directory in 
TestOfflineEditsViewer.testStored. Contributed by Michael Kuchenbecker (#3571)

(cherry picked from commit a73ff6915ae3e0ced1b4c814a94845f51e655a0c)
---
 .../hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
index bbe1c2e..2dea755 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
@@ -163,7 +163,7 @@ public class TestOfflineEditsViewer {
   public void testStored() throws IOException {
 // reference edits stored with source code (see build.xml)
 final String cacheDir = System.getProperty("test.cache.data",
-"build/test/cache");
+"target/test-classes");
 // binary, XML, reparsed binary
 String editsStored = cacheDir + "/editsStored";
 String editsStoredParsedXml = cacheDir + "/editsStoredParsed.xml";




[hadoop] branch branch-3.2 updated: HDFS-7612: Fix default cache directory in TestOfflineEditsViewer.testStored. Contributed by Michael Kuchenbecker (#3571)

2021-10-21 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 7de539f  HDFS-7612: Fix default cache directory in 
TestOfflineEditsViewer.testStored. Contributed by Michael Kuchenbecker (#3571)
7de539f is described below

commit 7de539f3cacabac00f0a69852742eb17538eb80e
Author: Michael Kuchenbecker 
AuthorDate: Wed Oct 20 14:38:09 2021 -0700

HDFS-7612: Fix default cache directory in 
TestOfflineEditsViewer.testStored. Contributed by Michael Kuchenbecker (#3571)

(cherry picked from commit a73ff6915ae3e0ced1b4c814a94845f51e655a0c)
---
 .../hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
index 8015569..52bdb38 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
@@ -164,7 +164,7 @@ public class TestOfflineEditsViewer {
   public void testStored() throws IOException {
 // reference edits stored with source code (see build.xml)
 final String cacheDir = System.getProperty("test.cache.data",
-"build/test/cache");
+"target/test-classes");
 // binary, XML, reparsed binary
 String editsStored = cacheDir + "/editsStored";
 String editsStoredParsedXml = cacheDir + "/editsStoredParsed.xml";




[hadoop] branch branch-3.3 updated: HDFS-7612: Fix default cache directory in TestOfflineEditsViewer.testStored. Contributed by Michael Kuchenbecker (#3571)

2021-10-21 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 78723e0  HDFS-7612: Fix default cache directory in 
TestOfflineEditsViewer.testStored. Contributed by Michael Kuchenbecker (#3571)
78723e0 is described below

commit 78723e045ae7abaf58452a64ada88c7cfd7011b8
Author: Michael Kuchenbecker 
AuthorDate: Wed Oct 20 14:38:09 2021 -0700

HDFS-7612: Fix default cache directory in 
TestOfflineEditsViewer.testStored. Contributed by Michael Kuchenbecker (#3571)

(cherry picked from commit a73ff6915ae3e0ced1b4c814a94845f51e655a0c)
---
 .../hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
index 226e486..6f30416 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
@@ -164,7 +164,7 @@ public class TestOfflineEditsViewer {
   public void testStored() throws IOException {
 // reference edits stored with source code (see build.xml)
 final String cacheDir = System.getProperty("test.cache.data",
-"build/test/cache");
+"target/test-classes");
 // binary, XML, reparsed binary
 String editsStored = cacheDir + "/editsStored";
 String editsStoredParsedXml = cacheDir + "/editsStoredParsed.xml";




[hadoop] branch trunk updated: HDFS-7612: Fix default cache directory in TestOfflineEditsViewer.testStored. Contributed by Michael Kuchenbecker (#3571)

2021-10-21 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a73ff69  HDFS-7612: Fix default cache directory in 
TestOfflineEditsViewer.testStored. Contributed by Michael Kuchenbecker (#3571)
a73ff69 is described below

commit a73ff6915ae3e0ced1b4c814a94845f51e655a0c
Author: Michael Kuchenbecker 
AuthorDate: Wed Oct 20 14:38:09 2021 -0700

HDFS-7612: Fix default cache directory in 
TestOfflineEditsViewer.testStored. Contributed by Michael Kuchenbecker (#3571)
---
 .../hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
index 226e486..6f30416 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
@@ -164,7 +164,7 @@ public class TestOfflineEditsViewer {
   public void testStored() throws IOException {
 // reference edits stored with source code (see build.xml)
 final String cacheDir = System.getProperty("test.cache.data",
-"build/test/cache");
+"target/test-classes");
 // binary, XML, reparsed binary
 String editsStored = cacheDir + "/editsStored";
 String editsStoredParsedXml = cacheDir + "/editsStoredParsed.xml";




[hadoop] branch branch-2.10 updated: HADOOP-16532. Fix TestViewFsTrash to use the correct homeDir. Contributed by Xing Lin. (#3514)

2021-10-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 2565ec5  HADOOP-16532. Fix TestViewFsTrash to use the correct homeDir. 
Contributed by Xing Lin. (#3514)
2565ec5 is described below

commit 2565ec58046450802a345eea311c3ebc01620d17
Author: Xing Lin 
AuthorDate: Wed Oct 13 13:43:47 2021 -0700

HADOOP-16532. Fix TestViewFsTrash to use the correct homeDir. Contributed 
by Xing Lin. (#3514)

(cherry picked from commit 97c0f968792e1a45a1569a3184af7b114fc8c022)
---
 .../test/java/org/apache/hadoop/fs/TestTrash.java  |  7 ++--
 .../apache/hadoop/fs/viewfs/TestViewFsTrash.java   | 41 +-
 2 files changed, 20 insertions(+), 28 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 1a6d580..2e75f85 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -117,9 +117,10 @@ public class TestTrash extends TestCase {
 
   /**
* Test trash for the shell's delete command for the default file system
-   * specified in the paramter conf
-   * @param conf 
+   * specified in the parameter conf
+   * @param conf - configuration object for the filesystem
* @param base - the base path where files are created
+   * @param trashRootFs - the filesystem object to test trash
* @param trashRoot - the expected place where the trashbin resides
* @throws IOException
*/
@@ -679,7 +680,7 @@ public class TestTrash extends TestCase {
 }
   }
 
-  static class TestLFS extends LocalFileSystem {
+  public static class TestLFS extends LocalFileSystem {
 Path home;
 TestLFS() {
   this(TEST_DIR);
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
index 62ef9d1..8e5fa72 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
@@ -17,14 +17,10 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
-
-import java.io.IOException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsConstants;
-import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.TestTrash;
 import org.junit.After;
@@ -35,31 +31,26 @@ public class TestViewFsTrash {
   FileSystem fsTarget;  // the target file system - the mount will point here
   FileSystem fsView;
   Configuration conf;
-  FileSystemTestHelper fileSystemTestHelper = new FileSystemTestHelper();
-
-  class TestLFS extends LocalFileSystem {
-Path home;
-TestLFS() throws IOException {
-  this(new Path(fileSystemTestHelper.getTestRootDir()));
-}
-TestLFS(Path home) throws IOException {
-  super();
-  this.home = home;
-}
-@Override
-public Path getHomeDirectory() {
-  return home;
-}
-  }
+  private FileSystemTestHelper fileSystemTestHelper;
 
   @Before
   public void setUp() throws Exception {
-fsTarget = FileSystem.getLocal(new Configuration());
-fsTarget.mkdirs(new Path(fileSystemTestHelper.
-getTestRootPath(fsTarget), "dir1"));
+Configuration targetFSConf = new Configuration();
+targetFSConf.setClass("fs.file.impl", TestTrash.TestLFS.class, 
FileSystem.class);
+
+fsTarget = FileSystem.getLocal(targetFSConf);
+fileSystemTestHelper = new 
FileSystemTestHelper(fsTarget.getHomeDirectory().toUri().getPath());
+
 conf = ViewFileSystemTestSetup.createConfig();
 fsView = ViewFileSystemTestSetup.setupForViewFileSystem(conf, 
fileSystemTestHelper, fsTarget);
 conf.set("fs.defaultFS", FsConstants.VIEWFS_URI.toString());
+
+/*
+ * Need to set the fs.file.impl to TestViewFsTrash.TestLFS. Otherwise, it 
will load
+ * LocalFileSystem implementation which uses 
System.getProperty("user.home") for homeDirectory.
+ */
+conf.setClass("fs.file.impl", TestTrash.TestLFS.class, FileSystem.class);
+
   }
  
   @After
@@ -70,9 +61,9 @@ public class TestViewFsTrash {
   }
   
   @Test
-  public void testTrash() throws IOException {
+  public void testTrash() throws Exception {
 TestTrash.trashShell(conf, fileSystemTestHelper.getTestRootPath(fsView),
-fsTarget, new Path(fsTarget.getHomeDirectory(), ".Trash/Current"));
+ 

[hadoop] branch branch-3.2 updated: HADOOP-16532. Fix TestViewFsTrash to use the correct homeDir. Contributed by Xing Lin. (#3514)

2021-10-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new c8f22fe  HADOOP-16532. Fix TestViewFsTrash to use the correct homeDir. 
Contributed by Xing Lin. (#3514)
c8f22fe is described below

commit c8f22fe7374415791617eb786ab345d99e4e32fd
Author: Xing Lin 
AuthorDate: Wed Oct 13 13:43:47 2021 -0700

HADOOP-16532. Fix TestViewFsTrash to use the correct homeDir. Contributed 
by Xing Lin. (#3514)

(cherry picked from commit 97c0f968792e1a45a1569a3184af7b114fc8c022)
---
 .../test/java/org/apache/hadoop/fs/TestTrash.java  |  7 ++--
 .../apache/hadoop/fs/viewfs/TestViewFsTrash.java   | 41 +-
 2 files changed, 20 insertions(+), 28 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index cf22f3b..efb2a06 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -120,9 +120,10 @@ public class TestTrash {
 
   /**
* Test trash for the shell's delete command for the default file system
-   * specified in the paramter conf
-   * @param conf 
+   * specified in the parameter conf
+   * @param conf - configuration object for the filesystem
* @param base - the base path where files are created
+   * @param trashRootFs - the filesystem object to test trash
* @param trashRoot - the expected place where the trashbin resides
* @throws IOException
*/
@@ -778,7 +779,7 @@ public class TestTrash {
 }
   }
 
-  static class TestLFS extends LocalFileSystem {
+  public static class TestLFS extends LocalFileSystem {
 Path home;
 TestLFS() {
   this(TEST_DIR);
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
index 62ef9d1..8e5fa72 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
@@ -17,14 +17,10 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
-
-import java.io.IOException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsConstants;
-import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.TestTrash;
 import org.junit.After;
@@ -35,31 +31,26 @@ public class TestViewFsTrash {
   FileSystem fsTarget;  // the target file system - the mount will point here
   FileSystem fsView;
   Configuration conf;
-  FileSystemTestHelper fileSystemTestHelper = new FileSystemTestHelper();
-
-  class TestLFS extends LocalFileSystem {
-Path home;
-TestLFS() throws IOException {
-  this(new Path(fileSystemTestHelper.getTestRootDir()));
-}
-TestLFS(Path home) throws IOException {
-  super();
-  this.home = home;
-}
-@Override
-public Path getHomeDirectory() {
-  return home;
-}
-  }
+  private FileSystemTestHelper fileSystemTestHelper;
 
   @Before
   public void setUp() throws Exception {
-fsTarget = FileSystem.getLocal(new Configuration());
-fsTarget.mkdirs(new Path(fileSystemTestHelper.
-getTestRootPath(fsTarget), "dir1"));
+Configuration targetFSConf = new Configuration();
+targetFSConf.setClass("fs.file.impl", TestTrash.TestLFS.class, 
FileSystem.class);
+
+fsTarget = FileSystem.getLocal(targetFSConf);
+fileSystemTestHelper = new 
FileSystemTestHelper(fsTarget.getHomeDirectory().toUri().getPath());
+
 conf = ViewFileSystemTestSetup.createConfig();
 fsView = ViewFileSystemTestSetup.setupForViewFileSystem(conf, 
fileSystemTestHelper, fsTarget);
 conf.set("fs.defaultFS", FsConstants.VIEWFS_URI.toString());
+
+/*
+ * Need to set the fs.file.impl to TestViewFsTrash.TestLFS. Otherwise, it 
will load
+ * LocalFileSystem implementation which uses 
System.getProperty("user.home") for homeDirectory.
+ */
+conf.setClass("fs.file.impl", TestTrash.TestLFS.class, FileSystem.class);
+
   }
  
   @After
@@ -70,9 +61,9 @@ public class TestViewFsTrash {
   }
   
   @Test
-  public void testTrash() throws IOException {
+  public void testTrash() throws Exception {
 TestTrash.trashShell(conf, fileSystemTestHelper.getTestRootPath(fsView),
-fsTarget, new Path(fsTarget.getHomeDirectory(), ".Trash/Current"));
+fsView, new Path(fi

[hadoop] branch branch-3.3 updated: HADOOP-16532. Fix TestViewFsTrash to use the correct homeDir. Contributed by Xing Lin. (#3514)

2021-10-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new af920f1  HADOOP-16532. Fix TestViewFsTrash to use the correct homeDir. 
Contributed by Xing Lin. (#3514)
af920f1 is described below

commit af920f138b1f8dcd2417f6af77ab96672f2bc4bc
Author: Xing Lin 
AuthorDate: Wed Oct 13 13:43:47 2021 -0700

HADOOP-16532. Fix TestViewFsTrash to use the correct homeDir. Contributed 
by Xing Lin. (#3514)

(cherry picked from commit 97c0f968792e1a45a1569a3184af7b114fc8c022)
---
 .../test/java/org/apache/hadoop/fs/TestTrash.java  |  7 ++--
 .../apache/hadoop/fs/viewfs/TestViewFsTrash.java   | 39 +-
 2 files changed, 19 insertions(+), 27 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index e8e0287..7228778 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -123,9 +123,10 @@ public class TestTrash {
 
   /**
* Test trash for the shell's delete command for the default file system
-   * specified in the paramter conf
-   * @param conf 
+   * specified in the parameter conf
+   * @param conf - configuration object for the filesystem
* @param base - the base path where files are created
+   * @param trashRootFs - the filesystem object to test trash
* @param trashRoot - the expected place where the trashbin resides
* @throws IOException
*/
@@ -793,7 +794,7 @@ public class TestTrash {
 }
   }
 
-  static class TestLFS extends LocalFileSystem {
+  public static class TestLFS extends LocalFileSystem {
 private URI uriName = null;
 Path home;
 TestLFS() {
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
index 94c3262..8e5fa72 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
@@ -17,14 +17,10 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
-
-import java.io.IOException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsConstants;
-import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.TestTrash;
 import org.junit.After;
@@ -35,31 +31,26 @@ public class TestViewFsTrash {
   FileSystem fsTarget;  // the target file system - the mount will point here
   FileSystem fsView;
   Configuration conf;
-  FileSystemTestHelper fileSystemTestHelper = new FileSystemTestHelper();
-
-  class TestLFS extends LocalFileSystem {
-Path home;
-TestLFS() throws IOException {
-  this(new Path(fileSystemTestHelper.getTestRootDir()));
-}
-TestLFS(Path home) throws IOException {
-  super();
-  this.home = home;
-}
-@Override
-public Path getHomeDirectory() {
-  return home;
-}
-  }
+  private FileSystemTestHelper fileSystemTestHelper;
 
   @Before
   public void setUp() throws Exception {
-fsTarget = FileSystem.getLocal(new Configuration());
-fsTarget.mkdirs(new Path(fileSystemTestHelper.
-getTestRootPath(fsTarget), "dir1"));
+Configuration targetFSConf = new Configuration();
+targetFSConf.setClass("fs.file.impl", TestTrash.TestLFS.class, 
FileSystem.class);
+
+fsTarget = FileSystem.getLocal(targetFSConf);
+fileSystemTestHelper = new 
FileSystemTestHelper(fsTarget.getHomeDirectory().toUri().getPath());
+
 conf = ViewFileSystemTestSetup.createConfig();
 fsView = ViewFileSystemTestSetup.setupForViewFileSystem(conf, 
fileSystemTestHelper, fsTarget);
 conf.set("fs.defaultFS", FsConstants.VIEWFS_URI.toString());
+
+/*
+ * Need to set the fs.file.impl to TestViewFsTrash.TestLFS. Otherwise, it 
will load
+ * LocalFileSystem implementation which uses 
System.getProperty("user.home") for homeDirectory.
+ */
+conf.setClass("fs.file.impl", TestTrash.TestLFS.class, FileSystem.class);
+
   }
  
   @After
@@ -72,7 +63,7 @@ public class TestViewFsTrash {
   @Test
   public void testTrash() throws Exception {
 TestTrash.trashShell(conf, fileSystemTestHelper.getTestRootPath(fsView),
-fsTarget, new Path(fsTarget.getHomeDirectory(), ".Trash/Current"));
+fsView, new Path(fi

[hadoop] branch trunk updated: HADOOP-16532. Fix TestViewFsTrash to use the correct homeDir. Contributed by Xing Lin. (#3514)

2021-10-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 97c0f96  HADOOP-16532. Fix TestViewFsTrash to use the correct homeDir. 
Contributed by Xing Lin. (#3514)
97c0f96 is described below

commit 97c0f968792e1a45a1569a3184af7b114fc8c022
Author: Xing Lin 
AuthorDate: Wed Oct 13 13:43:47 2021 -0700

HADOOP-16532. Fix TestViewFsTrash to use the correct homeDir. Contributed 
by Xing Lin. (#3514)
---
 .../test/java/org/apache/hadoop/fs/TestTrash.java  |  7 ++--
 .../apache/hadoop/fs/viewfs/TestViewFsTrash.java   | 39 +-
 2 files changed, 19 insertions(+), 27 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index e8e0287..7228778 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -123,9 +123,10 @@ public class TestTrash {
 
   /**
* Test trash for the shell's delete command for the default file system
-   * specified in the paramter conf
-   * @param conf 
+   * specified in the parameter conf
+   * @param conf - configuration object for the filesystem
* @param base - the base path where files are created
+   * @param trashRootFs - the filesystem object to test trash
* @param trashRoot - the expected place where the trashbin resides
* @throws IOException
*/
@@ -793,7 +794,7 @@ public class TestTrash {
 }
   }
 
-  static class TestLFS extends LocalFileSystem {
+  public static class TestLFS extends LocalFileSystem {
 private URI uriName = null;
 Path home;
 TestLFS() {
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
index 94c3262..8e5fa72 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
@@ -17,14 +17,10 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
-
-import java.io.IOException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsConstants;
-import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.TestTrash;
 import org.junit.After;
@@ -35,31 +31,26 @@ public class TestViewFsTrash {
   FileSystem fsTarget;  // the target file system - the mount will point here
   FileSystem fsView;
   Configuration conf;
-  FileSystemTestHelper fileSystemTestHelper = new FileSystemTestHelper();
-
-  class TestLFS extends LocalFileSystem {
-Path home;
-TestLFS() throws IOException {
-  this(new Path(fileSystemTestHelper.getTestRootDir()));
-}
-TestLFS(Path home) throws IOException {
-  super();
-  this.home = home;
-}
-@Override
-public Path getHomeDirectory() {
-  return home;
-}
-  }
+  private FileSystemTestHelper fileSystemTestHelper;
 
   @Before
   public void setUp() throws Exception {
-fsTarget = FileSystem.getLocal(new Configuration());
-fsTarget.mkdirs(new Path(fileSystemTestHelper.
-getTestRootPath(fsTarget), "dir1"));
+Configuration targetFSConf = new Configuration();
+targetFSConf.setClass("fs.file.impl", TestTrash.TestLFS.class, 
FileSystem.class);
+
+fsTarget = FileSystem.getLocal(targetFSConf);
+fileSystemTestHelper = new 
FileSystemTestHelper(fsTarget.getHomeDirectory().toUri().getPath());
+
 conf = ViewFileSystemTestSetup.createConfig();
 fsView = ViewFileSystemTestSetup.setupForViewFileSystem(conf, 
fileSystemTestHelper, fsTarget);
 conf.set("fs.defaultFS", FsConstants.VIEWFS_URI.toString());
+
+/*
+ * Need to set the fs.file.impl to TestViewFsTrash.TestLFS. Otherwise, it 
will load
+ * LocalFileSystem implementation which uses 
System.getProperty("user.home") for homeDirectory.
+ */
+conf.setClass("fs.file.impl", TestTrash.TestLFS.class, FileSystem.class);
+
   }
  
   @After
@@ -72,7 +63,7 @@ public class TestViewFsTrash {
   @Test
   public void testTrash() throws Exception {
 TestTrash.trashShell(conf, fileSystemTestHelper.getTestRootPath(fsView),
-fsTarget, new Path(fsTarget.getHomeDirectory(), ".Trash/Current"));
+fsView, new Path(fileSystemTestHelper.getTestRootPath(fsView), 
".Trash/Current"));
   }
   
 }
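
Across all four branch pushes the essence of HADOOP-16532 is the same: register a LocalFileSystem subclass whose getHomeDirectory() is deterministic, via the fs.file.impl key, so that the trash root is derived from the file system itself rather than from the JVM's user.home. A hedged, self-contained sketch of that mechanism follows; the subclass name and the /tmp path are invented for the example, while fs.file.impl, setClass and getHomeDirectory are taken from the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: a LocalFileSystem with a fixed home directory, plugged in
// through fs.file.impl the same way the test plugs in TestTrash.TestLFS.
public class HomeDirOverrideSketch {
  public static class FixedHomeLFS extends LocalFileSystem {
    @Override
    public Path getHomeDirectory() {
      return new Path("/tmp/test-home"); // hypothetical, stands in for the helper's root
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setClass("fs.file.impl", FixedHomeLFS.class, FileSystem.class);
    FileSystem local = FileSystem.getLocal(conf);
    // Trash paths built from this FS now resolve under /tmp/test-home/.Trash/Current.
    System.out.println(local.getHomeDirectory());
  }
}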


[hadoop] branch branch-2.10 updated: HDFS-14216. NullPointerException happens in NamenodeWebHdfs. Contributed by lujie.

2021-09-10 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new edc1381  HDFS-14216. NullPointerException happens in NamenodeWebHdfs. 
Contributed by lujie.
edc1381 is described below

commit edc138186f02595847961347a27d77dea5a3607a
Author: Surendra Singh Lilhore 
AuthorDate: Thu Feb 21 20:36:34 2019 +0530

HDFS-14216. NullPointerException happens in NamenodeWebHdfs. Contributed by 
lujie.

(cherry picked from commit 92b53c40f070bbfe65c736f6f3eca721b9d227f5)
(cherry picked from commit 2e939515dfbaf26ca466c8a755cedde0ce4e9c1a)
---
 .../web/resources/NamenodeWebHdfsMethods.java  | 18 +
 .../web/resources/TestWebHdfsDataLocality.java | 23 ++
 2 files changed, 37 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index af71f9c..a6250a8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -265,11 +265,21 @@ public class NamenodeWebHdfsMethods {
   for (String host : StringUtils
   .getTrimmedStringCollection(excludeDatanodes)) {
 int idx = host.indexOf(":");
-if (idx != -1) {  
-  excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
-  host.substring(0, idx), Integer.parseInt(host.substring(idx + 
1;
+Node excludeNode = null;
+if (idx != -1) {
+  excludeNode = bm.getDatanodeManager().getDatanodeByXferAddr(
+ host.substring(0, idx), Integer.parseInt(host.substring(idx + 
1)));
 } else {
-  excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
+  excludeNode = bm.getDatanodeManager().getDatanodeByHost(host);
+}
+
+if (excludeNode != null) {
+  excludes.add(excludeNode);
+} else {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("DataNode " + host + " was requested to be excluded, "
++ "but it was not found.");
+  }
 }
   }
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
index 759719d..61e429d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
@@ -239,6 +239,29 @@ public class TestWebHdfsDataLocality {
   }
 
   @Test
+  public void testExcludeWrongDataNode() throws Exception {
+final Configuration conf = WebHdfsTestUtil.createConf();
+final String[] racks = {RACK0};
+final String[] hosts = {"DataNode1"};
+final int nDataNodes = hosts.length;
+
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
+try {
+  cluster.waitActive();
+  final NameNode namenode = cluster.getNameNode();
+  NamenodeWebHdfsMethods.chooseDatanode(
+  namenode, "/path", PutOpParam.Op.CREATE, 0,
+  DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT,
+  "DataNode2", LOCALHOST, null);
+} catch (Exception e) {
+  Assert.fail("Failed to exclude DataNode2" + e.getMessage());
+} finally {
+  cluster.shutdown();
+}
+  }
+
+  @Test
   public void testChooseDatanodeBeforeNamesystemInit() throws Exception {
 NameNode nn = mock(NameNode.class);
 when(nn.getNamesystem()).thenReturn(null);
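
The shape of the HDFS-14216 fix is "look up, null-check, then add": an exclude host that does not resolve to a registered DataNode is skipped, with a debug message, instead of putting a null into the excludes set and failing later with a NullPointerException. A reduced, dependency-free sketch of that pattern follows; lookup() is a stand-in for the DatanodeManager calls, and the host names simply mirror the test above.

import java.util.ArrayList;
import java.util.List;

// Illustrative only: the defensive null-check from NamenodeWebHdfsMethods,
// with lookup() standing in for getDatanodeByHost / getDatanodeByXferAddr.
public class ExcludeDatanodeSketch {
  static String lookup(String host) {
    return "DataNode1".equals(host) ? host : null; // unknown hosts resolve to null
  }

  public static void main(String[] args) {
    List<String> excludes = new ArrayList<>();
    for (String host : new String[] {"DataNode1", "DataNode2"}) {
      String node = lookup(host);
      if (node != null) {
        excludes.add(node); // only resolvable nodes are excluded
      } else {
        System.out.println("DataNode " + host
            + " was requested to be excluded, but it was not found.");
      }
    }
    System.out.println(excludes); // [DataNode1]
  }
}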




[hadoop] 04/06: HDFS-16128. [FGL] Added support for saving/loading an FS Image for PartitionedGSet. Contributed by Xing Lin. (#3201)

2021-08-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 4610e1d90273f46bc3b770de8c33b264660b17bc
Author: Xing Lin 
AuthorDate: Sat Jul 31 12:56:05 2021 -0700

HDFS-16128. [FGL] Added support for saving/loading an FS Image for 
PartitionedGSet. Contributed by Xing Lin. (#3201)
---
 .../org/apache/hadoop/util/PartitionedGSet.java|  24 +++--
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |   4 +-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  70 ++
 .../hadoop/hdfs/server/namenode/FSImage.java   |  12 +++
 .../hdfs/server/namenode/FSImageFormatPBINode.java |  11 ++-
 .../hadoop/hdfs/server/namenode/INodeMap.java  | 105 -
 6 files changed, 168 insertions(+), 58 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
index f3569cc..f493402 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -68,7 +68,7 @@ public class PartitionedGSet implements 
GSet {
* Consists of a hash table {@link LightWeightGSet} and a lock, which
* controls access to this partition independently on the other ones.
*/
-  private class PartitionEntry extends LightWeightGSet {
+  public class PartitionEntry extends LightWeightGSet {
 private final LatchLock partLock;
 
 PartitionEntry(int defaultPartitionCapacity) {
@@ -121,7 +121,7 @@ public class PartitionedGSet implements 
GSet {
 return size;
   }
 
-  protected PartitionEntry getPartition(final K key) {
+  public PartitionEntry getPartition(final K key) {
 Entry partEntry = partitions.floorEntry(key);
 if(partEntry == null) {
   return null;
@@ -174,6 +174,10 @@ public class PartitionedGSet implements 
GSet {
 E result = part.put(element);
 if(result == null) {  // new element
   size++;
+  LOG.debug("partitionPGSet.put: added key {}, size is now {} ", key, 
size);
+} else {
+  LOG.debug("partitionPGSet.put: replaced key {}, size is now {}",
+  key, size);
 }
 return result;
   }
@@ -230,19 +234,25 @@ public class PartitionedGSet implements 
GSet {
   try {
 long[] key = (long[]) inodeClass.
 getMethod("getNamespaceKey", int.class).invoke(e.getKey(), 2);
-long[] firstKey = new long[0];
+long[] firstKey = new long[key.length];
 if(part.iterator().hasNext()) {
   Object first = part.iterator().next();
-  firstKey = (long[]) inodeClass.getMethod(
+  long[] firstKeyRef = (long[]) inodeClass.getMethod(
 "getNamespaceKey", int.class).invoke(first, 2);
   Object parent = inodeClass.
   getMethod("getParent").invoke(first);
   long parentId = (parent == null ? 0L :
 (long) inodeClass.getMethod("getId").invoke(parent));
+  for (int j=0; j < key.length; j++) {
+firstKey[j] = firstKeyRef[j];
+  }
   firstKey[0] = parentId;
 }
 LOG.error("Partition #{}\t key: {}\t size: {}\t first: {}",
 i++, key, s, firstKey);  // SHV should be info
+  } catch (NoSuchElementException ex) {
+LOG.error("iterator.next() throws NoSuchElementException.");
+throw ex;
   } catch (Exception ex) {
 LOG.error("Cannot find Method getNamespaceKey() in {}", inodeClass);
   }
@@ -250,8 +260,8 @@ public class PartitionedGSet implements 
GSet {
 partSizeAvg = (int) (totalSize / parts.size());
 LOG.error("Partition sizes: min = {}, avg = {}, max = {}, sum = {}",
 partSizeMin, partSizeAvg, partSizeMax, totalSize);
-LOG.error("Number of partitions: empty = {}, full = {}",
-numEmptyPartitions, numFullPartitions);
+LOG.error("Number of partitions: empty = {}, in-use = {}, full = {}",
+numEmptyPartitions, parts.size()-numEmptyPartitions, 
numFullPartitions);
   }
 
   @Override
@@ -277,6 +287,8 @@ public class PartitionedGSet implements 
GSet {
 private Iterator keyIterator;
 private Iterator partitionIterator;
 
+// Set partitionIterator to point to the first partition, or set it to null
+// when there is no partitions created for this PartitionedGSet.
 public EntryIterator() {
   keyIterator = partitions.keySet().iterator();
  
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.jav
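
The PartitionedGSet hunk above also fixes a small bug in the partition statistics: firstKey used to be allocated with length zero, so writing the parent id into slot 0 could never work. A tiny stand-alone illustration of the corrected copy follows; the values are invented, and System.arraycopy reproduces the element-by-element loop used in the real code.

import java.util.Arrays;

// Illustrative only: allocate the copy with the source length, then substitute
// the parent id in slot 0, as the patched statistics code now does.
public class NamespaceKeyCopySketch {
  public static void main(String[] args) {
    long[] firstKeyRef = {42L, 7L};                  // key of the partition's first inode
    long[] firstKey = new long[firstKeyRef.length];  // was: new long[0]
    System.arraycopy(firstKeyRef, 0, firstKey, 0, firstKeyRef.length);
    firstKey[0] = 1001L;                             // replace with the parent's inode id
    System.out.println(Arrays.toString(firstKey));   // [1001, 7]
  }
}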

[hadoop] 06/06: HDFS-16141. [FGL] Address permission related issues with File / Directory. Contributed by Renukaprasad C. (#3205)

2021-08-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 892fa48c5a71eadc61b0f7a3cf22327b0515f0d4
Author: prasad-acit 
AuthorDate: Fri Aug 13 11:51:18 2021 -0700

HDFS-16141. [FGL] Address permission related issues with File / Directory. 
Contributed by Renukaprasad C. (#3205)
---
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  | 21 ++---
 .../hdfs/server/namenode/FSDirWriteFileOp.java |  2 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../hdfs/server/namenode/FSNamesystemLock.java |  2 +-
 .../hadoop/hdfs/server/namenode/INodeMap.java  |  3 +-
 .../java/org/apache/hadoop/hdfs/TestDFSMkdirs.java | 53 ++
 .../org/apache/hadoop/hdfs/TestFileCreation.java   |  4 ++
 7 files changed, 76 insertions(+), 11 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index f6febe2..f1b2ee2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -70,7 +70,7 @@ class FSDirMkdirOp {
 // create multiple inodes.
 fsn.checkFsObjectLimit();
 
-iip = createMissingDirs(fsd, iip, permissions);
+iip = createMissingDirs(fsd, iip, permissions, false);
   }
   return fsd.getAuditFileInfo(iip);
 } finally {
@@ -78,11 +78,14 @@ class FSDirMkdirOp {
 }
   }
 
-  static INodesInPath createMissingDirs(FSDirectory fsd,
-  INodesInPath iip, PermissionStatus permissions) throws IOException {
+  static INodesInPath createMissingDirs(FSDirectory fsd, INodesInPath iip,
+  PermissionStatus permissions, boolean inheritPerms) throws IOException {
+PermissionStatus basePerm = inheritPerms ?
+iip.getExistingINodes().getLastINode().getPermissionStatus() :
+permissions;
 // create all missing directories along the path,
 // but don't add them to the INodeMap yet
-permissions = addImplicitUwx(permissions, permissions); // SHV !!!
+permissions = addImplicitUwx(basePerm, permissions);
 INode[] missing = createPathDirectories(fsd, iip, permissions);
 iip = iip.getExistingINodes();
 if (missing.length == 0) {
@@ -90,8 +93,15 @@ class FSDirMkdirOp {
 }
 // switch the locks
 fsd.getINodeMap().latchWriteLock(iip, missing);
+int counter = 0;
 // Add missing inodes to the INodeMap
 for (INode dir : missing) {
+  if (counter++ == missing.length - 1) {
+//Last folder in the path, use the user given permission
+//For MKDIR - refers to the permission given by the user
+//For create - refers to the parent directory permission.
+permissions = basePerm;
+  }
   iip = addSingleDirectory(fsd, iip, dir, permissions);
   assert iip != null : "iip should not be null";
 }
@@ -279,13 +289,10 @@ class FSDirMkdirOp {
 // create the missing directories along the path
 INode[] missing = new INode[numMissing];
 final int last = iip.length();
-INode parent = existing.getLastINode();
 for (int i = existing.length();  i < last; i++) {
   byte[] component = iip.getPathComponent(i);
   missing[i - existing.length()] =
   createDirectoryINode(fsd, existing, component, perm);
-  missing[i - existing.length()].setParent(parent.asDirectory());
-  parent = missing[i - existing.length()];
 }
 return missing;
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index f2cca7b3..96f9907 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -400,7 +400,7 @@ class FSDirWriteFileOp {
 fsn.checkFsObjectLimit();
 INodeFile newNode = null;
 INodesInPath parent = FSDirMkdirOp.createMissingDirs(fsd,
-iip.getParentINodesInPath(), permissions);
+iip.getParentINodesInPath(), permissions, true);
 if (parent != null) {
   iip = addFile(fsd, parent, iip.getLastLocalName(), permissions,
   replication, blockSize, holder, clientMachine, shouldReplicate,
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a8fb490..96324e3 100644
--- 
a/hadoop-hdfs-project/hadoop-
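
What the new inheritPerms flag changes, in plain terms: on the file-creation path the missing ancestors take their permission base from the deepest existing directory, an explicit mkdirs keeps the caller-supplied permission, and in both cases only the final component is created without the implicit u+wx widening. A schematic sketch of that selection logic follows, using strings in place of PermissionStatus; all of the sample values are invented.

// Illustrative only: the basePerm / last-component selection from the patched
// createMissingDirs, with plain strings standing in for PermissionStatus.
public class MissingDirPermSketch {
  public static void main(String[] args) {
    String userPerm = "rwxr-x---";    // permission supplied by the caller
    String parentPerm = "rwxrwxr-x";  // permission of the last existing ancestor
    boolean inheritPerms = true;      // true on the create-file path, false for mkdirs

    String basePerm = inheritPerms ? parentPerm : userPerm;
    String widened = basePerm + " (+implicit u+wx)"; // stands in for addImplicitUwx(basePerm, ...)

    String[] missing = {"a", "b", "c"}; // directories that still need to be created
    for (int i = 0; i < missing.length; i++) {
      boolean last = (i == missing.length - 1);
      System.out.println(missing[i] + " -> " + (last ? basePerm : widened));
    }
  }
}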

[hadoop] 05/06: HDFS-16130. [FGL] Implement CREATE File with FGL. Contributed by Renukaprasad C. (#3205)

2021-08-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 88484fceb4c942c7c6f7a2baae2454b1042363ce
Author: Renukaprasad C 
AuthorDate: Fri Jul 23 15:24:34 2021 -0700

HDFS-16130. [FGL] Implement CREATE File with FGL. Contributed by 
Renukaprasad C. (#3205)
---
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  | 36 ++---
 .../hdfs/server/namenode/FSDirWriteFileOp.java | 94 +++---
 2 files changed, 87 insertions(+), 43 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index ef08e9e..f6febe2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -70,18 +70,7 @@ class FSDirMkdirOp {
 // create multiple inodes.
 fsn.checkFsObjectLimit();
 
-// create all missing directories along the path,
-// but don't add them to the INodeMap yet
-permissions = addImplicitUwx(permissions, permissions); // SHV !!!
-INode[] missing = createPathDirectories(fsd, iip, permissions);
-iip = iip.getExistingINodes();
-// switch the locks
-fsd.getINodeMap().latchWriteLock(iip, missing);
-// Add missing inodes to the INodeMap
-for(INode dir : missing) {
-  iip = addSingleDirectory(fsd, iip, dir, permissions);
-  assert iip != null : "iip should not be null";
-}
+iip = createMissingDirs(fsd, iip, permissions);
   }
   return fsd.getAuditFileInfo(iip);
 } finally {
@@ -89,6 +78,26 @@ class FSDirMkdirOp {
 }
   }
 
+  static INodesInPath createMissingDirs(FSDirectory fsd,
+  INodesInPath iip, PermissionStatus permissions) throws IOException {
+// create all missing directories along the path,
+// but don't add them to the INodeMap yet
+permissions = addImplicitUwx(permissions, permissions); // SHV !!!
+INode[] missing = createPathDirectories(fsd, iip, permissions);
+iip = iip.getExistingINodes();
+if (missing.length == 0) {
+  return iip;
+}
+// switch the locks
+fsd.getINodeMap().latchWriteLock(iip, missing);
+// Add missing inodes to the INodeMap
+for (INode dir : missing) {
+  iip = addSingleDirectory(fsd, iip, dir, permissions);
+  assert iip != null : "iip should not be null";
+}
+return iip;
+  }
+
   /**
* For a given absolute path, create all ancestors as directories along the
* path. All ancestors inherit their parent's permission plus an implicit
@@ -253,6 +262,9 @@ class FSDirMkdirOp {
 return dir;
   }
 
+  /**
+   * Find-out missing iNodes for the current mkdir OP.
+   */
   private static INode[] createPathDirectories(FSDirectory fsd,
   INodesInPath iip, PermissionStatus perm)
   throws IOException {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 0d9c6ae..f2cca7b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -228,6 +228,13 @@ class FSDirWriteFileOp {
 // while chooseTarget() was executing.
 LocatedBlock[] onRetryBlock = new LocatedBlock[1];
 INodesInPath iip = fsn.dir.resolvePath(null, src, fileId);
+
+INode[] missing = new INode[]{iip.getLastINode()};
+INodesInPath existing = iip.getParentINodesInPath();
+FSDirectory fsd = fsn.getFSDirectory();
+// switch the locks
+fsd.getINodeMap().latchWriteLock(existing, missing);
+
 FileState fileState = analyzeFileState(fsn, iip, fileId, clientName,
previous, onRetryBlock);
 final INodeFile pendingFile = fileState.inode;
@@ -392,8 +399,8 @@ class FSDirWriteFileOp {
 }
 fsn.checkFsObjectLimit();
 INodeFile newNode = null;
-INodesInPath parent =
-FSDirMkdirOp.createAncestorDirectories(fsd, iip, permissions);
+INodesInPath parent = FSDirMkdirOp.createMissingDirs(fsd,
+iip.getParentINodesInPath(), permissions);
 if (parent != null) {
   iip = addFile(fsd, parent, iip.getLastLocalName(), permissions,
   replication, blockSize, holder, clientMachine, shouldReplicate,
@@ -541,41 +548,22 @@ class FSDirWriteFileOp {
   FSDirectory fsd, INodesInPath existing, byte[] localName,
   PermissionStatus permissions, short replication, lon

[hadoop] 03/06: HDFS-16125. [FGL] Fix the iterator for PartitionedGSet. Contributed by Xing Lin. (#3197)

2021-08-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit c06cd9ae8cfe65e4c91139a13621ecf6ac916ff0
Author: Xing Lin 
AuthorDate: Fri Jul 16 13:04:59 2021 -0700

HDFS-16125. [FGL] Fix the iterator for PartitionedGSet. Contributed by Xing 
Lin. (#3197)
---
 .../java/org/apache/hadoop/util/LatchLock.java |   4 +-
 .../org/apache/hadoop/util/PartitionedGSet.java|  35 ++-
 .../apache/hadoop/util/TestPartitionedGSet.java| 270 +
 .../hadoop/hdfs/server/namenode/INodeMap.java  |   4 +-
 4 files changed, 300 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
index 41e33da..fd98391 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
@@ -30,7 +30,7 @@ public abstract class LatchLock {
   protected abstract boolean isReadTopLocked();
   /** @return true topLock is locked for write by any thread */
   protected abstract boolean isWriteTopLocked();
-  protected abstract void readTopdUnlock();
+  protected abstract void readTopUnlock();
   protected abstract void writeTopUnlock();
 
   protected abstract boolean hasReadChildLock();
@@ -46,7 +46,7 @@ public abstract class LatchLock {
   // Public APIs to use with the class
   public void readLock() {
 readChildLock();
-readTopdUnlock();
+readTopUnlock();
   }
 
   public void readUnlock() {
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
index 7ebb1b3..f3569cc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -24,7 +24,7 @@ import java.util.Map.Entry;
 import java.util.NavigableMap;
 import java.util.Set;
 import java.util.TreeMap;
-
+import java.util.NoSuchElementException;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
@@ -79,8 +79,7 @@ public class PartitionedGSet implements 
GSet {
 
   public PartitionedGSet(final int capacity,
   final Comparator comparator,
-  final LatchLock latchLock,
-  final E rootKey) {
+  final LatchLock latchLock) {
 this.partitions = new TreeMap(comparator);
 this.latchLock = latchLock;
 // addNewPartition(rootKey).put(rootKey);
@@ -275,17 +274,36 @@ public class PartitionedGSet implements 
GSet {
* modifying other partitions, while iterating through the current one.
*/
   private class EntryIterator implements Iterator {
-private final Iterator keyIterator;
+private Iterator keyIterator;
 private Iterator partitionIterator;
 
 public EntryIterator() {
   keyIterator = partitions.keySet().iterator();
-  K curKey = partitions.firstKey();
-  partitionIterator = getPartition(curKey).iterator();
+ 
+  if (!keyIterator.hasNext()) {
+partitionIterator = null;
+return;
+  }
+
+  K firstKey = keyIterator.next();
+  partitionIterator = partitions.get(firstKey).iterator();
 }
 
 @Override
 public boolean hasNext() {
+
+  // Special case: an iterator was created for an empty PartitionedGSet.
+  // Check whether new partitions have been added since then.
+  if (partitionIterator == null) {
+if (partitions.size() == 0) {
+  return false;
+} else {
+  keyIterator = partitions.keySet().iterator();
+  K nextKey = keyIterator.next();
+  partitionIterator = partitions.get(nextKey).iterator();
+}
+  }
+
   while(!partitionIterator.hasNext()) {
 if(!keyIterator.hasNext()) {
   return false;
@@ -298,9 +316,8 @@ public class PartitionedGSet implements 
GSet {
 
 @Override
 public E next() {
-  while(!partitionIterator.hasNext()) {
-K curKey = keyIterator.next();
-partitionIterator = getPartition(curKey).iterator();
+  if (!hasNext()) {
+throw new NoSuchElementException("No more elements in this set.");
   }
   return partitionIterator.next();
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPartitionedGSet.java
new file mode 100644
index 000..9ae772c
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/h
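
The iterator change in HDFS-16125 has two parts: hasNext() now skips over empty partitions and copes with a set that was empty when the iterator was created, and next() delegates to hasNext() so it throws NoSuchElementException instead of running off the end. A self-contained sketch of that nested-iterator pattern follows; the TreeMap of lists is a stand-in for the real partition map, so names and values are illustrative only.

import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.TreeMap;

// Illustrative only: the nested-iterator pattern that HDFS-16125 fixes, with a
// TreeMap<String, List<Integer>> standing in for the real partition map.
public class NestedIteratorSketch implements Iterator<Integer> {
  private final TreeMap<String, List<Integer>> partitions;
  private final Iterator<String> keyIterator;
  private Iterator<Integer> partitionIterator;

  NestedIteratorSketch(TreeMap<String, List<Integer>> partitions) {
    this.partitions = partitions;
    this.keyIterator = partitions.keySet().iterator();
    // Do not call firstKey() on an empty map; start from an empty iterator instead.
    this.partitionIterator = keyIterator.hasNext()
        ? partitions.get(keyIterator.next()).iterator()
        : Collections.<Integer>emptyIterator();
  }

  @Override
  public boolean hasNext() {
    // Skip over empty partitions, as the patched hasNext() does.
    while (!partitionIterator.hasNext()) {
      if (!keyIterator.hasNext()) {
        return false;
      }
      partitionIterator = partitions.get(keyIterator.next()).iterator();
    }
    return true;
  }

  @Override
  public Integer next() {
    if (!hasNext()) { // next() now checks hasNext() before advancing
      throw new NoSuchElementException("No more elements in this set.");
    }
    return partitionIterator.next();
  }

  public static void main(String[] args) {
    TreeMap<String, List<Integer>> parts = new TreeMap<>();
    parts.put("a", Arrays.asList(1, 2));
    parts.put("b", Collections.emptyList());
    parts.put("c", Arrays.asList(3));
    Iterator<Integer> it = new NestedIteratorSketch(parts);
    while (it.hasNext()) {
      System.out.println(it.next()); // prints 1, 2, 3
    }
  }
}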

[hadoop] 01/06: INodeMap with PartitionedGSet and per-partition locking.

2021-08-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit a30e9f6663fc9123621adfd2c5de94bef0a45f97
Author: Konstantin V Shvachko 
AuthorDate: Fri May 7 17:47:37 2021 -0700

INodeMap with PartitionedGSet and per-partition locking.
---
 .../java/org/apache/hadoop/util/LatchLock.java |  64 +
 .../org/apache/hadoop/util/PartitionedGSet.java| 263 +
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |  92 ++-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |   2 +-
 .../hadoop/hdfs/server/namenode/FSImage.java   |  29 ++-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |   9 +-
 .../hdfs/server/namenode/FSNamesystemLock.java |  96 +++-
 .../hadoop/hdfs/server/namenode/INodeMap.java  | 148 ++--
 .../java/org/apache/hadoop/hdfs/DFSTestUtil.java   |   2 +
 .../hadoop/hdfs/server/namenode/TestINodeFile.java |  39 ++-
 10 files changed, 682 insertions(+), 62 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
new file mode 100644
index 000..41e33da
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+/**
+ * LatchLock controls two hierarchical Read/Write locks:
+ * the topLock and the childLock.
+ * Typically an operation starts with the topLock already acquired.
+ * To acquire child lock LatchLock will
+ * first acquire the childLock, and then release the topLock.
+ */
+public abstract class LatchLock {
+  // Interfaces methods to be defined for subclasses
+  /** @return true topLock is locked for read by any thread */
+  protected abstract boolean isReadTopLocked();
+  /** @return true topLock is locked for write by any thread */
+  protected abstract boolean isWriteTopLocked();
+  protected abstract void readTopdUnlock();
+  protected abstract void writeTopUnlock();
+
+  protected abstract boolean hasReadChildLock();
+  protected abstract void readChildLock();
+  protected abstract void readChildUnlock();
+
+  protected abstract boolean hasWriteChildLock();
+  protected abstract void writeChildLock();
+  protected abstract void writeChildUnlock();
+
+  protected abstract LatchLock clone();
+
+  // Public APIs to use with the class
+  public void readLock() {
+readChildLock();
+readTopdUnlock();
+  }
+
+  public void readUnlock() {
+readChildUnlock();
+  }
+
+  public void writeLock() {
+writeChildLock();
+writeTopUnlock();
+  }
+
+  public void writeUnlock() {
+writeChildUnlock();
+  }
+}
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
new file mode 100644
index 000..4b0cdc9
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -0,0 +1,263 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterat
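
LatchLock's javadoc above describes the hand-off precisely: an operation arrives holding the global (top) lock, takes the per-partition (child) lock, and only then releases the top lock. A minimal sketch of that ordering with two plain ReentrantReadWriteLocks follows; it is not the FSNamesystemLock wiring, only the hand-off the abstract class encodes, and every name in it is invented for the example.

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative only: the latch hand-off from LatchLock#readLock(), shown with
// two ordinary ReentrantReadWriteLocks instead of FSNamesystemLock internals.
public class LatchHandOffSketch {
  private final ReentrantReadWriteLock topLock = new ReentrantReadWriteLock();
  private final ReentrantReadWriteLock childLock = new ReentrantReadWriteLock();

  void latchedRead() {
    topLock.readLock().lock();        // an operation typically starts under the top lock
    // ... resolve the partition for the path while still globally locked ...
    childLock.readLock().lock();      // 1. acquire the child (partition) lock
    topLock.readLock().unlock();      // 2. then release the top lock: the "latch" step
    try {
      // ... work on the partition protected only by childLock ...
    } finally {
      childLock.readLock().unlock();  // readUnlock() releases just the child lock
    }
  }

  public static void main(String[] args) {
    new LatchHandOffSketch().latchedRead();
    System.out.println("latched read completed");
  }
}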

[hadoop] 02/06: Add namespace key for INode. (shv)

2021-08-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 455e8c019184d5d3ae7bcff4d29d9baa7aff3663
Author: Konstantin V Shvachko 
AuthorDate: Fri May 7 17:51:58 2021 -0700

Add namespace key for INode. (shv)
---
 .../org/apache/hadoop/util/PartitionedGSet.java| 80 ++
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |  3 +
 .../apache/hadoop/hdfs/server/namenode/INode.java  | 40 ++-
 .../hadoop/hdfs/server/namenode/INodeMap.java  | 71 +--
 4 files changed, 176 insertions(+), 18 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
index 4b0cdc9..7ebb1b3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -22,6 +22,7 @@ import java.util.Comparator;
 import java.util.Iterator;
 import java.util.Map.Entry;
 import java.util.NavigableMap;
+import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -44,7 +45,8 @@ import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
 @InterfaceAudience.Private
 public class PartitionedGSet implements GSet {
 
-  private static final int DEFAULT_PARTITION_CAPACITY = 2027;
+  private static final int DEFAULT_PARTITION_CAPACITY = 65536; // 4096; // 
5120; // 2048; // 1027;
+  private static final float DEFAULT_PARTITION_OVERFLOW = 1.8f;
 
   /**
* An ordered map of contiguous segments of elements.
@@ -81,8 +83,11 @@ public class PartitionedGSet implements 
GSet {
   final E rootKey) {
 this.partitions = new TreeMap(comparator);
 this.latchLock = latchLock;
-addNewPartition(rootKey).put(rootKey);
-this.size = 1;
+// addNewPartition(rootKey).put(rootKey);
+// this.size = 1;
+this.size = 0;
+LOG.info("Partition capacity = {}", DEFAULT_PARTITION_CAPACITY);
+LOG.info("Partition overflow factor = {}", DEFAULT_PARTITION_OVERFLOW);
   }
 
   /**
@@ -90,16 +95,19 @@ public class PartitionedGSet implements 
GSet {
* @param key
* @return
*/
-  private PartitionEntry addNewPartition(final K key) {
+  public PartitionEntry addNewPartition(final K key) {
+Entry lastEntry = partitions.lastEntry();
 PartitionEntry lastPart = null;
-if(size > 0)
-  lastPart = partitions.lastEntry().getValue();
+if(lastEntry != null)
+  lastPart = lastEntry.getValue();
 
 PartitionEntry newPart =
 new PartitionEntry(DEFAULT_PARTITION_CAPACITY);
 // assert size == 0 || newPart.partLock.isWriteTopLocked() :
 //  "Must hold write Lock: key = " + key;
-partitions.put(key, newPart);
+PartitionEntry oldPart = partitions.put(key, newPart);
+assert oldPart == null :
+  "RangeMap already has a partition associated with " + key;
 
 LOG.debug("Total GSet size = {}", size);
 LOG.debug("Number of partitions = {}", partitions.size());
@@ -173,7 +181,7 @@ public class PartitionedGSet implements 
GSet {
 
   private PartitionEntry addNewPartitionIfNeeded(
   PartitionEntry curPart, K key) {
-if(curPart.size() < DEFAULT_PARTITION_CAPACITY * 1.1
+if(curPart.size() < DEFAULT_PARTITION_CAPACITY * DEFAULT_PARTITION_OVERFLOW
 || curPart.contains(key)) {
   return curPart;
 }
@@ -197,12 +205,56 @@ public class PartitionedGSet implements 
GSet {
   public void clear() {
 LOG.error("Total GSet size = {}", size);
 LOG.error("Number of partitions = {}", partitions.size());
+printStats();
 // assert latchLock.hasWriteTopLock() : "Must hold write topLock";
 // SHV May need to clear all partitions?
 partitions.clear();
 size = 0;
   }
 
+  private void printStats() {
+int partSizeMin = Integer.MAX_VALUE, partSizeAvg = 0, partSizeMax = 0;
+long totalSize = 0;
+int numEmptyPartitions = 0, numFullPartitions = 0;
+Collection parts = partitions.values();
+Set> entries = partitions.entrySet();
+int i = 0;
+for(Entry e : entries) {
+  PartitionEntry part = e.getValue();
+  int s = part.size;
+  if(s == 0) numEmptyPartitions++;
+  if(s > DEFAULT_PARTITION_CAPACITY) numFullPartitions++;
+  totalSize += s;
+  partSizeMin = (s < partSizeMin ? s : partSizeMin);
+  partSizeMax = (partSizeMax < s ? s : partSizeMax);
+  Class inodeClass = e.getKey().getClass();
+  try {
+long[] key = (long[]) inodeClass.
+getMethod("getNamespaceKey", int.class).invoke(e.getKey(), 2);
+long[] firstKey = new long[0];
+if(part.iter
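
Two constants introduced in this commit drive partition growth: DEFAULT_PARTITION_CAPACITY (now 65536) and DEFAULT_PARTITION_OVERFLOW (1.8f). addNewPartitionIfNeeded only splits off a new partition once the current one exceeds capacity times overflow and does not already contain the key. A small sketch of that rule follows, with the two constants taken from the diff and the sample sizes chosen arbitrarily.

// Illustrative only: the split condition from addNewPartitionIfNeeded, using the
// capacity and overflow factor introduced by this commit.
public class PartitionOverflowSketch {
  static final int CAPACITY = 65536;
  static final float OVERFLOW = 1.8f;

  static boolean needsNewPartition(int currentSize, boolean keyAlreadyPresent) {
    // The patch keeps the current partition while size < CAPACITY * OVERFLOW,
    // or while the key is already stored in it.
    return currentSize >= CAPACITY * OVERFLOW && !keyAlreadyPresent;
  }

  public static void main(String[] args) {
    System.out.println(needsNewPartition(100_000, false)); // false: below ~117,964
    System.out.println(needsNewPartition(120_000, false)); // true: start a new partition
    System.out.println(needsNewPartition(120_000, true));  // false: the key stays where it is
  }
}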

[hadoop] branch fgl updated (0f1a9a8 -> 892fa48)

2021-08-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a change to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


omit 0f1a9a8  HDFS-16130. [FGL] Implement CREATE File with FGL. Contributed 
by Renukaprasad C. (#3205)
omit b135d26  HDFS-16128. [FGL] Added support for saving/loading an FS 
Image for PartitionedGSet. Contributed by Xing Lin. (#3201)
omit 6ff40a0  HDFS-16125. [FGL] Fix the iterator for PartitionedGSet. 
Contributed by Xing Lin. (#3197)
omit e00c784  Add namespace key for INode. (shv)
omit 2e3eb2d  INodeMap with PartitionedGSet and per-partition locking.
 add e31169c  MAPREDUCE-7258. HistoryServerRest.html#Task_Counters_API, 
modify the jobTaskCounters's itemName from taskcounterGroup to taskCounterGroup 
(#1808)
 add efb3fa2  YARN-10854. Support marking inactive node as untracked 
without configured include path. Contributed by Tao Yang.
 add ee466d4  HADOOP-17628. Distcp contract test is really slow with ABFS 
and S3A; timing out. (#3240)
 add 4627e9c  HADOOP-17822. fs.s3a.acl.default not working after S3A Audit 
feature (#3249)
 add ccfa072  HADOOP-17612. Upgrade Zookeeper to 3.6.3 and Curator to 5.2.0 
(#3241)
 add 8616591  HDFS-16149.Improve the parameter annotation in 
FairCallQueue#priorityLevels. (#3255)
 add a5811dd  YARN-10874. Refactor NM ContainerLaunch#getEnvDependencies's 
unit tests (#3248)
 add 10a2526  HDFS-16146. All three replicas are lost due to not adding a 
new DataN… (#3247) Contributed by Shuyan Zhang.
 add 0ba6f35  YARN-10355. Refactor NM 
ContainerLaunch.java#orderEnvByDependencies
 add 44bab51  YARN-10849 Clarify testcase documentation for 
TestServiceAM#testContainersReleasedWhenPreLaunchFails. Contributed by Szilard 
Nemeth
 add 3450522  HADOOP-17618. ABFS: Partially obfuscate SAS object IDs in 
Logs (#2845)
 add a67a0fd  YARN-10878. move TestNMSimulator off com.google (#3268)
 add 8d6a686  HADOOP-17823. S3A S3Guard tests to skip if S3-CSE are enabled 
(#3263)
 add e80b5a0  HADOOP-17816. Run optional CI for changes in C (#3238)
 add 9fe1f24  HADOOP-17808. Avoid excessive logging for interruption 
(ADDENDUM) (#3267)
 add a73b64f  HDFS-16153. Avoid evaluation of LOG.debug statement in 
QuorumJournalManager (#3269). Contributed by wangzhaohui.
 add 5e54d92  HADOOP-17837: Add unresolved endpoint value to 
UnknownHostException (#3272)
 add e85c446  HDFS-16154. TestMiniJournalCluster failing intermittently 
because of not reseting UserGroupInformation completely (#3270)
 add b0b867e  HADOOP-17837: Add unresolved endpoint value to 
UnknownHostException (ADDENDUM) (#3276)
 add 4fd97e0  HADOOP-17787. Refactor fetching of credentials in Jenkins 
(#3167)
 add 23e2a0b  HADOOP-17835. Use CuratorCache implementation instead of 
PathChildrenCache / TreeCache (#3266)
 add 4972e7a  Fix potential heap buffer overflow in hdfs.c. Contributed by 
Igor Chervatyuk.
 add 3565c94  HADOOP-17370. Upgrade commons-compress to 1.21 (#3274)
 add 0c7b951  HDFS-16151. Improve the parameter comments related to 
ProtobufRpcEngine2#Server(). (#3256)
 add 6a78834  HADOOP-17841. Remove ListenerHandle from Hadoop registry 
(#3278)
 add 77383a4  HDFS-15976. Make mkdtemp cross platform (#2908)
 new a30e9f6  INodeMap with PartitionedGSet and per-partition locking.
 new 455e8c0  Add namespace key for INode. (shv)
 new c06cd9a  HDFS-16125. [FGL] Fix the iterator for PartitionedGSet. 
Contributed by Xing Lin. (#3197)
 new 4610e1d  HDFS-16128. [FGL] Added support for saving/loading an FS 
Image for PartitionedGSet. Contributed by Xing Lin. (#3201)
 new 88484fc  HDFS-16130. [FGL] Implement CREATE File with FGL. Contributed 
by Renukaprasad C. (#3205)
 new 892fa48  HDFS-16141. [FGL] Address permission related issues with File 
/ Directory. Contributed by Renukaprasad C. (#3205)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (0f1a9a8)
            \
             N -- N -- N   refs/heads/fgl (892fa48)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 6 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 LICENSE-binary  

[hadoop] 05/05: HDFS-16130. [FGL] Implement CREATE File with FGL. Contributed by Renukaprasad C. (#3205)

2021-07-31 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 0f1a9a81d58f41d80f2925de7b0945c678e086eb
Author: Renukaprasad C 
AuthorDate: Fri Jul 23 15:24:34 2021 -0700

HDFS-16130. [FGL] Implement CREATE File with FGL. Contributed by 
Renukaprasad C. (#3205)
---
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  | 36 ++---
 .../hdfs/server/namenode/FSDirWriteFileOp.java | 94 +++---
 2 files changed, 87 insertions(+), 43 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index ef08e9e..f6febe2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -70,18 +70,7 @@ class FSDirMkdirOp {
 // create multiple inodes.
 fsn.checkFsObjectLimit();
 
-// create all missing directories along the path,
-// but don't add them to the INodeMap yet
-permissions = addImplicitUwx(permissions, permissions); // SHV !!!
-INode[] missing = createPathDirectories(fsd, iip, permissions);
-iip = iip.getExistingINodes();
-// switch the locks
-fsd.getINodeMap().latchWriteLock(iip, missing);
-// Add missing inodes to the INodeMap
-for(INode dir : missing) {
-  iip = addSingleDirectory(fsd, iip, dir, permissions);
-  assert iip != null : "iip should not be null";
-}
+iip = createMissingDirs(fsd, iip, permissions);
   }
   return fsd.getAuditFileInfo(iip);
 } finally {
@@ -89,6 +78,26 @@ class FSDirMkdirOp {
 }
   }
 
+  static INodesInPath createMissingDirs(FSDirectory fsd,
+  INodesInPath iip, PermissionStatus permissions) throws IOException {
+// create all missing directories along the path,
+// but don't add them to the INodeMap yet
+permissions = addImplicitUwx(permissions, permissions); // SHV !!!
+INode[] missing = createPathDirectories(fsd, iip, permissions);
+iip = iip.getExistingINodes();
+if (missing.length == 0) {
+  return iip;
+}
+// switch the locks
+fsd.getINodeMap().latchWriteLock(iip, missing);
+// Add missing inodes to the INodeMap
+for (INode dir : missing) {
+  iip = addSingleDirectory(fsd, iip, dir, permissions);
+  assert iip != null : "iip should not be null";
+}
+return iip;
+  }
+
   /**
* For a given absolute path, create all ancestors as directories along the
* path. All ancestors inherit their parent's permission plus an implicit
@@ -253,6 +262,9 @@ class FSDirMkdirOp {
 return dir;
   }
 
+  /**
+   * Find out missing INodes for the current mkdir op.
+   */
   private static INode[] createPathDirectories(FSDirectory fsd,
   INodesInPath iip, PermissionStatus perm)
   throws IOException {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 0d9c6ae..f2cca7b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -228,6 +228,13 @@ class FSDirWriteFileOp {
 // while chooseTarget() was executing.
 LocatedBlock[] onRetryBlock = new LocatedBlock[1];
 INodesInPath iip = fsn.dir.resolvePath(null, src, fileId);
+
+INode[] missing = new INode[]{iip.getLastINode()};
+INodesInPath existing = iip.getParentINodesInPath();
+FSDirectory fsd = fsn.getFSDirectory();
+// switch the locks
+fsd.getINodeMap().latchWriteLock(existing, missing);
+
 FileState fileState = analyzeFileState(fsn, iip, fileId, clientName,
previous, onRetryBlock);
 final INodeFile pendingFile = fileState.inode;
@@ -392,8 +399,8 @@ class FSDirWriteFileOp {
 }
 fsn.checkFsObjectLimit();
 INodeFile newNode = null;
-INodesInPath parent =
-FSDirMkdirOp.createAncestorDirectories(fsd, iip, permissions);
+INodesInPath parent = FSDirMkdirOp.createMissingDirs(fsd,
+iip.getParentINodesInPath(), permissions);
 if (parent != null) {
   iip = addFile(fsd, parent, iip.getLastLocalName(), permissions,
   replication, blockSize, holder, clientMachine, shouldReplicate,
@@ -541,41 +548,22 @@ class FSDirWriteFileOp {
   FSDirectory fsd, INodesInPath existing, byte[] localName,
   PermissionStatus permissions, short replication, lon

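The two hunks in this commit share one pattern: work out which inodes are missing without touching the INodeMap, latch the write lock, and only then publish the new inodes. Below is a rough stand-alone sketch of that ordering using a single ReentrantReadWriteLock; the INodeMapStub and the path handling are illustrative assumptions, and the real code re-resolves the path under a per-partition LatchLock rather than a global lock.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Sketch of "prepare missing directories first, lock, then publish".
// INodeMapStub and the path handling are stand-ins for the Hadoop classes.
public class CreateMissingDirsSketch {

  static class INodeMapStub {
    final Map<String, String> byPath = new HashMap<>();
    final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  }

  static final INodeMapStub inodeMap = new INodeMapStub();

  // Collect the ancestors that do not exist yet without mutating the map,
  // analogous to createPathDirectories() in the diff.
  static List<String> missingAncestors(String path) {
    List<String> missing = new ArrayList<>();
    StringBuilder prefix = new StringBuilder();
    for (String comp : path.split("/")) {
      if (comp.isEmpty()) {
        continue;
      }
      prefix.append('/').append(comp);
      if (!inodeMap.byPath.containsKey(prefix.toString())) {
        missing.add(prefix.toString());
      }
    }
    return missing;
  }

  // Analogous to createMissingDirs(): nothing missing -> no lock taken;
  // otherwise hold the write lock only while the inodes are published.
  // (The real code re-resolves the path under the latched lock; omitted here.)
  static void createMissingDirs(String path) {
    List<String> missing = missingAncestors(path);
    if (missing.isEmpty()) {
      return;
    }
    inodeMap.lock.writeLock().lock();
    try {
      for (String dir : missing) {
        inodeMap.byPath.put(dir, "directory");
      }
    } finally {
      inodeMap.lock.writeLock().unlock();
    }
  }

  public static void main(String[] args) {
    createMissingDirs("/user/alice/projects");
    createMissingDirs("/user/alice/projects"); // second call finds nothing missing
    System.out.println(inodeMap.byPath.keySet());
  }
}
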
[hadoop] 01/05: INodeMap with PartitionedGSet and per-partition locking.

2021-07-31 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 2e3eb2d79d381ff19b5358be84257067476f14f8
Author: Konstantin V Shvachko 
AuthorDate: Fri May 7 17:47:37 2021 -0700

INodeMap with PartitionedGSet and per-partition locking.
---
 .../java/org/apache/hadoop/util/LatchLock.java |  64 +
 .../org/apache/hadoop/util/PartitionedGSet.java| 263 +
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |  92 ++-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |   2 +-
 .../hadoop/hdfs/server/namenode/FSImage.java   |  29 ++-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |   9 +-
 .../hdfs/server/namenode/FSNamesystemLock.java |  96 +++-
 .../hadoop/hdfs/server/namenode/INodeMap.java  | 148 ++--
 .../java/org/apache/hadoop/hdfs/DFSTestUtil.java   |   2 +
 .../hadoop/hdfs/server/namenode/TestINodeFile.java |  39 ++-
 10 files changed, 682 insertions(+), 62 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
new file mode 100644
index 000..41e33da
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+/**
+ * LatchLock controls two hierarchical Read/Write locks:
+ * the topLock and the childLock.
+ * Typically an operation starts with the topLock already acquired.
+ * To acquire child lock LatchLock will
+ * first acquire the childLock, and then release the topLock.
+ */
+public abstract class LatchLock {
+  // Interfaces methods to be defined for subclasses
+  /** @return true topLock is locked for read by any thread */
+  protected abstract boolean isReadTopLocked();
+  /** @return true topLock is locked for write by any thread */
+  protected abstract boolean isWriteTopLocked();
+  protected abstract void readTopdUnlock();
+  protected abstract void writeTopUnlock();
+
+  protected abstract boolean hasReadChildLock();
+  protected abstract void readChildLock();
+  protected abstract void readChildUnlock();
+
+  protected abstract boolean hasWriteChildLock();
+  protected abstract void writeChildLock();
+  protected abstract void writeChildUnlock();
+
+  protected abstract LatchLock clone();
+
+  // Public APIs to use with the class
+  public void readLock() {
+readChildLock();
+readTopdUnlock();
+  }
+
+  public void readUnlock() {
+readChildUnlock();
+  }
+
+  public void writeLock() {
+writeChildLock();
+writeTopUnlock();
+  }
+
+  public void writeUnlock() {
+writeChildUnlock();
+  }
+}
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
new file mode 100644
index 000..4b0cdc9
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -0,0 +1,263 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterat

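The LatchLock class added in the first file of this patch captures the hand-over-hand step: readLock()/writeLock() take the child (partition) lock first and then drop the top (namespace-wide) lock that the operation entered with, so there is never a window with no lock held. Below is a small stand-alone sketch of that latch step over two ReentrantReadWriteLock instances; the class and method names are illustrative, and only the ordering mirrors the diff.

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Stand-alone sketch of the latch step: the caller enters holding the top
// (namespace-wide) lock; latching grabs the child (partition) lock and only
// then lets go of the top lock.
public class LatchLockSketch {

  private final ReentrantReadWriteLock topLock;    // e.g. the namespace lock
  private final ReentrantReadWriteLock childLock;  // e.g. one partition's lock

  public LatchLockSketch(ReentrantReadWriteLock top, ReentrantReadWriteLock child) {
    this.topLock = top;
    this.childLock = child;
  }

  /** Call with topLock.readLock() already held by this thread. */
  public void readLatch() {
    childLock.readLock().lock();   // acquire the child lock first...
    topLock.readLock().unlock();   // ...then release the top lock
  }

  public void readUnlatch() {
    childLock.readLock().unlock();
  }

  /** Call with topLock.writeLock() already held by this thread. */
  public void writeLatch() {
    childLock.writeLock().lock();
    topLock.writeLock().unlock();
  }

  public void writeUnlatch() {
    childLock.writeLock().unlock();
  }

  public static void main(String[] args) {
    ReentrantReadWriteLock top = new ReentrantReadWriteLock();
    ReentrantReadWriteLock child = new ReentrantReadWriteLock();
    LatchLockSketch latch = new LatchLockSketch(top, child);

    top.writeLock().lock();        // the operation starts under the top lock
    latch.writeLatch();            // hand over to the partition lock
    System.out.println("top held: " + top.isWriteLockedByCurrentThread()
        + ", child held: " + child.isWriteLockedByCurrentThread());
    latch.writeUnlatch();
  }
}
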
[hadoop] 02/05: Add namespace key for INode. (shv)

2021-07-31 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit e00c7842a0ba889af112662fb0750b8279fb38bf
Author: Konstantin V Shvachko 
AuthorDate: Fri May 7 17:51:58 2021 -0700

Add namespace key for INode. (shv)
---
 .../org/apache/hadoop/util/PartitionedGSet.java| 80 ++
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |  3 +
 .../apache/hadoop/hdfs/server/namenode/INode.java  | 40 ++-
 .../hadoop/hdfs/server/namenode/INodeMap.java  | 71 +--
 4 files changed, 176 insertions(+), 18 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
index 4b0cdc9..7ebb1b3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -22,6 +22,7 @@ import java.util.Comparator;
 import java.util.Iterator;
 import java.util.Map.Entry;
 import java.util.NavigableMap;
+import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -44,7 +45,8 @@ import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
 @InterfaceAudience.Private
 public class PartitionedGSet implements GSet {
 
-  private static final int DEFAULT_PARTITION_CAPACITY = 2027;
+  private static final int DEFAULT_PARTITION_CAPACITY = 65536; // 4096; // 
5120; // 2048; // 1027;
+  private static final float DEFAULT_PARTITION_OVERFLOW = 1.8f;
 
   /**
* An ordered map of contiguous segments of elements.
@@ -81,8 +83,11 @@ public class PartitionedGSet implements 
GSet {
   final E rootKey) {
 this.partitions = new TreeMap(comparator);
 this.latchLock = latchLock;
-addNewPartition(rootKey).put(rootKey);
-this.size = 1;
+// addNewPartition(rootKey).put(rootKey);
+// this.size = 1;
+this.size = 0;
+LOG.info("Partition capacity = {}", DEFAULT_PARTITION_CAPACITY);
+LOG.info("Partition overflow factor = {}", DEFAULT_PARTITION_OVERFLOW);
   }
 
   /**
@@ -90,16 +95,19 @@ public class PartitionedGSet implements 
GSet {
* @param key
* @return
*/
-  private PartitionEntry addNewPartition(final K key) {
+  public PartitionEntry addNewPartition(final K key) {
+Entry lastEntry = partitions.lastEntry();
 PartitionEntry lastPart = null;
-if(size > 0)
-  lastPart = partitions.lastEntry().getValue();
+if(lastEntry != null)
+  lastPart = lastEntry.getValue();
 
 PartitionEntry newPart =
 new PartitionEntry(DEFAULT_PARTITION_CAPACITY);
 // assert size == 0 || newPart.partLock.isWriteTopLocked() :
 //  "Must hold write Lock: key = " + key;
-partitions.put(key, newPart);
+PartitionEntry oldPart = partitions.put(key, newPart);
+assert oldPart == null :
+  "RangeMap already has a partition associated with " + key;
 
 LOG.debug("Total GSet size = {}", size);
 LOG.debug("Number of partitions = {}", partitions.size());
@@ -173,7 +181,7 @@ public class PartitionedGSet implements 
GSet {
 
   private PartitionEntry addNewPartitionIfNeeded(
   PartitionEntry curPart, K key) {
-if(curPart.size() < DEFAULT_PARTITION_CAPACITY * 1.1
+if(curPart.size() < DEFAULT_PARTITION_CAPACITY * DEFAULT_PARTITION_OVERFLOW
 || curPart.contains(key)) {
   return curPart;
 }
@@ -197,12 +205,56 @@ public class PartitionedGSet implements 
GSet {
   public void clear() {
 LOG.error("Total GSet size = {}", size);
 LOG.error("Number of partitions = {}", partitions.size());
+printStats();
 // assert latchLock.hasWriteTopLock() : "Must hold write topLock";
 // SHV May need to clear all partitions?
 partitions.clear();
 size = 0;
   }
 
+  private void printStats() {
+int partSizeMin = Integer.MAX_VALUE, partSizeAvg = 0, partSizeMax = 0;
+long totalSize = 0;
+int numEmptyPartitions = 0, numFullPartitions = 0;
+Collection parts = partitions.values();
+Set> entries = partitions.entrySet();
+int i = 0;
+for(Entry e : entries) {
+  PartitionEntry part = e.getValue();
+  int s = part.size;
+  if(s == 0) numEmptyPartitions++;
+  if(s > DEFAULT_PARTITION_CAPACITY) numFullPartitions++;
+  totalSize += s;
+  partSizeMin = (s < partSizeMin ? s : partSizeMin);
+  partSizeMax = (partSizeMax < s ? s : partSizeMax);
+  Class inodeClass = e.getKey().getClass();
+  try {
+long[] key = (long[]) inodeClass.
+getMethod("getNamespaceKey", int.class).invoke(e.getKey(), 2);
+long[] firstKey = new long[0];
+if(part.iter

[hadoop] 03/05: HDFS-16125. [FGL] Fix the iterator for PartitionedGSet. Contributed by Xing Lin. (#3197)

2021-07-31 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 6ff40a0fe7bd3a05fd89d4d408a7c8210d7c0481
Author: Xing Lin 
AuthorDate: Fri Jul 16 13:04:59 2021 -0700

HDFS-16125. [FGL] Fix the iterator for PartitionedGSet. Contributed by Xing 
Lin. (#3197)
---
 .../java/org/apache/hadoop/util/LatchLock.java |   4 +-
 .../org/apache/hadoop/util/PartitionedGSet.java|  35 ++-
 .../apache/hadoop/util/TestPartitionedGSet.java| 270 +
 .../hadoop/hdfs/server/namenode/INodeMap.java  |   4 +-
 4 files changed, 300 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
index 41e33da..fd98391 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
@@ -30,7 +30,7 @@ public abstract class LatchLock {
   protected abstract boolean isReadTopLocked();
   /** @return true topLock is locked for write by any thread */
   protected abstract boolean isWriteTopLocked();
-  protected abstract void readTopdUnlock();
+  protected abstract void readTopUnlock();
   protected abstract void writeTopUnlock();
 
   protected abstract boolean hasReadChildLock();
@@ -46,7 +46,7 @@ public abstract class LatchLock {
   // Public APIs to use with the class
   public void readLock() {
 readChildLock();
-readTopdUnlock();
+readTopUnlock();
   }
 
   public void readUnlock() {
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
index 7ebb1b3..f3569cc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -24,7 +24,7 @@ import java.util.Map.Entry;
 import java.util.NavigableMap;
 import java.util.Set;
 import java.util.TreeMap;
-
+import java.util.NoSuchElementException;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
@@ -79,8 +79,7 @@ public class PartitionedGSet implements 
GSet {
 
   public PartitionedGSet(final int capacity,
   final Comparator comparator,
-  final LatchLock latchLock,
-  final E rootKey) {
+  final LatchLock latchLock) {
 this.partitions = new TreeMap(comparator);
 this.latchLock = latchLock;
 // addNewPartition(rootKey).put(rootKey);
@@ -275,17 +274,36 @@ public class PartitionedGSet implements 
GSet {
* modifying other partitions, while iterating through the current one.
*/
   private class EntryIterator implements Iterator {
-private final Iterator keyIterator;
+private Iterator keyIterator;
 private Iterator partitionIterator;
 
 public EntryIterator() {
   keyIterator = partitions.keySet().iterator();
-  K curKey = partitions.firstKey();
-  partitionIterator = getPartition(curKey).iterator();
+ 
+  if (!keyIterator.hasNext()) {
+partitionIterator = null;
+return;
+  }
+
+  K firstKey = keyIterator.next();
+  partitionIterator = partitions.get(firstKey).iterator();
 }
 
 @Override
 public boolean hasNext() {
+
+  // Special case: an iterator was created for an empty PartitionedGSet.
+  // Check whether new partitions have been added since then.
+  if (partitionIterator == null) {
+if (partitions.size() == 0) {
+  return false;
+} else {
+  keyIterator = partitions.keySet().iterator();
+  K nextKey = keyIterator.next();
+  partitionIterator = partitions.get(nextKey).iterator();
+}
+  }
+
   while(!partitionIterator.hasNext()) {
 if(!keyIterator.hasNext()) {
   return false;
@@ -298,9 +316,8 @@ public class PartitionedGSet implements 
GSet {
 
 @Override
 public E next() {
-  while(!partitionIterator.hasNext()) {
-K curKey = keyIterator.next();
-partitionIterator = getPartition(curKey).iterator();
+  if (!hasNext()) {
+throw new NoSuchElementException("No more elements in this set.");
   }
   return partitionIterator.next();
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPartitionedGSet.java
new file mode 100644
index 000..9ae772c
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/h

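The EntryIterator changes above cover two corner cases: an iterator created while the set is still empty must notice partitions added afterwards, and next() must throw NoSuchElementException once both levels are exhausted. A compact stand-alone version of that two-level iterator over a TreeMap of lists is sketched below; the types are illustrative and, like the original, it is not safe against concurrent structural changes to the partitions it walks.

import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.NavigableMap;
import java.util.NoSuchElementException;
import java.util.TreeMap;

// Two-level iterator: an outer iterator over partition keys and an inner
// iterator over the current partition, mirroring the fixed EntryIterator.
public class TwoLevelIteratorSketch<K, E> implements Iterator<E> {

  private final NavigableMap<K, List<E>> partitions;
  private Iterator<K> keyIterator;
  private Iterator<E> partitionIterator;

  public TwoLevelIteratorSketch(NavigableMap<K, List<E>> partitions) {
    this.partitions = partitions;
    this.keyIterator = partitions.keySet().iterator();
    // Leave partitionIterator null if there are no partitions yet.
    this.partitionIterator = keyIterator.hasNext()
        ? partitions.get(keyIterator.next()).iterator() : null;
  }

  @Override
  public boolean hasNext() {
    // The iterator was created over an empty map: re-check for partitions
    // that may have been added since then.
    if (partitionIterator == null) {
      if (partitions.isEmpty()) {
        return false;
      }
      keyIterator = partitions.keySet().iterator();
      partitionIterator = partitions.get(keyIterator.next()).iterator();
    }
    // Skip over empty partitions until an element or the end is reached.
    while (!partitionIterator.hasNext()) {
      if (!keyIterator.hasNext()) {
        return false;
      }
      partitionIterator = partitions.get(keyIterator.next()).iterator();
    }
    return true;
  }

  @Override
  public E next() {
    if (!hasNext()) {
      throw new NoSuchElementException("No more elements in this set.");
    }
    return partitionIterator.next();
  }

  public static void main(String[] args) {
    NavigableMap<Long, List<String>> parts = new TreeMap<>();
    parts.put(0L, Arrays.asList("a", "b"));
    parts.put(10L, Collections.emptyList());  // empty partition is skipped
    parts.put(20L, Arrays.asList("c"));
    TwoLevelIteratorSketch<Long, String> it = new TwoLevelIteratorSketch<>(parts);
    while (it.hasNext()) {
      System.out.println(it.next());
    }
  }
}
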
[hadoop] branch fgl updated (7598a68 -> 0f1a9a8)

2021-07-31 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a change to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


omit 7598a68  HDFS-16130. [FGL] Implement CREATE File with FGL. Contributed 
by Renukaprasad C. (#3205)
omit 0a73bde  HDFS-16128. [FGL] Added support for saving/loading an FS 
Image for PartitionedGSet. Contributed by Xing Lin. (#3201)
omit d6efb60  HDFS-16125. [FGL] Fix the iterator for PartitionedGSet. 
Contributed by Xing Lin. (#3197)
omit b784277  Add namespace key for INode. (shv)
omit 6c2b39e  INodeMap with PartitionedGSet and per-partition locking.
 add 05b6a1a  YARN-10833. Set the X-FRAME-OPTIONS header for the default 
contexts. (#3203)
 add 4c35466  HADOOP-17317. [JDK 11] Upgrade dnsjava to remove illegal 
access warnings (#2442)
 add dd8e540  Addendum HADOOP-17770 WASB : Support disabling buffered reads 
in positional reads - Added the invalid SpotBugs warning to 
findbugs-exclude.xml (#3223)
 add 2f2f822  HDFS-12920. HDFS default value change (with adding time unit) 
breaks old version MR tarball work with new version (3.0) of hadoop. (#3227)
 add b7431c3  [UI2] Bump http-proxy to 1.18.1 (#2891)
 add 5d76549  HDFS-16131. Show storage type for failed volumes on namenode 
web (#3211). Contributed by  tomscut.
 add d710ec8  HDFS-16140. TestBootstrapAliasmap fails by BindException. 
(#3229)
 add 97c88c9  HADOOP-17807. Use separate src dir for platform builds (#3210)
 add b038042  HDFS-16139. Update BPServiceActor Scheduler's 
nextBlockReportTime atomically (#3228). Contributed by Viraj Jasani.
 add f813554  HADOOP-13887. Support S3 client side encryption (S3-CSE) 
using AWS-SDK (#2706)
 add fa0289b  YARN-6221. Entities missing from ATS when summary log file 
info got returned to the ATS before the domain log. Contributed by Xiaomin Zhang
 add aecfcf1  HDFS-16119. start balancer with parameters 
-hotBlockTimeInterval xxx is invalid. (#3185)
 add 10ba4cc  HADOOP-17765. ABFS: Use Unique File Paths in Tests. (#3153)
 add ae20516  HDFS-16111. Add a configuration to 
RoundRobinVolumeChoosingPolicy to avoid failed volumes at datanodes. (#3175)
 add b4a5247  YARN-9551. TestTimelineClientV2Impl.testSyncCall fails 
intermittent (#3212)
 add dac10fc  HDFS-16145. CopyListing fails with FNF exception with 
snapshot diff. (#3234)
 add fd13970  HDFS-16137.Improve the comments related to 
FairCallQueue#queues. (#3226)
 add 8d0297c  YARN-10727. ParentQueue does not validate the queue on 
removal. Contributed by Andras Gyori
 add 4eae284  HDFS-16144. Revert HDFS-15372 (Files in snapshots no longer 
see attribute provider permissions). Contributed by Stephen O'Donnell
 add b19dae8  HADOOP-17817. S3A to raise IOE if both S3-CSE and S3Guard 
enabled (#3239)
 add 1b9efe5  YARN-10790. CS Flexible AQC: Add separate parent and leaf 
template property. Contributed by Andras Gyori
 add f2b6c03  YARN-6272. 
TestAMRMClient#testAMRMClientWithContainerResourceChange fails intermittently. 
Contributed by Andras Gyory & Prabhu Joseph
 add e001f8e  HADOOP-17814. Provide fallbacks for identity/cost providers 
and backoff enable (#3230)
 add 1d03c69  HADOOP-17811: ABFS ExponentialRetryPolicy doesn't pick up 
configuration values (#3221)
 add 3c8a48e  HADOOP-17819. Add extensions to ProtobufRpcEngine 
RequestHeaderProto. Contributed by Hector Sandoval Chaverri. (#3242)
 add 683feaa  HDFS-15175. Multiple CloseOp shared block instance causes the 
standby namenode to crash when rolling editlog. Contributed by Wan Chang.
 add 6f730fd  HDFS-15936.Solve SocketTimeoutException#sendPacket() does not 
record SocketTimeout exception. (#2836)
 add d78b300  YARN-10841. Fix token reset synchronization for UAM response 
token. (#3194)
 add 54f9fff  YARN-10628. Add node usage metrics in SLS. Contributed by 
Vadaga Ananyo Rao
 add 74770c8  YARN-10663. Add runningApps stats in SLS. Contributed by 
Vadaga Ananyo Rao
 add ac0a4e7  YARN-10869. CS considers only the default 
maximum-allocation-mb/vcore property as a maximum when it creates dynamic 
queues (#3225)
 add 8f750c5  YARN-10856. Prevent ATS v2 health check REST API call if the 
ATS service itself is disabled. (#3236)
 add 13467f4  HADOOP-17815. Run CI for Centos 7 (#3231)
 add 798a083  YARN-10814. Fallback to RandomSecretProvider if the secret 
file is empty (#3206)
 add 6d77f3b  HDFS-14529. SetTimes to throw FileNotFoundException if inode 
is not found (#3243)
 add a218038  HADOOP-17139 Re-enable optimized copyFromLocal implementation 
in S3AFileSystem (#3101)
 add 266b1bd  HADOOP-17812. NPE in S3AInputStream read() after failure to 
reconnect to store (#3222)
 new 2e3eb2d  INodeMap with PartitionedGSet and per-partition locking.
 new e00c784  Add namespace key for INode. (shv)
 new 6ff40a0  HDFS-16125. [FGL] Fix the iterator for Partitioned

[hadoop] 04/05: HDFS-16128. [FGL] Added support for saving/loading an FS Image for PartitionedGSet. Contributed by Xing Lin. (#3201)

2021-07-31 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit b135d26f1e53443162b1106ae56e96a766632171
Author: Xing Lin 
AuthorDate: Sat Jul 31 12:56:05 2021 -0700

HDFS-16128. [FGL] Added support for saving/loading an FS Image for 
PartitionedGSet. Contributed by Xing Lin. (#3201)
---
 .../org/apache/hadoop/util/PartitionedGSet.java|  24 +++--
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |   4 +-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  70 ++
 .../hadoop/hdfs/server/namenode/FSImage.java   |  12 +++
 .../hdfs/server/namenode/FSImageFormatPBINode.java |  11 ++-
 .../hadoop/hdfs/server/namenode/INodeMap.java  | 105 -
 6 files changed, 168 insertions(+), 58 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
index f3569cc..f493402 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -68,7 +68,7 @@ public class PartitionedGSet implements 
GSet {
* Consists of a hash table {@link LightWeightGSet} and a lock, which
* controls access to this partition independently on the other ones.
*/
-  private class PartitionEntry extends LightWeightGSet {
+  public class PartitionEntry extends LightWeightGSet {
 private final LatchLock partLock;
 
 PartitionEntry(int defaultPartitionCapacity) {
@@ -121,7 +121,7 @@ public class PartitionedGSet implements 
GSet {
 return size;
   }
 
-  protected PartitionEntry getPartition(final K key) {
+  public PartitionEntry getPartition(final K key) {
 Entry partEntry = partitions.floorEntry(key);
 if(partEntry == null) {
   return null;
@@ -174,6 +174,10 @@ public class PartitionedGSet implements 
GSet {
 E result = part.put(element);
 if(result == null) {  // new element
   size++;
+  LOG.debug("partitionPGSet.put: added key {}, size is now {} ", key, 
size);
+} else {
+  LOG.debug("partitionPGSet.put: replaced key {}, size is now {}",
+  key, size);
 }
 return result;
   }
@@ -230,19 +234,25 @@ public class PartitionedGSet implements 
GSet {
   try {
 long[] key = (long[]) inodeClass.
 getMethod("getNamespaceKey", int.class).invoke(e.getKey(), 2);
-long[] firstKey = new long[0];
+long[] firstKey = new long[key.length];
 if(part.iterator().hasNext()) {
   Object first = part.iterator().next();
-  firstKey = (long[]) inodeClass.getMethod(
+  long[] firstKeyRef = (long[]) inodeClass.getMethod(
 "getNamespaceKey", int.class).invoke(first, 2);
   Object parent = inodeClass.
   getMethod("getParent").invoke(first);
   long parentId = (parent == null ? 0L :
 (long) inodeClass.getMethod("getId").invoke(parent));
+  for (int j=0; j < key.length; j++) {
+firstKey[j] = firstKeyRef[j];
+  }
   firstKey[0] = parentId;
 }
 LOG.error("Partition #{}\t key: {}\t size: {}\t first: {}",
 i++, key, s, firstKey);  // SHV should be info
+  } catch (NoSuchElementException ex) {
+LOG.error("iterator.next() throws NoSuchElementException.");
+throw ex;
   } catch (Exception ex) {
 LOG.error("Cannot find Method getNamespaceKey() in {}", inodeClass);
   }
@@ -250,8 +260,8 @@ public class PartitionedGSet implements 
GSet {
 partSizeAvg = (int) (totalSize / parts.size());
 LOG.error("Partition sizes: min = {}, avg = {}, max = {}, sum = {}",
 partSizeMin, partSizeAvg, partSizeMax, totalSize);
-LOG.error("Number of partitions: empty = {}, full = {}",
-numEmptyPartitions, numFullPartitions);
+LOG.error("Number of partitions: empty = {}, in-use = {}, full = {}",
+numEmptyPartitions, parts.size()-numEmptyPartitions, 
numFullPartitions);
   }
 
   @Override
@@ -277,6 +287,8 @@ public class PartitionedGSet implements 
GSet {
 private Iterator keyIterator;
 private Iterator partitionIterator;
 
+// Set partitionIterator to point to the first partition, or set it to null
+// when there are no partitions created for this PartitionedGSet.
 public EntryIterator() {
   keyIterator = partitions.keySet().iterator();
  
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.jav

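For context, the getPartition() method made public in the hunk above resolves a key with TreeMap.floorEntry(), so each partition owns the contiguous key range starting at the key it was registered under. The sketch below shows that range lookup with long[] keys ordered lexicographically; treating the key as a {parentId, inodeId} pair is an assumption drawn from the getNamespaceKey(..., 2) calls in this diff, not something the patch states.

import java.util.Comparator;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

// Range lookup sketch: partitions are registered under the smallest key they
// cover, and floorEntry() picks the partition responsible for any given key.
public class RangeLookupSketch {

  // Lexicographic order on long[] keys, e.g. {parentId, inodeId}.
  static final Comparator<long[]> KEY_ORDER = (a, b) -> {
    for (int i = 0; i < Math.min(a.length, b.length); i++) {
      int c = Long.compare(a[i], b[i]);
      if (c != 0) {
        return c;
      }
    }
    return Integer.compare(a.length, b.length);
  };

  static final NavigableMap<long[], String> partitions = new TreeMap<>(KEY_ORDER);

  static String partitionFor(long[] key) {
    Map.Entry<long[], String> entry = partitions.floorEntry(key);
    return entry == null ? null : entry.getValue();
  }

  public static void main(String[] args) {
    partitions.put(new long[]{0L, 0L}, "partition-0");
    partitions.put(new long[]{100L, 0L}, "partition-1");
    System.out.println(partitionFor(new long[]{100L, 7L})); // partition-1
    System.out.println(partitionFor(new long[]{42L, 3L}));  // partition-0
  }
}
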
[hadoop] branch branch-2.10 updated: HADOOP-17819. Add extensions to ProtobufRpcEngine RequestHeaderProto. Contributed by Hector Sandoval Chaverri. (#3242)

2021-07-28 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 2187c6d  HADOOP-17819. Add extensions to ProtobufRpcEngine 
RequestHeaderProto. Contributed by Hector Sandoval Chaverri. (#3242)
2187c6d is described below

commit 2187c6d366097a9255880e9a82ee6dcc0105d9fc
Author: hchaverr 
AuthorDate: Tue Jul 27 13:26:34 2021 -0700

HADOOP-17819. Add extensions to ProtobufRpcEngine RequestHeaderProto. 
Contributed by Hector Sandoval Chaverri. (#3242)

(cherry picked from commit 3c8a48e681babe4038e49205624e7be183b3466e)
---
 .../hadoop-common/src/main/proto/ProtobufRpcEngine.proto   | 3 +++
 1 file changed, 3 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto 
b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
index fa11313..f72cf1a 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
@@ -64,4 +64,7 @@ message RequestHeaderProto {
   
   /** protocol version of class declaring the called method */
   required uint64 clientProtocolVersion = 3;
+
+  /** protocol extensions */
+  extensions 1000 to max;
 }




[hadoop] branch branch-3.2 updated: HADOOP-17819. Add extensions to ProtobufRpcEngine RequestHeaderProto. Contributed by Hector Sandoval Chaverri. (#3242)

2021-07-28 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 4f602cb  HADOOP-17819. Add extensions to ProtobufRpcEngine 
RequestHeaderProto. Contributed by Hector Sandoval Chaverri. (#3242)
4f602cb is described below

commit 4f602cb17bbfbd7255ba5c76a56d073b70e0ccd8
Author: hchaverr 
AuthorDate: Tue Jul 27 13:26:34 2021 -0700

HADOOP-17819. Add extensions to ProtobufRpcEngine RequestHeaderProto. 
Contributed by Hector Sandoval Chaverri. (#3242)

(cherry picked from commit 3c8a48e681babe4038e49205624e7be183b3466e)
---
 .../hadoop-common/src/main/proto/ProtobufRpcEngine.proto   | 3 +++
 1 file changed, 3 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto 
b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
index fa11313..f72cf1a 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
@@ -64,4 +64,7 @@ message RequestHeaderProto {
   
   /** protocol version of class declaring the called method */
   required uint64 clientProtocolVersion = 3;
+
+  /** protocol extensions */
+  extensions 1000 to max;
 }




[hadoop] branch branch-3.3 updated: HADOOP-17819. Add extensions to ProtobufRpcEngine RequestHeaderProto. Contributed by Hector Sandoval Chaverri. (#3242)

2021-07-28 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 6cc1426  HADOOP-17819. Add extensions to ProtobufRpcEngine 
RequestHeaderProto. Contributed by Hector Sandoval Chaverri. (#3242)
6cc1426 is described below

commit 6cc1426b63cdd2d865e61f7c8e6f6e20e62d1b16
Author: hchaverr 
AuthorDate: Tue Jul 27 13:26:34 2021 -0700

HADOOP-17819. Add extensions to ProtobufRpcEngine RequestHeaderProto. 
Contributed by Hector Sandoval Chaverri. (#3242)

(cherry picked from commit 3c8a48e681babe4038e49205624e7be183b3466e)
---
 .../hadoop-common/src/main/proto/ProtobufRpcEngine.proto   | 3 +++
 .../hadoop-common/src/main/proto/ProtobufRpcEngine2.proto  | 3 +++
 2 files changed, 6 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto 
b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
index fa11313..f72cf1a 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
@@ -64,4 +64,7 @@ message RequestHeaderProto {
   
   /** protocol version of class declaring the called method */
   required uint64 clientProtocolVersion = 3;
+
+  /** protocol extensions */
+  extensions 1000 to max;
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto 
b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto
index 16ee880..c3023ec 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto
+++ 
b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto
@@ -64,4 +64,7 @@ message RequestHeaderProto {
 
   /** protocol version of class declaring the called method */
   required uint64 clientProtocolVersion = 3;
+
+  /** protocol extensions */
+  extensions 1000 to max;
 }




[hadoop] branch trunk updated: HADOOP-17819. Add extensions to ProtobufRpcEngine RequestHeaderProto. Contributed by Hector Sandoval Chaverri. (#3242)

2021-07-28 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3c8a48e  HADOOP-17819. Add extensions to ProtobufRpcEngine 
RequestHeaderProto. Contributed by Hector Sandoval Chaverri. (#3242)
3c8a48e is described below

commit 3c8a48e681babe4038e49205624e7be183b3466e
Author: hchaverr 
AuthorDate: Tue Jul 27 13:26:34 2021 -0700

HADOOP-17819. Add extensions to ProtobufRpcEngine RequestHeaderProto. 
Contributed by Hector Sandoval Chaverri. (#3242)
---
 .../hadoop-common/src/main/proto/ProtobufRpcEngine.proto   | 3 +++
 .../hadoop-common/src/main/proto/ProtobufRpcEngine2.proto  | 3 +++
 2 files changed, 6 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto 
b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
index fa11313..f72cf1a 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
@@ -64,4 +64,7 @@ message RequestHeaderProto {
   
   /** protocol version of class declaring the called method */
   required uint64 clientProtocolVersion = 3;
+
+  /** protocol extensions */
+  extensions 1000 to max;
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto 
b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto
index 16ee880..c3023ec 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto
+++ 
b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto
@@ -64,4 +64,7 @@ message RequestHeaderProto {
 
   /** protocol version of class declaring the called method */
   required uint64 clientProtocolVersion = 3;
+
+  /** protocol extensions */
+  extensions 1000 to max;
 }




[hadoop] 04/05: HDFS-16128. [FGL] Added support for saving/loading an FS Image for PartitionedGSet. Contributed by Xing Lin. (#3201)

2021-07-23 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 0a73bde9a0051060943fdd2d7461008f471ce30d
Author: Xing Lin 
AuthorDate: Fri Jul 23 12:58:31 2021 -0700

HDFS-16128. [FGL] Added support for saving/loading an FS Image for 
PartitionedGSet. Contributed by Xing Lin. (#3201)
---
 .../org/apache/hadoop/util/PartitionedGSet.java| 21 --
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |  4 +-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   | 83 ++
 .../hadoop/hdfs/server/namenode/FSImage.java   | 12 
 .../hdfs/server/namenode/FSImageFormatPBINode.java | 11 +--
 .../hadoop/hdfs/server/namenode/INodeMap.java  | 77 
 6 files changed, 182 insertions(+), 26 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
index f3569cc..5fe50ab 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -68,7 +68,7 @@ public class PartitionedGSet implements 
GSet {
* Consists of a hash table {@link LightWeightGSet} and a lock, which
* controls access to this partition independently on the other ones.
*/
-  private class PartitionEntry extends LightWeightGSet {
+  public class PartitionEntry extends LightWeightGSet {
 private final LatchLock partLock;
 
 PartitionEntry(int defaultPartitionCapacity) {
@@ -121,7 +121,7 @@ public class PartitionedGSet implements 
GSet {
 return size;
   }
 
-  protected PartitionEntry getPartition(final K key) {
+  public PartitionEntry getPartition(final K key) {
 Entry partEntry = partitions.floorEntry(key);
 if(partEntry == null) {
   return null;
@@ -174,6 +174,10 @@ public class PartitionedGSet implements 
GSet {
 E result = part.put(element);
 if(result == null) {  // new element
   size++;
+  LOG.debug("partitionPGSet.put: added key {}, size is now {} ", key, 
size);
+} else {
+  LOG.debug("partitionPGSet.put: replaced key {}, size is now {}",
+  key, size);
 }
 return result;
   }
@@ -230,15 +234,18 @@ public class PartitionedGSet implements 
GSet {
   try {
 long[] key = (long[]) inodeClass.
 getMethod("getNamespaceKey", int.class).invoke(e.getKey(), 2);
-long[] firstKey = new long[0];
+long[] firstKey = new long[key.length];
 if(part.iterator().hasNext()) {
   Object first = part.iterator().next();
-  firstKey = (long[]) inodeClass.getMethod(
+  long[] firstKeyRef = (long[]) inodeClass.getMethod(
 "getNamespaceKey", int.class).invoke(first, 2);
   Object parent = inodeClass.
   getMethod("getParent").invoke(first);
   long parentId = (parent == null ? 0L :
 (long) inodeClass.getMethod("getId").invoke(parent));
+  for (int j=0; j < key.length; j++) {
+firstKey[j] = firstKeyRef[j];
+  }
   firstKey[0] = parentId;
 }
 LOG.error("Partition #{}\t key: {}\t size: {}\t first: {}",
@@ -250,8 +257,8 @@ public class PartitionedGSet implements 
GSet {
 partSizeAvg = (int) (totalSize / parts.size());
 LOG.error("Partition sizes: min = {}, avg = {}, max = {}, sum = {}",
 partSizeMin, partSizeAvg, partSizeMax, totalSize);
-LOG.error("Number of partitions: empty = {}, full = {}",
-numEmptyPartitions, numFullPartitions);
+LOG.error("Number of partitions: empty = {}, in-use = {}, full = {}",
+numEmptyPartitions, parts.size()-numEmptyPartitions, 
numFullPartitions);
   }
 
   @Override
@@ -277,6 +284,8 @@ public class PartitionedGSet implements 
GSet {
 private Iterator keyIterator;
 private Iterator partitionIterator;
 
+// Set partitionIterator to point to the first partition, or set it to null
+// when there are no partitions created for this PartitionedGSet.
 public EntryIterator() {
   keyIterator = partitions.keySet().iterator();
  
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 5a40906..1c979e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -35,6 +35,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org

[hadoop] branch fgl updated (b1e2c07 -> 7598a68)

2021-07-23 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a change to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


omit b1e2c07  HDFS-16125. [FGL] Fix the iterator for PartitionedGSet. 
Contributed by Xing Lin. (#3197)
omit 1829b58  Add namespace key for INode. (shv)
omit 0e7d216  INodeMap with PartitionedGSet and per-partition locking.
 add 6ed7670  HDFS-16067. Support Append API in NNThroughputBenchmark. 
Contributed by Renukaprasad C.
 add 0ac443b  YARN-10855. yarn logs cli fails to retrieve logs if any TFile 
is corrupt or empty. Contributed by Jim Brennan.
 add 17bf2fc  YARN-10858. [UI2] YARN-10826 breaks Queue view. (#3213)
 add e1d00ad  HADOOP-16290. Enable RpcMetrics units to be configurable 
(#3198)
 add de41ce8  HDFS-16087. Fix stuck issue in rbfbalance tool (#3141).  
Contributed by Eric Yin.
 add e634bf3  YARN-10630. [UI2] Ambiguous queue name resolution (#3214)
 add 0441efe  YARN-10860. Make max container per heartbeat configs 
refreshable. Contributed by Eric Badger.
 add dbd255f  HADOOP-17796. Upgrade jetty version to 9.4.43 (#3208)
 add 2da9b95  YARN-10657. We should make max application per queue to 
support node label. Contributed by Andras Gyori.
 add 98412ce  HADOOP-17813. Checkstyle - Allow line length: 100
 add 3a52bfc  HADOOP-17808. ipc.Client to set interrupt flag after catching 
InterruptedException (#3219)
 add aa1a5dd  YARN-10829. Support getApplications API in 
FederationClientInterceptor (#3135)
 add 63dfd84  HADOOP-17458. S3A to treat "SdkClientException: Data read has 
a different length than the expected" as EOFException (#3040)
 new 6c2b39e  INodeMap with PartitionedGSet and per-partition locking.
 new b784277  Add namespace key for INode. (shv)
 new d6efb60  HDFS-16125. [FGL] Fix the iterator for PartitionedGSet. 
Contributed by Xing Lin. (#3197)
 new 0a73bde  HDFS-16128. [FGL] Added support for saving/loading an FS 
Image for PartitionedGSet. Contributed by Xing Lin. (#3201)
 new 7598a68  HDFS-16130. [FGL] Implement CREATE File with FGL. Contributed 
by Renukaprasad C. (#3205)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (b1e2c07)
            \
             N -- N -- N   refs/heads/fgl (7598a68)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 5 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../src/main/resources/checkstyle/checkstyle.xml   |   4 +-
 .../apache/hadoop/fs/CommonConfigurationKeys.java  |   4 +-
 .../main/java/org/apache/hadoop/ipc/Client.java|   8 +-
 .../org/apache/hadoop/ipc/DecayRpcScheduler.java   |   8 +-
 .../java/org/apache/hadoop/ipc/RpcScheduler.java   |   8 +-
 .../main/java/org/apache/hadoop/ipc/Server.java|  10 +-
 .../org/apache/hadoop/ipc/metrics/RpcMetrics.java  |  38 +-
 .../org/apache/hadoop/util/PartitionedGSet.java|  21 +++-
 .../src/main/resources/core-default.xml|  15 +++
 .../src/site/markdown/Benchmarking.md  |   1 +
 .../hadoop-common/src/site/markdown/Metrics.md |   2 +
 .../test/java/org/apache/hadoop/ipc/TestRPC.java   |  69 ++-
 .../hdfs/rbfbalance/RouterDistCpProcedure.java |   1 +
 .../hdfs/rbfbalance/TestRouterDistCpProcedure.java | 120 ++
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |  40 --
 .../hdfs/server/namenode/FSDirWriteFileOp.java |  94 +-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  83 +
 .../hadoop/hdfs/server/namenode/FSImage.java   |  12 ++
 .../hdfs/server/namenode/FSImageFormatPBINode.java |  11 +-
 .../hadoop/hdfs/server/namenode/INodeMap.java  |  77 +---
 .../server/namenode/NNThroughputBenchmark.java |  52 
 .../server/namenode/TestNNThroughputBenchmark.java |  46 +++
 hadoop-project/pom.xml |   2 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java|  16 ++-
 .../java/org/apache/hadoop/fs/s3a/TestInvoker.java |  36 ++
 .../hadoop/tools/fedbalance/DistCpProcedure.java   |   4 +-
 .../tools/fedbalance/TestDistCpProcedure.java  |   6 +-
 .../protocolrecords/GetApplicationsResponse.java   |  12 ++
 .../apache/hadoop/yarn/c

[hadoop] 05/05: HDFS-16130. [FGL] Implement CREATE File with FGL. Contributed by Renukaprasad C. (#3205)

2021-07-23 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 7598a68b3bcb539a199c595491a03f64cd777e4e
Author: Renukaprasad C 
AuthorDate: Fri Jul 23 15:24:34 2021 -0700

HDFS-16130. [FGL] Implement CREATE File with FGL. Contributed by 
Renukaprasad C. (#3205)
---
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  | 36 ++---
 .../hdfs/server/namenode/FSDirWriteFileOp.java | 94 +++---
 2 files changed, 87 insertions(+), 43 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 1c979e5..4a15fd9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -70,18 +70,7 @@ class FSDirMkdirOp {
 // create multiple inodes.
 fsn.checkFsObjectLimit();
 
-// create all missing directories along the path,
-// but don't add them to the INodeMap yet
-permissions = addImplicitUwx(permissions, permissions); // SHV !!!
-INode[] missing = createPathDirectories(fsd, iip, permissions);
-iip = iip.getExistingINodes();
-// switch the locks
-fsd.getINodeMap().latchWriteLock(iip, missing);
-// Add missing inodes to the INodeMap
-for(INode dir : missing) {
-  iip = addSingleDirectory(fsd, iip, dir, permissions);
-  assert iip != null : "iip should not be null";
-}
+iip = createMissingDirs(fsd, iip, permissions);
   }
   return fsd.getAuditFileInfo(iip);
 } finally {
@@ -89,6 +78,26 @@ class FSDirMkdirOp {
 }
   }
 
+  static INodesInPath createMissingDirs(FSDirectory fsd,
+  INodesInPath iip, PermissionStatus permissions) throws IOException {
+// create all missing directories along the path,
+// but don't add them to the INodeMap yet
+permissions = addImplicitUwx(permissions, permissions); // SHV !!!
+INode[] missing = createPathDirectories(fsd, iip, permissions);
+iip = iip.getExistingINodes();
+if (missing.length == 0) {
+  return iip;
+}
+// switch the locks
+fsd.getINodeMap().latchWriteLock(iip, missing);
+// Add missing inodes to the INodeMap
+for (INode dir : missing) {
+  iip = addSingleDirectory(fsd, iip, dir, permissions);
+  assert iip != null : "iip should not be null";
+}
+return iip;
+  }
+
   /**
* For a given absolute path, create all ancestors as directories along the
* path. All ancestors inherit their parent's permission plus an implicit
@@ -253,6 +262,9 @@ class FSDirMkdirOp {
 return dir;
   }
 
+  /**
+   * Find out missing INodes for the current mkdir op.
+   */
   private static INode[] createPathDirectories(FSDirectory fsd,
   INodesInPath iip, PermissionStatus perm)
   throws IOException {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 0d9c6ae..f2cca7b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -228,6 +228,13 @@ class FSDirWriteFileOp {
 // while chooseTarget() was executing.
 LocatedBlock[] onRetryBlock = new LocatedBlock[1];
 INodesInPath iip = fsn.dir.resolvePath(null, src, fileId);
+
+INode[] missing = new INode[]{iip.getLastINode()};
+INodesInPath existing = iip.getParentINodesInPath();
+FSDirectory fsd = fsn.getFSDirectory();
+// switch the locks
+fsd.getINodeMap().latchWriteLock(existing, missing);
+
 FileState fileState = analyzeFileState(fsn, iip, fileId, clientName,
previous, onRetryBlock);
 final INodeFile pendingFile = fileState.inode;
@@ -392,8 +399,8 @@ class FSDirWriteFileOp {
 }
 fsn.checkFsObjectLimit();
 INodeFile newNode = null;
-INodesInPath parent =
-FSDirMkdirOp.createAncestorDirectories(fsd, iip, permissions);
+INodesInPath parent = FSDirMkdirOp.createMissingDirs(fsd,
+iip.getParentINodesInPath(), permissions);
 if (parent != null) {
   iip = addFile(fsd, parent, iip.getLastLocalName(), permissions,
   replication, blockSize, holder, clientMachine, shouldReplicate,
@@ -541,41 +548,22 @@ class FSDirWriteFileOp {
   FSDirectory fsd, INodesInPath existing, byte[] localName,
   PermissionStatus permissions, short replication, lon

[hadoop] 03/05: HDFS-16125. [FGL] Fix the iterator for PartitionedGSet. Contributed by Xing Lin. (#3197)

2021-07-23 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit d6efb60f498f81befc2e88c6f9f1929dd765e951
Author: Xing Lin 
AuthorDate: Fri Jul 16 13:04:59 2021 -0700

HDFS-16125. [FGL] Fix the iterator for PartitionedGSet. Contributed by Xing 
Lin. (#3197)
---
 .../java/org/apache/hadoop/util/LatchLock.java |   4 +-
 .../org/apache/hadoop/util/PartitionedGSet.java|  35 ++-
 .../apache/hadoop/util/TestPartitionedGSet.java| 270 +
 .../hadoop/hdfs/server/namenode/INodeMap.java  |   4 +-
 4 files changed, 300 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
index 41e33da..fd98391 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
@@ -30,7 +30,7 @@ public abstract class LatchLock {
   protected abstract boolean isReadTopLocked();
   /** @return true topLock is locked for write by any thread */
   protected abstract boolean isWriteTopLocked();
-  protected abstract void readTopdUnlock();
+  protected abstract void readTopUnlock();
   protected abstract void writeTopUnlock();
 
   protected abstract boolean hasReadChildLock();
@@ -46,7 +46,7 @@ public abstract class LatchLock {
   // Public APIs to use with the class
   public void readLock() {
 readChildLock();
-readTopdUnlock();
+readTopUnlock();
   }
 
   public void readUnlock() {
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
index 7ebb1b3..f3569cc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -24,7 +24,7 @@ import java.util.Map.Entry;
 import java.util.NavigableMap;
 import java.util.Set;
 import java.util.TreeMap;
-
+import java.util.NoSuchElementException;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
@@ -79,8 +79,7 @@ public class PartitionedGSet implements 
GSet {
 
   public PartitionedGSet(final int capacity,
   final Comparator comparator,
-  final LatchLock latchLock,
-  final E rootKey) {
+  final LatchLock latchLock) {
 this.partitions = new TreeMap(comparator);
 this.latchLock = latchLock;
 // addNewPartition(rootKey).put(rootKey);
@@ -275,17 +274,36 @@ public class PartitionedGSet implements 
GSet {
* modifying other partitions, while iterating through the current one.
*/
   private class EntryIterator implements Iterator {
-private final Iterator keyIterator;
+private Iterator keyIterator;
 private Iterator partitionIterator;
 
 public EntryIterator() {
   keyIterator = partitions.keySet().iterator();
-  K curKey = partitions.firstKey();
-  partitionIterator = getPartition(curKey).iterator();
+ 
+  if (!keyIterator.hasNext()) {
+partitionIterator = null;
+return;
+  }
+
+  K firstKey = keyIterator.next();
+  partitionIterator = partitions.get(firstKey).iterator();
 }
 
 @Override
 public boolean hasNext() {
+
+  // Special case: an iterator was created for an empty PartitionedGSet.
+  // Check whether new partitions have been added since then.
+  if (partitionIterator == null) {
+if (partitions.size() == 0) {
+  return false;
+} else {
+  keyIterator = partitions.keySet().iterator();
+  K nextKey = keyIterator.next();
+  partitionIterator = partitions.get(nextKey).iterator();
+}
+  }
+
   while(!partitionIterator.hasNext()) {
 if(!keyIterator.hasNext()) {
   return false;
@@ -298,9 +316,8 @@ public class PartitionedGSet implements 
GSet {
 
 @Override
 public E next() {
-  while(!partitionIterator.hasNext()) {
-K curKey = keyIterator.next();
-partitionIterator = getPartition(curKey).iterator();
+  if (!hasNext()) {
+throw new NoSuchElementException("No more elements in this set.");
   }
   return partitionIterator.next();
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPartitionedGSet.java
new file mode 100644
index 000..9ae772c
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/h
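
Note on the change above: the new EntryIterator no longer calls partitions.firstKey() in its constructor (which throws on an empty set); instead it leaves partitionIterator null and lazily picks up partitions added later, and next() now honours the java.util.Iterator contract by throwing NoSuchElementException when exhausted. A minimal standalone sketch of the same pattern follows; the class and field names are illustrative only, not the actual PartitionedGSet code.

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.TreeMap;

/** Sketch: iterate over values spread across ordered partitions. */
public class PartitionIteratorSketch<K, E> implements Iterator<E> {
  private final TreeMap<K, List<E>> partitions;
  private Iterator<K> keyIterator;
  private Iterator<E> partitionIterator;

  public PartitionIteratorSketch(TreeMap<K, List<E>> partitions) {
    this.partitions = partitions;
    this.keyIterator = partitions.keySet().iterator();
    // Do not call firstKey() here: it throws on an empty map.
    this.partitionIterator =
        keyIterator.hasNext() ? partitions.get(keyIterator.next()).iterator() : null;
  }

  @Override
  public boolean hasNext() {
    if (partitionIterator == null) {          // created while the set was empty
      if (partitions.isEmpty()) {
        return false;
      }
      keyIterator = partitions.keySet().iterator();
      partitionIterator = partitions.get(keyIterator.next()).iterator();
    }
    while (!partitionIterator.hasNext()) {    // skip exhausted or empty partitions
      if (!keyIterator.hasNext()) {
        return false;
      }
      partitionIterator = partitions.get(keyIterator.next()).iterator();
    }
    return true;
  }

  @Override
  public E next() {
    if (!hasNext()) {                         // Iterator contract
      throw new NoSuchElementException("No more elements in this set.");
    }
    return partitionIterator.next();
  }

  public static void main(String[] args) {
    TreeMap<String, List<Integer>> parts = new TreeMap<>();
    parts.put("a", Arrays.asList(1, 2));
    parts.put("b", Arrays.asList());
    parts.put("c", Arrays.asList(3));
    Iterator<Integer> it = new PartitionIteratorSketch<>(parts);
    while (it.hasNext()) {
      System.out.println(it.next());          // prints 1, 2, 3
    }
  }
}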

[hadoop] 01/05: INodeMap with PartitionedGSet and per-partition locking.

2021-07-23 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 6c2b39e8e418d4d54932506752828a59e6e92feb
Author: Konstantin V Shvachko 
AuthorDate: Fri May 7 17:47:37 2021 -0700

INodeMap with PartitionedGSet and per-partition locking.
---
 .../java/org/apache/hadoop/util/LatchLock.java |  64 +
 .../org/apache/hadoop/util/PartitionedGSet.java| 263 +
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |  92 ++-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |   2 +-
 .../hadoop/hdfs/server/namenode/FSImage.java   |  29 ++-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |   9 +-
 .../hdfs/server/namenode/FSNamesystemLock.java |  96 +++-
 .../hadoop/hdfs/server/namenode/INodeMap.java  | 148 ++--
 .../java/org/apache/hadoop/hdfs/DFSTestUtil.java   |   2 +
 .../hadoop/hdfs/server/namenode/TestINodeFile.java |  39 ++-
 10 files changed, 682 insertions(+), 62 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
new file mode 100644
index 000..41e33da
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+/**
+ * LatchLock controls two hierarchical Read/Write locks:
+ * the topLock and the childLock.
+ * Typically an operation starts with the topLock already acquired.
+ * To acquire child lock LatchLock will
+ * first acquire the childLock, and then release the topLock.
+ */
+public abstract class LatchLock {
+  // Interfaces methods to be defined for subclasses
+  /** @return true topLock is locked for read by any thread */
+  protected abstract boolean isReadTopLocked();
+  /** @return true topLock is locked for write by any thread */
+  protected abstract boolean isWriteTopLocked();
+  protected abstract void readTopdUnlock();
+  protected abstract void writeTopUnlock();
+
+  protected abstract boolean hasReadChildLock();
+  protected abstract void readChildLock();
+  protected abstract void readChildUnlock();
+
+  protected abstract boolean hasWriteChildLock();
+  protected abstract void writeChildLock();
+  protected abstract void writeChildUnlock();
+
+  protected abstract LatchLock clone();
+
+  // Public APIs to use with the class
+  public void readLock() {
+readChildLock();
+readTopdUnlock();
+  }
+
+  public void readUnlock() {
+readChildUnlock();
+  }
+
+  public void writeLock() {
+writeChildLock();
+writeTopUnlock();
+  }
+
+  public void writeUnlock() {
+writeChildUnlock();
+  }
+}
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
new file mode 100644
index 000..4b0cdc9
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -0,0 +1,263 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterat
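
For context on the LatchLock class introduced above: its readLock()/writeLock() helpers implement a latching hand-off, acquiring the finer-grained child (partition) lock before releasing the already-held top (global) lock, so other partitions become free to proceed while the current operation keeps exclusive access to its own partition. A standalone sketch of that hand-off over two plain ReentrantReadWriteLocks is shown below; it is illustrative only and is not the FSNamesystemLock subclass added elsewhere in this commit.

import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Sketch: downgrade from a global (top) lock to a partition (child) lock. */
public class LatchSketch {
  private final ReentrantReadWriteLock topLock = new ReentrantReadWriteLock();
  private final ReentrantReadWriteLock childLock = new ReentrantReadWriteLock();

  /** Caller already holds topLock for read; latch onto the child partition. */
  public void latchReadLock() {
    childLock.readLock().lock();   // acquire the finer-grained lock first
    topLock.readLock().unlock();   // then release the coarse lock
  }

  public void readUnlock() {
    childLock.readLock().unlock();
  }

  public static void main(String[] args) {
    LatchSketch latch = new LatchSketch();
    latch.topLock.readLock().lock();  // an operation starts with the top lock held
    latch.latchReadLock();            // other partitions may now proceed
    latch.readUnlock();
  }
}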

[hadoop] 02/05: Add namespace key for INode. (shv)

2021-07-23 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit b78427744d964760c36167d1dd92408aa73a5955
Author: Konstantin V Shvachko 
AuthorDate: Fri May 7 17:51:58 2021 -0700

Add namespace key for INode. (shv)
---
 .../org/apache/hadoop/util/PartitionedGSet.java| 80 ++
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |  3 +
 .../apache/hadoop/hdfs/server/namenode/INode.java  | 40 ++-
 .../hadoop/hdfs/server/namenode/INodeMap.java  | 71 +--
 4 files changed, 176 insertions(+), 18 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
index 4b0cdc9..7ebb1b3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -22,6 +22,7 @@ import java.util.Comparator;
 import java.util.Iterator;
 import java.util.Map.Entry;
 import java.util.NavigableMap;
+import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -44,7 +45,8 @@ import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
 @InterfaceAudience.Private
 public class PartitionedGSet implements GSet {
 
-  private static final int DEFAULT_PARTITION_CAPACITY = 2027;
+  private static final int DEFAULT_PARTITION_CAPACITY = 65536; // 4096; // 
5120; // 2048; // 1027;
+  private static final float DEFAULT_PARTITION_OVERFLOW = 1.8f;
 
   /**
* An ordered map of contiguous segments of elements.
@@ -81,8 +83,11 @@ public class PartitionedGSet implements 
GSet {
   final E rootKey) {
 this.partitions = new TreeMap(comparator);
 this.latchLock = latchLock;
-addNewPartition(rootKey).put(rootKey);
-this.size = 1;
+// addNewPartition(rootKey).put(rootKey);
+// this.size = 1;
+this.size = 0;
+LOG.info("Partition capacity = {}", DEFAULT_PARTITION_CAPACITY);
+LOG.info("Partition overflow factor = {}", DEFAULT_PARTITION_OVERFLOW);
   }
 
   /**
@@ -90,16 +95,19 @@ public class PartitionedGSet implements 
GSet {
* @param key
* @return
*/
-  private PartitionEntry addNewPartition(final K key) {
+  public PartitionEntry addNewPartition(final K key) {
+Entry lastEntry = partitions.lastEntry();
 PartitionEntry lastPart = null;
-if(size > 0)
-  lastPart = partitions.lastEntry().getValue();
+if(lastEntry != null)
+  lastPart = lastEntry.getValue();
 
 PartitionEntry newPart =
 new PartitionEntry(DEFAULT_PARTITION_CAPACITY);
 // assert size == 0 || newPart.partLock.isWriteTopLocked() :
 //  "Must hold write Lock: key = " + key;
-partitions.put(key, newPart);
+PartitionEntry oldPart = partitions.put(key, newPart);
+assert oldPart == null :
+  "RangeMap already has a partition associated with " + key;
 
 LOG.debug("Total GSet size = {}", size);
 LOG.debug("Number of partitions = {}", partitions.size());
@@ -173,7 +181,7 @@ public class PartitionedGSet implements 
GSet {
 
   private PartitionEntry addNewPartitionIfNeeded(
   PartitionEntry curPart, K key) {
-if(curPart.size() < DEFAULT_PARTITION_CAPACITY * 1.1
+if(curPart.size() < DEFAULT_PARTITION_CAPACITY * DEFAULT_PARTITION_OVERFLOW
 || curPart.contains(key)) {
   return curPart;
 }
@@ -197,12 +205,56 @@ public class PartitionedGSet implements 
GSet {
   public void clear() {
 LOG.error("Total GSet size = {}", size);
 LOG.error("Number of partitions = {}", partitions.size());
+printStats();
 // assert latchLock.hasWriteTopLock() : "Must hold write topLock";
 // SHV May need to clear all partitions?
 partitions.clear();
 size = 0;
   }
 
+  private void printStats() {
+int partSizeMin = Integer.MAX_VALUE, partSizeAvg = 0, partSizeMax = 0;
+long totalSize = 0;
+int numEmptyPartitions = 0, numFullPartitions = 0;
+Collection parts = partitions.values();
+Set> entries = partitions.entrySet();
+int i = 0;
+for(Entry e : entries) {
+  PartitionEntry part = e.getValue();
+  int s = part.size;
+  if(s == 0) numEmptyPartitions++;
+  if(s > DEFAULT_PARTITION_CAPACITY) numFullPartitions++;
+  totalSize += s;
+  partSizeMin = (s < partSizeMin ? s : partSizeMin);
+  partSizeMax = (partSizeMax < s ? s : partSizeMax);
+  Class inodeClass = e.getKey().getClass();
+  try {
+long[] key = (long[]) inodeClass.
+getMethod("getNamespaceKey", int.class).invoke(e.getKey(), 2);
+long[] firstKey = new long[0];
+if(part.iter
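
The DEFAULT_PARTITION_OVERFLOW factor added in this commit lets a partition keep absorbing keys until it exceeds DEFAULT_PARTITION_CAPACITY * DEFAULT_PARTITION_OVERFLOW, at which point addNewPartitionIfNeeded starts a new partition. A simplified version of that decision is sketched below; the class and helper names are hypothetical, not the actual INodeMap code.

/** Sketch: decide whether a key can stay in the current partition. */
public class PartitionOverflowSketch {
  static final int PARTITION_CAPACITY = 65536;
  static final float PARTITION_OVERFLOW = 1.8f;

  /** @return true if the current partition should keep absorbing new keys. */
  static boolean fitsCurrentPartition(int currentSize, boolean alreadyContainsKey) {
    // Stay in the partition until it exceeds capacity * overflow,
    // or if the key is already stored there (update in place).
    return currentSize < PARTITION_CAPACITY * PARTITION_OVERFLOW || alreadyContainsKey;
  }

  public static void main(String[] args) {
    System.out.println(fitsCurrentPartition(65536, false));   // true: within 1.8x headroom
    System.out.println(fitsCurrentPartition(120000, false));  // false: start a new partition
  }
}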

[hadoop] branch branch-2.10 updated: HADOOP-17028. ViewFS should initialize mounted target filesystems lazily. Contributed by Abhishek Das (#2260, #3218)

2021-07-20 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 99b952f  HADOOP-17028. ViewFS should initialize mounted target 
filesystems lazily. Contributed by Abhishek Das (#2260, #3218)
99b952f is described below

commit 99b952fcb49640aab05a72108653845968a611d1
Author: Abhishek Das 
AuthorDate: Tue Jul 13 12:47:43 2021 -0700

HADOOP-17028. ViewFS should initialize mounted target filesystems lazily. 
Contributed by Abhishek Das (#2260, #3218)

(cherry picked from commit 1dd03cc4b573270dc960117c3b6c74bb78215caa)
---
 .../org/apache/hadoop/fs/viewfs/InodeTree.java | 51 ++---
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java| 85 +-
 .../java/org/apache/hadoop/fs/viewfs/ViewFs.java   | 42 ---
 .../apache/hadoop/fs/viewfs/TestViewFsConfig.java  |  7 +-
 .../hadoop/fs/viewfs/ViewFileSystemBaseTest.java   | 44 ++-
 .../hadoop/fs/viewfs/TestViewFileSystemHdfs.java   | 76 +++
 .../apache/hadoop/fs/viewfs/TestViewFsHdfs.java| 78 
 7 files changed, 338 insertions(+), 45 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index 0a949fe..779cec8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
+import com.google.common.base.Function;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
@@ -158,8 +159,10 @@ abstract class InodeTree {
   static class INodeLink extends INode {
 final boolean isMergeLink; // true if MergeLink
 final URI[] targetDirLinkList;
-final T targetFileSystem;   // file system object created from the link.
-
+private T targetFileSystem;   // file system object created from the link.
+// Function to initialize file system. Only applicable for simple links
+private Function fileSystemInitMethod;
+private final Object lock = new Object();
 /**
  * Construct a mergeLink
  */
@@ -175,12 +178,14 @@ abstract class InodeTree {
  * Construct a simple link (i.e. not a mergeLink)
  */
 INodeLink(final String pathToNode, final UserGroupInformation aUgi,
-final T targetFs, final URI aTargetDirLink) {
+Function createFileSystemMethod,
+final URI aTargetDirLink) {
   super(pathToNode, aUgi);
-  targetFileSystem = targetFs;
+  targetFileSystem = null;
   targetDirLinkList = new URI[1];
   targetDirLinkList[0] = aTargetDirLink;
   isMergeLink = false;
+  this.fileSystemInitMethod = createFileSystemMethod;
 }
 
 /**
@@ -196,6 +201,33 @@ abstract class InodeTree {
   }
   return new Path(result.toString());
 }
+
+/**
+ * Get the instance of FileSystem to use, creating one if needed.
+ * @return An Initialized instance of T
+ * @throws IOException
+ */
+public T getTargetFileSystem() throws IOException {
+  if (targetFileSystem != null) {
+return targetFileSystem;
+  }
+  // For non NFLY and MERGE links, we initialize the FileSystem when the
+  // corresponding mount path is accessed.
+  if (targetDirLinkList.length == 1) {
+synchronized (lock) {
+  if (targetFileSystem != null) {
+return targetFileSystem;
+  }
+  targetFileSystem = fileSystemInitMethod.apply(targetDirLinkList[0]);
+  if (targetFileSystem == null) {
+throw new IOException(
+"Could not initialize target File System for URI : " +
+targetDirLinkList[0]);
+  }
+}
+  }
+  return targetFileSystem;
+}
   }
 
 
@@ -258,7 +290,7 @@ abstract class InodeTree {
   getTargetFileSystem(targetsListURI), targetsListURI);
 } else {
   newLink = new INodeLink(fullPath, aUgi,
-  getTargetFileSystem(new URI(target)), new URI(target));
+  initAndGetTargetFs(), new URI(target));
 }
 curInode.addLink(iPath, newLink);
 mountPoints.add(new MountPoint(src, newLink));
@@ -267,14 +299,13 @@ abstract class InodeTree {
   /**
* Below the "public" methods of InodeTree
*/
-  
+
   /**
* The user of this class must subclass and implement the following
* 3 abstract methods.
* @throws IOException 
*/
-  protected abstract T getTargetFileSystem(final URI uri)
-throws UnsupportedFileSystemException, URISyntaxException, IOException;
+  protected abstract Function initAndGetTargetFs
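
The getTargetFileSystem() change quoted above is a double-checked lazy initialization: the target file system is only built the first time its mount point is accessed, and the synchronized block prevents two threads from initializing it concurrently, which avoids paying connection cost for mount points that are never touched. A generic standalone sketch of the same pattern follows; the names are illustrative and not part of the ViewFS API, and volatile is added here for the generic case.

import java.io.IOException;
import java.net.URI;
import java.util.function.Function;

/** Sketch: lazily create an expensive target object the first time it is needed. */
public class LazyTargetSketch<T> {
  private volatile T target;                    // created on first access
  private final Function<URI, T> initMethod;    // how to build it
  private final URI targetUri;
  private final Object lock = new Object();

  public LazyTargetSketch(Function<URI, T> initMethod, URI targetUri) {
    this.initMethod = initMethod;
    this.targetUri = targetUri;
  }

  public T get() throws IOException {
    T result = target;
    if (result != null) {                       // fast path: already initialized
      return result;
    }
    synchronized (lock) {                       // slow path: initialize exactly once
      if (target == null) {
        target = initMethod.apply(targetUri);
        if (target == null) {
          throw new IOException("Could not initialize target for URI: " + targetUri);
        }
      }
      return target;
    }
  }

  public static void main(String[] args) throws IOException {
    LazyTargetSketch<String> lazy =
        new LazyTargetSketch<>(uri -> "connected to " + uri, URI.create("hdfs://nn1"));
    System.out.println(lazy.get());             // initializes on first call
    System.out.println(lazy.get());             // reuses the cached instance
  }
}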

[hadoop] 02/03: Add namespace key for INode. (shv)

2021-07-16 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 1829b58b77341b83c9c73f356fa479d9940f6a24
Author: Konstantin V Shvachko 
AuthorDate: Fri May 7 17:51:58 2021 -0700

Add namespace key for INode. (shv)
---
 .../org/apache/hadoop/util/PartitionedGSet.java| 80 ++
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |  3 +
 .../apache/hadoop/hdfs/server/namenode/INode.java  | 40 ++-
 .../hadoop/hdfs/server/namenode/INodeMap.java  | 71 +--
 4 files changed, 176 insertions(+), 18 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
index 4b0cdc9..7ebb1b3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -22,6 +22,7 @@ import java.util.Comparator;
 import java.util.Iterator;
 import java.util.Map.Entry;
 import java.util.NavigableMap;
+import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -44,7 +45,8 @@ import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
 @InterfaceAudience.Private
 public class PartitionedGSet implements GSet {
 
-  private static final int DEFAULT_PARTITION_CAPACITY = 2027;
+  private static final int DEFAULT_PARTITION_CAPACITY = 65536; // 4096; // 
5120; // 2048; // 1027;
+  private static final float DEFAULT_PARTITION_OVERFLOW = 1.8f;
 
   /**
* An ordered map of contiguous segments of elements.
@@ -81,8 +83,11 @@ public class PartitionedGSet implements 
GSet {
   final E rootKey) {
 this.partitions = new TreeMap(comparator);
 this.latchLock = latchLock;
-addNewPartition(rootKey).put(rootKey);
-this.size = 1;
+// addNewPartition(rootKey).put(rootKey);
+// this.size = 1;
+this.size = 0;
+LOG.info("Partition capacity = {}", DEFAULT_PARTITION_CAPACITY);
+LOG.info("Partition overflow factor = {}", DEFAULT_PARTITION_OVERFLOW);
   }
 
   /**
@@ -90,16 +95,19 @@ public class PartitionedGSet implements 
GSet {
* @param key
* @return
*/
-  private PartitionEntry addNewPartition(final K key) {
+  public PartitionEntry addNewPartition(final K key) {
+Entry lastEntry = partitions.lastEntry();
 PartitionEntry lastPart = null;
-if(size > 0)
-  lastPart = partitions.lastEntry().getValue();
+if(lastEntry != null)
+  lastPart = lastEntry.getValue();
 
 PartitionEntry newPart =
 new PartitionEntry(DEFAULT_PARTITION_CAPACITY);
 // assert size == 0 || newPart.partLock.isWriteTopLocked() :
 //  "Must hold write Lock: key = " + key;
-partitions.put(key, newPart);
+PartitionEntry oldPart = partitions.put(key, newPart);
+assert oldPart == null :
+  "RangeMap already has a partition associated with " + key;
 
 LOG.debug("Total GSet size = {}", size);
 LOG.debug("Number of partitions = {}", partitions.size());
@@ -173,7 +181,7 @@ public class PartitionedGSet implements 
GSet {
 
   private PartitionEntry addNewPartitionIfNeeded(
   PartitionEntry curPart, K key) {
-if(curPart.size() < DEFAULT_PARTITION_CAPACITY * 1.1
+if(curPart.size() < DEFAULT_PARTITION_CAPACITY * DEFAULT_PARTITION_OVERFLOW
 || curPart.contains(key)) {
   return curPart;
 }
@@ -197,12 +205,56 @@ public class PartitionedGSet implements 
GSet {
   public void clear() {
 LOG.error("Total GSet size = {}", size);
 LOG.error("Number of partitions = {}", partitions.size());
+printStats();
 // assert latchLock.hasWriteTopLock() : "Must hold write topLock";
 // SHV May need to clear all partitions?
 partitions.clear();
 size = 0;
   }
 
+  private void printStats() {
+int partSizeMin = Integer.MAX_VALUE, partSizeAvg = 0, partSizeMax = 0;
+long totalSize = 0;
+int numEmptyPartitions = 0, numFullPartitions = 0;
+Collection parts = partitions.values();
+Set> entries = partitions.entrySet();
+int i = 0;
+for(Entry e : entries) {
+  PartitionEntry part = e.getValue();
+  int s = part.size;
+  if(s == 0) numEmptyPartitions++;
+  if(s > DEFAULT_PARTITION_CAPACITY) numFullPartitions++;
+  totalSize += s;
+  partSizeMin = (s < partSizeMin ? s : partSizeMin);
+  partSizeMax = (partSizeMax < s ? s : partSizeMax);
+  Class inodeClass = e.getKey().getClass();
+  try {
+long[] key = (long[]) inodeClass.
+getMethod("getNamespaceKey", int.class).invoke(e.getKey(), 2);
+long[] firstKey = new long[0];
+if(part.iter

[hadoop] 03/03: HDFS-16125. [FGL] Fix the iterator for PartitionedGSet. Contributed by Xing Lin. (#3197)

2021-07-16 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit b1e2c07379e7d46686f570220621a5e94d05e795
Author: Xing Lin 
AuthorDate: Fri Jul 16 13:04:59 2021 -0700

HDFS-16125. [FGL] Fix the iterator for PartitionedGSet. Contributed by Xing Lin. (#3197)
---
 .../java/org/apache/hadoop/util/LatchLock.java |   4 +-
 .../org/apache/hadoop/util/PartitionedGSet.java|  35 ++-
 .../apache/hadoop/util/TestPartitionedGSet.java| 270 +
 .../hadoop/hdfs/server/namenode/INodeMap.java  |   4 +-
 4 files changed, 300 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
index 41e33da..fd98391 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
@@ -30,7 +30,7 @@ public abstract class LatchLock {
   protected abstract boolean isReadTopLocked();
   /** @return true topLock is locked for write by any thread */
   protected abstract boolean isWriteTopLocked();
-  protected abstract void readTopdUnlock();
+  protected abstract void readTopUnlock();
   protected abstract void writeTopUnlock();
 
   protected abstract boolean hasReadChildLock();
@@ -46,7 +46,7 @@ public abstract class LatchLock {
   // Public APIs to use with the class
   public void readLock() {
 readChildLock();
-readTopdUnlock();
+readTopUnlock();
   }
 
   public void readUnlock() {
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
index 7ebb1b3..f3569cc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -24,7 +24,7 @@ import java.util.Map.Entry;
 import java.util.NavigableMap;
 import java.util.Set;
 import java.util.TreeMap;
-
+import java.util.NoSuchElementException;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
@@ -79,8 +79,7 @@ public class PartitionedGSet implements 
GSet {
 
   public PartitionedGSet(final int capacity,
   final Comparator comparator,
-  final LatchLock latchLock,
-  final E rootKey) {
+  final LatchLock latchLock) {
 this.partitions = new TreeMap(comparator);
 this.latchLock = latchLock;
 // addNewPartition(rootKey).put(rootKey);
@@ -275,17 +274,36 @@ public class PartitionedGSet implements 
GSet {
* modifying other partitions, while iterating through the current one.
*/
   private class EntryIterator implements Iterator {
-private final Iterator keyIterator;
+private Iterator keyIterator;
 private Iterator partitionIterator;
 
 public EntryIterator() {
   keyIterator = partitions.keySet().iterator();
-  K curKey = partitions.firstKey();
-  partitionIterator = getPartition(curKey).iterator();
+ 
+  if (!keyIterator.hasNext()) {
+partitionIterator = null;
+return;
+  }
+
+  K firstKey = keyIterator.next();
+  partitionIterator = partitions.get(firstKey).iterator();
 }
 
 @Override
 public boolean hasNext() {
+
+  // Special case: an iterator was created for an empty PartitionedGSet.
+  // Check whether new partitions have been added since then.
+  if (partitionIterator == null) {
+if (partitions.size() == 0) {
+  return false;
+} else {
+  keyIterator = partitions.keySet().iterator();
+  K nextKey = keyIterator.next();
+  partitionIterator = partitions.get(nextKey).iterator();
+}
+  }
+
   while(!partitionIterator.hasNext()) {
 if(!keyIterator.hasNext()) {
   return false;
@@ -298,9 +316,8 @@ public class PartitionedGSet implements 
GSet {
 
 @Override
 public E next() {
-  while(!partitionIterator.hasNext()) {
-K curKey = keyIterator.next();
-partitionIterator = getPartition(curKey).iterator();
+  if (!hasNext()) {
+throw new NoSuchElementException("No more elements in this set.");
   }
   return partitionIterator.next();
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPartitionedGSet.java
new file mode 100644
index 000..9ae772c
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/h

[hadoop] branch fgl updated (9a60e53 -> b1e2c07)

2021-07-16 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a change to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


omit 9a60e53  Add namespace key for INode. (shv)
omit 1f1a0c4  INodeMap with PartitionedGSet and per-partition locking.
 add 1b69942  HDFS-15923. RBF: Authentication failed when rename across 
sub clusters (#2819). Contributed by zhuobin zheng.
 add 7f93349  HADOOP-17644. Add back the exceptions removed by HADOOP-17432 
for compatibility. Contributed by Quan Li.
 add 6d6766b  HADOOP-17690. Improve the log for The DecayRpcScheduler. 
Contributed by Bhavik Patel.
 add 359c0c1  HDFS-16003. ProcessReport print invalidatedBlocks should 
judge debug level at first. Contributed by lei w.
 add 8f850b4  HADOOP-17678. Add Dockerfile for Centos 7 (#2967)
 add 8d5cc98  HDFS-15997. Implement dfsadmin -provisionSnapshotTrash -all 
(#2958)
 add 9143088   HADOOP-17665  Ignore missing keystore configuration in 
reloading mechanism
 add b944084  HDFS-16007. Deserialization of ReplicaState should avoid 
throwing ArrayIndexOutOfBoundsException (#2982)
 add c80f074  HADOOP-17686. Avoid potential NPE by using Path#getParentPath 
API in hadoop-huaweicloud (#2990)
 add 29105ff  HADOOP-17683. Update commons-io to 2.8.0 (#2974)
 add 626be24  YARN-10571. Refactor dynamic queue handling logic. 
Contributed by Andras Gyori.
 add fdd20a3  HADOOP-17689. Avoid Potential NPE in org.apache.hadoop.fs 
(#3008)
 add 35ca1dc  HADOOP-17685. Fix junit deprecation warnings in hadoop-common 
module. (#2983)
 add 2c4ab72  HADOOP-16822. Provide source artifacts for hadoop-client-api. 
Contributed by Karel Kolman.
 add 2f67a26  Bump acorn (#3003)
 add 881ab4e  Bump underscore (#2997)
 add 2e58fb6  HADOOP-17693. Dockerfile for building on Centos 8 (#3006)
 add 5404ab4  MAPREDUCE-7343. Increase the job name max length in mapred 
job -list. (#2995). Contributed by Ayush Saxena.
 add d2b0675  YARN-10737: Fix typos in CapacityScheduler#schedule. (#2911)
 add e7f0e80  YARN-10761: Add more event type to RM Dispatcher event 
metrics. Contributed by Qi Zhu.
 add 344cacc  YARN-10545. Improve the readability of diagnostics log in 
yarn-ui2 web page. #2540
 add fcd4140  HDFS-15988. Stabilise HDFS Pre-Commit. (#2860). Contributed 
by Ayush Saxena.
 add d92a25b  YARN-10555. Missing access check before getAppAttempts (#2608)
 add 8891e5c  YARN-10763. Add the number of containers assigned per second 
metrics to ClusterMetrics. Contributed by chaosju.
 add eb72628  YARN-10258. Add metrics for 'ApplicationsRunning' in 
NodeManager. Contributed by ANANDA G B.
 add 110cda3  HADOOP-17703. checkcompatibility.py errors out when 
specifying annotations. (#3017)
 add 86729e1  HADOOP-17699. Remove hardcoded SunX509 usage from SSLFactory. 
(#3016)
 add acd712c  HDFS-15877. BlockReconstructionWork should resetTargets() 
before BlockManager#validateReconstructionWork return false (#2747)
 add f8b0063  Bump node-sass (#3004)
 add 2960d83  HADOOP-17426. Upgrade to hadoop-thirdparty-1.1.0. (#3024)
 add 43bf009  HDFS-15757 RBF: Improving Router Connection Management (#2651)
 add 3f5a66c  HADOOP-17663. Remove useless property 
hadoop.assemblies.version in pom file. (#3025)
 add f724792  HDFS-16018. Optimize the display of hdfs "count -e" or "count 
-t" com… (#2994)
 add e4062ad  HADOOP-17115. Replace Guava Sets usage by Hadoop's own Sets 
in hadoop-common and hadoop-tools (#2985)
 add 0d59500  HADOOP-14922. Build of Mapreduce Native Task module fails 
with unknown opcode "bswap". Contributed by Anup Halarnkar.
 add c807381  YARN-9279. Remove the old hamlet package. (#2986)
 add 43e77ca  YARN-10779. Add option to disable lowercase conversion in 
GetApplicationsRequestPBImpl and ApplicationSubmissionContextPBImpl. 
Contributed by Peter Bacsko
 add d146ab3  YARN-10766. [UI2] Bump moment-timezone to 0.5.33. Contributed 
by Andras Gyori
 add ad923ad  HADOOP-17718. Explicitly set locale in the Dockerfile. (#3034)
 add c70ee2d  HADOOP-17700. ExitUtil#halt info log should log HaltException 
(#3015)
 add 1e44bdb  YARN-7769. FS QueueManager should not create default queue at 
init. Contributed by Benjamin Teke
 add 6bb0892  YARN-10753. Document the removal of FS default queue 
creation. Contributed by Benjamin Teke
 add 2bbeae3  HDFS-15790. Make ProtobufRpcEngineProtos and 
ProtobufRpcEngineProtos2 Co-Exist (#2767)
 add 1576f81  HADOOP-17723. [build] fix the Dockerfile for ARM (#3037)
 add c665ab0  HADOOP-17670. S3AFS and ABFS to log IOStats at DEBUG mode or 
optionally at INFO level in close() (#2963)
 add 5f40003  HADOOP-17705. S3A to add Config to set AWS region (#3020)
 add 59172ad  YARN-10771. Add cluster metric for size of 
SchedulerEventQueue and RMEventQueue. Contributed by chaosju.
 add 2a206c

[hadoop] 01/03: INodeMap with PartitionedGSet and per-partition locking.

2021-07-16 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 0e7d216134cc0c93b4ed5861430ef79ce0a9c7ab
Author: Konstantin V Shvachko 
AuthorDate: Fri May 7 17:47:37 2021 -0700

INodeMap with PartitionedGSet and per-partition locking.
---
 .../java/org/apache/hadoop/util/LatchLock.java |  64 +
 .../org/apache/hadoop/util/PartitionedGSet.java| 263 +
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |  92 ++-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |   2 +-
 .../hadoop/hdfs/server/namenode/FSImage.java   |  29 ++-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |   9 +-
 .../hdfs/server/namenode/FSNamesystemLock.java |  96 +++-
 .../hadoop/hdfs/server/namenode/INodeMap.java  | 148 ++--
 .../java/org/apache/hadoop/hdfs/DFSTestUtil.java   |   2 +
 .../hadoop/hdfs/server/namenode/TestINodeFile.java |  39 ++-
 10 files changed, 682 insertions(+), 62 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
new file mode 100644
index 000..41e33da
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+/**
+ * LatchLock controls two hierarchical Read/Write locks:
+ * the topLock and the childLock.
+ * Typically an operation starts with the topLock already acquired.
+ * To acquire child lock LatchLock will
+ * first acquire the childLock, and then release the topLock.
+ */
+public abstract class LatchLock {
+  // Interfaces methods to be defined for subclasses
+  /** @return true topLock is locked for read by any thread */
+  protected abstract boolean isReadTopLocked();
+  /** @return true topLock is locked for write by any thread */
+  protected abstract boolean isWriteTopLocked();
+  protected abstract void readTopdUnlock();
+  protected abstract void writeTopUnlock();
+
+  protected abstract boolean hasReadChildLock();
+  protected abstract void readChildLock();
+  protected abstract void readChildUnlock();
+
+  protected abstract boolean hasWriteChildLock();
+  protected abstract void writeChildLock();
+  protected abstract void writeChildUnlock();
+
+  protected abstract LatchLock clone();
+
+  // Public APIs to use with the class
+  public void readLock() {
+readChildLock();
+readTopdUnlock();
+  }
+
+  public void readUnlock() {
+readChildUnlock();
+  }
+
+  public void writeLock() {
+writeChildLock();
+writeTopUnlock();
+  }
+
+  public void writeUnlock() {
+writeChildUnlock();
+  }
+}
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
new file mode 100644
index 000..4b0cdc9
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -0,0 +1,263 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterat

[hadoop] branch branch-3.1 updated: HADOOP-17028. ViewFS should initialize mounted target filesystems lazily. Contributed by Abhishek Das (#2260)

2021-07-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new f7f753d  HADOOP-17028. ViewFS should initialize mounted target 
filesystems lazily. Contributed by Abhishek Das (#2260)
f7f753d is described below

commit f7f753d6fd41a0ac6590e70cbef21c55d201ec2d
Author: Abhishek Das 
AuthorDate: Tue Jul 13 12:47:43 2021 -0700

HADOOP-17028. ViewFS should initialize mounted target filesystems lazily. 
Contributed by Abhishek Das (#2260)

(cherry picked from commit 1dd03cc4b573270dc960117c3b6c74bb78215caa)
---
 .../org/apache/hadoop/fs/viewfs/InodeTree.java | 56 ++
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java| 85 +-
 .../java/org/apache/hadoop/fs/viewfs/ViewFs.java   | 38 +++---
 .../apache/hadoop/fs/viewfs/TestViewFsConfig.java  |  3 +-
 .../hadoop/fs/viewfs/ViewFileSystemBaseTest.java   | 44 +++
 .../hadoop/fs/viewfs/TestViewFileSystemHdfs.java   | 73 +++
 ...ViewFileSystemOverloadSchemeWithHdfsScheme.java |  6 +-
 .../apache/hadoop/fs/viewfs/TestViewFsHdfs.java| 78 
 ...stViewFileSystemOverloadSchemeWithDFSAdmin.java |  6 +-
 9 files changed, 339 insertions(+), 50 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index 422e733..65ca2f8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
+import java.util.function.Function;
 import com.google.common.base.Preconditions;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -243,7 +244,10 @@ abstract class InodeTree {
*/
   static class INodeLink extends INode {
 final URI[] targetDirLinkList;
-final T targetFileSystem;   // file system object created from the link.
+private T targetFileSystem;   // file system object created from the link.
+// Function to initialize file system. Only applicable for simple links
+private Function fileSystemInitMethod;
+private final Object lock = new Object();
 
 /**
  * Construct a mergeLink or nfly.
@@ -259,11 +263,13 @@ abstract class InodeTree {
  * Construct a simple link (i.e. not a mergeLink).
  */
 INodeLink(final String pathToNode, final UserGroupInformation aUgi,
-final T targetFs, final URI aTargetDirLink) {
+Function createFileSystemMethod,
+final URI aTargetDirLink) {
   super(pathToNode, aUgi);
-  targetFileSystem = targetFs;
+  targetFileSystem = null;
   targetDirLinkList = new URI[1];
   targetDirLinkList[0] = aTargetDirLink;
+  this.fileSystemInitMethod = createFileSystemMethod;
 }
 
 /**
@@ -284,7 +290,30 @@ abstract class InodeTree {
   return false;
 }
 
-public T getTargetFileSystem() {
+/**
+ * Get the instance of FileSystem to use, creating one if needed.
+ * @return An Initialized instance of T
+ * @throws IOException
+ */
+public T getTargetFileSystem() throws IOException {
+  if (targetFileSystem != null) {
+return targetFileSystem;
+  }
+  // For non NFLY and MERGE links, we initialize the FileSystem when the
+  // corresponding mount path is accessed.
+  if (targetDirLinkList.length == 1) {
+synchronized (lock) {
+  if (targetFileSystem != null) {
+return targetFileSystem;
+  }
+  targetFileSystem = fileSystemInitMethod.apply(targetDirLinkList[0]);
+  if (targetFileSystem == null) {
+throw new IOException(
+"Could not initialize target File System for URI : " +
+targetDirLinkList[0]);
+  }
+}
+  }
   return targetFileSystem;
 }
   }
@@ -345,7 +374,7 @@ abstract class InodeTree {
 switch (linkType) {
 case SINGLE:
   newLink = new INodeLink(fullPath, aUgi,
-  getTargetFileSystem(new URI(target)), new URI(target));
+  initAndGetTargetFs(), new URI(target));
   break;
 case SINGLE_FALLBACK:
 case MERGE_SLASH:
@@ -371,8 +400,7 @@ abstract class InodeTree {
* 3 abstract methods.
* @throws IOException
*/
-  protected abstract T getTargetFileSystem(URI uri)
-  throws UnsupportedFileSystemException, URISyntaxException, IOException;
+  protected abstract Function initAndGetTargetFs();
 
   protected abstract T getTargetFileSystem(INodeDir dir)
   throws URISyntaxException, IOException;
@@ -568,7 +596,7 @@ abstract class InodeTree {
 if (isMergeSlash

[hadoop] branch branch-3.2 updated: HADOOP-17028. ViewFS should initialize mounted target filesystems lazily. Contributed by Abhishek Das (#2260)

2021-07-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new f983203  HADOOP-17028. ViewFS should initialize mounted target 
filesystems lazily. Contributed by Abhishek Das (#2260)
f983203 is described below

commit f9832031c2c1b1cb521a187df55a3e11baf66b66
Author: Abhishek Das 
AuthorDate: Tue Jul 13 12:47:43 2021 -0700

HADOOP-17028. ViewFS should initialize mounted target filesystems lazily. 
Contributed by Abhishek Das (#2260)

(cherry picked from commit 1dd03cc4b573270dc960117c3b6c74bb78215caa)
---
 .../org/apache/hadoop/fs/viewfs/InodeTree.java | 56 ++
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java| 85 +-
 .../java/org/apache/hadoop/fs/viewfs/ViewFs.java   | 38 +++---
 .../apache/hadoop/fs/viewfs/TestViewFsConfig.java  |  3 +-
 .../hadoop/fs/viewfs/ViewFileSystemBaseTest.java   | 44 +++
 .../hadoop/fs/viewfs/TestViewFileSystemHdfs.java   | 73 +++
 ...ViewFileSystemOverloadSchemeWithHdfsScheme.java |  6 +-
 .../apache/hadoop/fs/viewfs/TestViewFsHdfs.java| 78 
 ...stViewFileSystemOverloadSchemeWithDFSAdmin.java |  6 +-
 9 files changed, 339 insertions(+), 50 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index 422e733..65ca2f8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
+import java.util.function.Function;
 import com.google.common.base.Preconditions;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -243,7 +244,10 @@ abstract class InodeTree {
*/
   static class INodeLink extends INode {
 final URI[] targetDirLinkList;
-final T targetFileSystem;   // file system object created from the link.
+private T targetFileSystem;   // file system object created from the link.
+// Function to initialize file system. Only applicable for simple links
+private Function fileSystemInitMethod;
+private final Object lock = new Object();
 
 /**
  * Construct a mergeLink or nfly.
@@ -259,11 +263,13 @@ abstract class InodeTree {
  * Construct a simple link (i.e. not a mergeLink).
  */
 INodeLink(final String pathToNode, final UserGroupInformation aUgi,
-final T targetFs, final URI aTargetDirLink) {
+Function createFileSystemMethod,
+final URI aTargetDirLink) {
   super(pathToNode, aUgi);
-  targetFileSystem = targetFs;
+  targetFileSystem = null;
   targetDirLinkList = new URI[1];
   targetDirLinkList[0] = aTargetDirLink;
+  this.fileSystemInitMethod = createFileSystemMethod;
 }
 
 /**
@@ -284,7 +290,30 @@ abstract class InodeTree {
   return false;
 }
 
-public T getTargetFileSystem() {
+/**
+ * Get the instance of FileSystem to use, creating one if needed.
+ * @return An Initialized instance of T
+ * @throws IOException
+ */
+public T getTargetFileSystem() throws IOException {
+  if (targetFileSystem != null) {
+return targetFileSystem;
+  }
+  // For non NFLY and MERGE links, we initialize the FileSystem when the
+  // corresponding mount path is accessed.
+  if (targetDirLinkList.length == 1) {
+synchronized (lock) {
+  if (targetFileSystem != null) {
+return targetFileSystem;
+  }
+  targetFileSystem = fileSystemInitMethod.apply(targetDirLinkList[0]);
+  if (targetFileSystem == null) {
+throw new IOException(
+"Could not initialize target File System for URI : " +
+targetDirLinkList[0]);
+  }
+}
+  }
   return targetFileSystem;
 }
   }
@@ -345,7 +374,7 @@ abstract class InodeTree {
 switch (linkType) {
 case SINGLE:
   newLink = new INodeLink(fullPath, aUgi,
-  getTargetFileSystem(new URI(target)), new URI(target));
+  initAndGetTargetFs(), new URI(target));
   break;
 case SINGLE_FALLBACK:
 case MERGE_SLASH:
@@ -371,8 +400,7 @@ abstract class InodeTree {
* 3 abstract methods.
* @throws IOException
*/
-  protected abstract T getTargetFileSystem(URI uri)
-  throws UnsupportedFileSystemException, URISyntaxException, IOException;
+  protected abstract Function initAndGetTargetFs();
 
   protected abstract T getTargetFileSystem(INodeDir dir)
   throws URISyntaxException, IOException;
@@ -568,7 +596,7 @@ abstract class InodeTree {
 if (isMergeSlash

[hadoop] branch branch-3.3 updated: HADOOP-17028. ViewFS should initialize mounted target filesystems lazily. Contributed by Abhishek Das (#2260)

2021-07-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 450dae7  HADOOP-17028. ViewFS should initialize mounted target 
filesystems lazily. Contributed by Abhishek Das (#2260)
450dae7 is described below

commit 450dae73834236203f4ce1c7256b6067407c666c
Author: Abhishek Das 
AuthorDate: Tue Jul 13 12:47:43 2021 -0700

HADOOP-17028. ViewFS should initialize mounted target filesystems lazily. 
Contributed by Abhishek Das (#2260)

(cherry picked from commit 1dd03cc4b573270dc960117c3b6c74bb78215caa)
---
 .../org/apache/hadoop/fs/viewfs/InodeTree.java |  79 ++--
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java| 101 -
 .../fs/viewfs/ViewFileSystemOverloadScheme.java|  11 ++-
 .../java/org/apache/hadoop/fs/viewfs/ViewFs.java   |  38 ++--
 .../hadoop/fs/viewfs/TestRegexMountPoint.java  |  12 ++-
 .../apache/hadoop/fs/viewfs/TestViewFsConfig.java  |   3 +-
 .../hadoop/fs/viewfs/ViewFileSystemBaseTest.java   |  44 +
 .../hadoop/fs/viewfs/TestViewFileSystemHdfs.java   |  73 +++
 ...ViewFileSystemOverloadSchemeWithHdfsScheme.java |   6 +-
 .../apache/hadoop/fs/viewfs/TestViewFsHdfs.java|  78 
 ...stViewFileSystemOverloadSchemeWithDFSAdmin.java |   6 +-
 11 files changed, 379 insertions(+), 72 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index fd7b561..9a5671c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
+import java.util.function.Function;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -257,7 +258,10 @@ abstract class InodeTree {
*/
   static class INodeLink extends INode {
 final URI[] targetDirLinkList;
-final T targetFileSystem;   // file system object created from the link.
+private T targetFileSystem;   // file system object created from the link.
+// Function to initialize file system. Only applicable for simple links
+private Function fileSystemInitMethod;
+private final Object lock = new Object();
 
 /**
  * Construct a mergeLink or nfly.
@@ -273,11 +277,13 @@ abstract class InodeTree {
  * Construct a simple link (i.e. not a mergeLink).
  */
 INodeLink(final String pathToNode, final UserGroupInformation aUgi,
-final T targetFs, final URI aTargetDirLink) {
+Function createFileSystemMethod,
+final URI aTargetDirLink) {
   super(pathToNode, aUgi);
-  targetFileSystem = targetFs;
+  targetFileSystem = null;
   targetDirLinkList = new URI[1];
   targetDirLinkList[0] = aTargetDirLink;
+  this.fileSystemInitMethod = createFileSystemMethod;
 }
 
 /**
@@ -298,7 +304,30 @@ abstract class InodeTree {
   return false;
 }
 
-public T getTargetFileSystem() {
+/**
+ * Get the instance of FileSystem to use, creating one if needed.
+ * @return An Initialized instance of T
+ * @throws IOException
+ */
+public T getTargetFileSystem() throws IOException {
+  if (targetFileSystem != null) {
+return targetFileSystem;
+  }
+  // For non NFLY and MERGE links, we initialize the FileSystem when the
+  // corresponding mount path is accessed.
+  if (targetDirLinkList.length == 1) {
+synchronized (lock) {
+  if (targetFileSystem != null) {
+return targetFileSystem;
+  }
+  targetFileSystem = fileSystemInitMethod.apply(targetDirLinkList[0]);
+  if (targetFileSystem == null) {
+throw new IOException(
+"Could not initialize target File System for URI : " +
+targetDirLinkList[0]);
+  }
+}
+  }
   return targetFileSystem;
 }
   }
@@ -359,7 +388,7 @@ abstract class InodeTree {
 switch (linkType) {
 case SINGLE:
   newLink = new INodeLink(fullPath, aUgi,
-  getTargetFileSystem(new URI(target)), new URI(target));
+  initAndGetTargetFs(), new URI(target));
   break;
 case SINGLE_FALLBACK:
 case MERGE_SLASH:
@@ -385,8 +414,7 @@ abstract class InodeTree {
* 3 abstract methods.
* @throws IOException
*/
-  protected abstract T getTargetFileSystem(URI uri)
-  throws UnsupportedFileSystemException, URISyntaxException, IOException;
+  protected abstract Function initAndGetTargetFs();
 
   protected 

[hadoop] branch trunk updated: HADOOP-17028. ViewFS should initialize mounted target filesystems lazily. Contributed by Abhishek Das (#2260)

2021-07-13 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1dd03cc  HADOOP-17028. ViewFS should initialize mounted target 
filesystems lazily. Contributed by Abhishek Das (#2260)
1dd03cc is described below

commit 1dd03cc4b573270dc960117c3b6c74bb78215caa
Author: Abhishek Das 
AuthorDate: Tue Jul 13 12:47:43 2021 -0700

HADOOP-17028. ViewFS should initialize mounted target filesystems lazily. 
Contributed by Abhishek Das (#2260)
---
 .../org/apache/hadoop/fs/viewfs/InodeTree.java |  79 ++--
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java| 101 -
 .../fs/viewfs/ViewFileSystemOverloadScheme.java|  11 ++-
 .../java/org/apache/hadoop/fs/viewfs/ViewFs.java   |  38 ++--
 .../hadoop/fs/viewfs/TestRegexMountPoint.java  |  12 ++-
 .../apache/hadoop/fs/viewfs/TestViewFsConfig.java  |   3 +-
 .../hadoop/fs/viewfs/ViewFileSystemBaseTest.java   |  44 +
 .../hadoop/fs/viewfs/TestViewFileSystemHdfs.java   |  73 +++
 ...ViewFileSystemOverloadSchemeWithHdfsScheme.java |   6 +-
 .../apache/hadoop/fs/viewfs/TestViewFsHdfs.java|  78 
 ...stViewFileSystemOverloadSchemeWithDFSAdmin.java |   6 +-
 11 files changed, 379 insertions(+), 72 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index 79c323a..1b9cf67 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
+import java.util.function.Function;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -257,7 +258,10 @@ abstract class InodeTree {
*/
   static class INodeLink extends INode {
 final URI[] targetDirLinkList;
-final T targetFileSystem;   // file system object created from the link.
+private T targetFileSystem;   // file system object created from the link.
+// Function to initialize file system. Only applicable for simple links
+private Function fileSystemInitMethod;
+private final Object lock = new Object();
 
 /**
  * Construct a mergeLink or nfly.
@@ -273,11 +277,13 @@ abstract class InodeTree {
  * Construct a simple link (i.e. not a mergeLink).
  */
 INodeLink(final String pathToNode, final UserGroupInformation aUgi,
-final T targetFs, final URI aTargetDirLink) {
+Function createFileSystemMethod,
+final URI aTargetDirLink) {
   super(pathToNode, aUgi);
-  targetFileSystem = targetFs;
+  targetFileSystem = null;
   targetDirLinkList = new URI[1];
   targetDirLinkList[0] = aTargetDirLink;
+  this.fileSystemInitMethod = createFileSystemMethod;
 }
 
 /**
@@ -298,7 +304,30 @@ abstract class InodeTree {
   return false;
 }
 
-public T getTargetFileSystem() {
+/**
+ * Get the instance of FileSystem to use, creating one if needed.
+ * @return An Initialized instance of T
+ * @throws IOException
+ */
+public T getTargetFileSystem() throws IOException {
+  if (targetFileSystem != null) {
+return targetFileSystem;
+  }
+  // For non NFLY and MERGE links, we initialize the FileSystem when the
+  // corresponding mount path is accessed.
+  if (targetDirLinkList.length == 1) {
+synchronized (lock) {
+  if (targetFileSystem != null) {
+return targetFileSystem;
+  }
+  targetFileSystem = fileSystemInitMethod.apply(targetDirLinkList[0]);
+  if (targetFileSystem == null) {
+throw new IOException(
+"Could not initialize target File System for URI : " +
+targetDirLinkList[0]);
+  }
+}
+  }
   return targetFileSystem;
 }
   }
@@ -359,7 +388,7 @@ abstract class InodeTree {
 switch (linkType) {
 case SINGLE:
   newLink = new INodeLink(fullPath, aUgi,
-  getTargetFileSystem(new URI(target)), new URI(target));
+  initAndGetTargetFs(), new URI(target));
   break;
 case SINGLE_FALLBACK:
 case MERGE_SLASH:
@@ -385,8 +414,7 @@ abstract class InodeTree {
* 3 abstract methods.
* @throws IOException
*/
-  protected abstract T getTargetFileSystem(URI uri)
-  throws UnsupportedFileSystemException, URISyntaxException, IOException;
+  protected abstract Function initAndGetTargetFs();
 
   protected abstract T getTargetFileSystem(INodeDir dir)
   throws URISyntaxException, IOException;
@@ -58

[hadoop] branch branch-2.10 updated: HDFS-16040. RpcQueueTime metric counts requeued calls as unique events. Contributed by Simbarashe Dzinamarira.

2021-05-27 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 1493254  HDFS-16040. RpcQueueTime metric counts requeued calls as 
unique events. Contributed by Simbarashe Dzinamarira.
1493254 is described below

commit 149325484fb589d75ba09575eb904304eb7b8611
Author: Konstantin V Shvachko 
AuthorDate: Thu May 27 18:44:27 2021 -0700

HDFS-16040. RpcQueueTime metric counts requeued calls as unique events. 
Contributed by Simbarashe Dzinamarira.

(cherry picked from commit 8ce30f51f999c0a80db53a2a96b5be5505d4d151)
---
 .../main/java/org/apache/hadoop/ipc/Server.java|  1 +
 .../namenode/ha/TestConsistentReadsObserver.java   | 54 +-
 2 files changed, 54 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 4b6b0fe..4bc0a72 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -2835,6 +2835,7 @@ public abstract class Server {
  */
 // Re-queue the call and continue
 requeueCall(call);
+call = null;
 continue;
   }
   if (LOG.isDebugEnabled()) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
index f01a511..7ae388b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY;
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -30,7 +32,6 @@ import java.util.Collections;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileContext;
@@ -51,6 +52,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcScheduler;
 import org.apache.hadoop.ipc.Schedulable;
 import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -450,6 +452,56 @@ public class TestConsistentReadsObserver {
 }
   }
 
+  @Test
+  public void testRpcQueueTimeNumOpsMetrics() throws Exception {
+// 0 == not completed, 1 == succeeded, -1 == failed
+final AtomicInteger readStatus = new AtomicInteger(0);
+
+// Making an uncoordinated call, which initialize the proxy
+// to Observer node.
+dfs.getClient().getHAServiceState();
+dfs.mkdir(testPath, FsPermission.getDefault());
+assertSentTo(0);
+
+Thread reader = new Thread(new Runnable() {
+  @Override
+  public void run() {
+try {
+  // this read will block until roll and tail edits happen.
+  dfs.getFileStatus(testPath);
+  readStatus.set(1);
+} catch (IOException e) {
+  e.printStackTrace();
+  readStatus.set(-1);
+}
+  }
+});
+
+reader.start();
+// the reader is still blocking, not succeeded yet.
+assertEquals(0, readStatus.get());
+dfsCluster.rollEditLogAndTail(0);
+// wait a while for all the change to be done
+GenericTestUtils.waitFor(new Supplier() {
+  @Override
+  public Boolean get() {
+return readStatus.get() != 0;
+  }
+}, 100, 1);
+// the reader should have succeed.
+assertEquals(1, readStatus.get());
+
+final int observerIdx = 2;
+NameNode observerNN = dfsCluster.getNameNode(observerIdx);
+MetricsRecordBuilder rpcMetrics =
+getMetrics("RpcActivityForPort"
++ observerNN.getNameNodeAddress().getPort());
+long rpcQueueTimeNumOps = getLongCounter("RpcQueueTimeNumOps", rpcMetrics);
+long rpcProcessingTimeNumOps = getLongCounter("RpcProcessingTimeNumOps",
+rpcMetrics);
+assertEquals(rpcQueueTimeNumOps, rpcProcessingTimeNumOps);

[hadoop] branch branch-3.1 updated: HDFS-16040. RpcQueueTime metric counts requeued calls as unique events. Contributed by Simbarashe Dzinamarira.

2021-05-27 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new a357d4c  HDFS-16040. RpcQueueTime metric counts requeued calls as 
unique events. Contributed by Simbarashe Dzinamarira.
a357d4c is described below

commit a357d4ce607eaefb4ee26ff157ce88074c3e55f2
Author: Konstantin V Shvachko 
AuthorDate: Thu May 27 18:44:27 2021 -0700

HDFS-16040. RpcQueueTime metric counts requeued calls as unique events. 
Contributed by Simbarashe Dzinamarira.

(cherry picked from commit 8ce30f51f999c0a80db53a2a96b5be5505d4d151)
---
 .../main/java/org/apache/hadoop/ipc/Server.java|  1 +
 .../namenode/ha/TestConsistentReadsObserver.java   | 54 ++
 2 files changed, 55 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 265434b..cf0bebb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -2938,6 +2938,7 @@ public abstract class Server {
  */
 // Re-queue the call and continue
 requeueCall(call);
+call = null;
 continue;
   }
   if (LOG.isDebugEnabled()) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
index 854027a..18f987d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY;
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -28,6 +30,7 @@ import java.util.Collections;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -48,6 +51,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcScheduler;
 import org.apache.hadoop.ipc.Schedulable;
 import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -419,6 +423,56 @@ public class TestConsistentReadsObserver {
 }
   }
 
+  @Test
+  public void testRpcQueueTimeNumOpsMetrics() throws Exception {
+// 0 == not completed, 1 == succeeded, -1 == failed
+AtomicInteger readStatus = new AtomicInteger(0);
+
+// Making an uncoordinated call, which initialize the proxy
+// to Observer node.
+dfs.getClient().getHAServiceState();
+dfs.mkdir(testPath, FsPermission.getDefault());
+assertSentTo(0);
+
+Thread reader = new Thread(new Runnable() {
+  @Override
+  public void run() {
+try {
+  // this read will block until roll and tail edits happen.
+  dfs.getFileStatus(testPath);
+  readStatus.set(1);
+} catch (IOException e) {
+  e.printStackTrace();
+  readStatus.set(-1);
+}
+  }
+});
+
+reader.start();
+// the reader is still blocking, not succeeded yet.
+assertEquals(0, readStatus.get());
+dfsCluster.rollEditLogAndTail(0);
+// wait a while for all the change to be done
+GenericTestUtils.waitFor(new Supplier() {
+  @Override
+  public Boolean get() {
+return readStatus.get() != 0;
+  }
+}, 100, 1);
+// the reader should have succeed.
+assertEquals(1, readStatus.get());
+
+final int observerIdx = 2;
+NameNode observerNN = dfsCluster.getNameNode(observerIdx);
+MetricsRecordBuilder rpcMetrics =
+getMetrics("RpcActivityForPort"
++ observerNN.getNameNodeAddress().getPort());
+long rpcQueueTimeNumOps = getLongCounter("RpcQueueTimeNumOps", rpcMetrics);
+long rpcProcessingTimeNumOps = getLongCounter("RpcProcessingTimeNumOps",
+rpcMetrics);
+assertEquals(rpcQueueTimeNumOps, rpcProcessingTimeNumOps);

[hadoop] branch branch-3.2 updated: HDFS-16040. RpcQueueTime metric counts requeued calls as unique events. Contributed by Simbarashe Dzinamarira.

2021-05-27 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 549987e  HDFS-16040. RpcQueueTime metric counts requeued calls as 
unique events. Contributed by Simbarashe Dzinamarira.
549987e is described below

commit 549987ed874200b00d41f30b9ec2e2e5997b5875
Author: Konstantin V Shvachko 
AuthorDate: Thu May 27 18:44:27 2021 -0700

HDFS-16040. RpcQueueTime metric counts requeued calls as unique events. 
Contributed by Simbarashe Dzinamarira.

(cherry picked from commit 8ce30f51f999c0a80db53a2a96b5be5505d4d151)
---
 .../main/java/org/apache/hadoop/ipc/Server.java|  1 +
 .../namenode/ha/TestConsistentReadsObserver.java   | 54 ++
 2 files changed, 55 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 45485c4..f152368 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -2937,6 +2937,7 @@ public abstract class Server {
  */
 // Re-queue the call and continue
 requeueCall(call);
+call = null;
 continue;
   }
   if (LOG.isDebugEnabled()) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
index 854027a..18f987d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY;
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -28,6 +30,7 @@ import java.util.Collections;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -48,6 +51,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcScheduler;
 import org.apache.hadoop.ipc.Schedulable;
 import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -419,6 +423,56 @@ public class TestConsistentReadsObserver {
 }
   }
 
+  @Test
+  public void testRpcQueueTimeNumOpsMetrics() throws Exception {
+// 0 == not completed, 1 == succeeded, -1 == failed
+AtomicInteger readStatus = new AtomicInteger(0);
+
+// Making an uncoordinated call, which initialize the proxy
+// to Observer node.
+dfs.getClient().getHAServiceState();
+dfs.mkdir(testPath, FsPermission.getDefault());
+assertSentTo(0);
+
+Thread reader = new Thread(new Runnable() {
+  @Override
+  public void run() {
+try {
+  // this read will block until roll and tail edits happen.
+  dfs.getFileStatus(testPath);
+  readStatus.set(1);
+} catch (IOException e) {
+  e.printStackTrace();
+  readStatus.set(-1);
+}
+  }
+});
+
+reader.start();
+// the reader is still blocking, not succeeded yet.
+assertEquals(0, readStatus.get());
+dfsCluster.rollEditLogAndTail(0);
+// wait a while for all the change to be done
+GenericTestUtils.waitFor(new Supplier() {
+  @Override
+  public Boolean get() {
+return readStatus.get() != 0;
+  }
+}, 100, 1);
+// the reader should have succeed.
+assertEquals(1, readStatus.get());
+
+final int observerIdx = 2;
+NameNode observerNN = dfsCluster.getNameNode(observerIdx);
+MetricsRecordBuilder rpcMetrics =
+getMetrics("RpcActivityForPort"
++ observerNN.getNameNodeAddress().getPort());
+long rpcQueueTimeNumOps = getLongCounter("RpcQueueTimeNumOps", rpcMetrics);
+long rpcProcessingTimeNumOps = getLongCounter("RpcProcessingTimeNumOps",
+rpcMetrics);
+assertEquals(rpcQueueTimeNumOps, rpcProcessingTimeNumOps);

[hadoop] branch branch-3.3 updated: HDFS-16040. RpcQueueTime metric counts requeued calls as unique events. Contributed by Simbarashe Dzinamarira.

2021-05-27 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new c5535ca  HDFS-16040. RpcQueueTime metric counts requeued calls as 
unique events. Contributed by Simbarashe Dzinamarira.
c5535ca is described below

commit c5535caf6e03517edb19a204cf3d4477afafcf6c
Author: Konstantin V Shvachko 
AuthorDate: Thu May 27 18:44:27 2021 -0700

HDFS-16040. RpcQueueTime metric counts requeued calls as unique events. 
Contributed by Simbarashe Dzinamarira.

(cherry picked from commit 8ce30f51f999c0a80db53a2a96b5be5505d4d151)
---
 .../main/java/org/apache/hadoop/ipc/Server.java|  1 +
 .../namenode/ha/TestConsistentReadsObserver.java   | 54 ++
 2 files changed, 55 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 68d4923..bfdfaf6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -2947,6 +2947,7 @@ public abstract class Server {
  */
 // Re-queue the call and continue
 requeueCall(call);
+call = null;
 continue;
   }
   if (LOG.isDebugEnabled()) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
index 854027a..18f987d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY;
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -28,6 +30,7 @@ import java.util.Collections;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -48,6 +51,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcScheduler;
 import org.apache.hadoop.ipc.Schedulable;
 import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -419,6 +423,56 @@ public class TestConsistentReadsObserver {
 }
   }
 
+  @Test
+  public void testRpcQueueTimeNumOpsMetrics() throws Exception {
+// 0 == not completed, 1 == succeeded, -1 == failed
+AtomicInteger readStatus = new AtomicInteger(0);
+
+// Making an uncoordinated call, which initialize the proxy
+// to Observer node.
+dfs.getClient().getHAServiceState();
+dfs.mkdir(testPath, FsPermission.getDefault());
+assertSentTo(0);
+
+Thread reader = new Thread(new Runnable() {
+  @Override
+  public void run() {
+try {
+  // this read will block until roll and tail edits happen.
+  dfs.getFileStatus(testPath);
+  readStatus.set(1);
+} catch (IOException e) {
+  e.printStackTrace();
+  readStatus.set(-1);
+}
+  }
+});
+
+reader.start();
+// the reader is still blocking, not succeeded yet.
+assertEquals(0, readStatus.get());
+dfsCluster.rollEditLogAndTail(0);
+// wait a while for all the change to be done
+GenericTestUtils.waitFor(new Supplier() {
+  @Override
+  public Boolean get() {
+return readStatus.get() != 0;
+  }
+}, 100, 1);
+// the reader should have succeed.
+assertEquals(1, readStatus.get());
+
+final int observerIdx = 2;
+NameNode observerNN = dfsCluster.getNameNode(observerIdx);
+MetricsRecordBuilder rpcMetrics =
+getMetrics("RpcActivityForPort"
++ observerNN.getNameNodeAddress().getPort());
+long rpcQueueTimeNumOps = getLongCounter("RpcQueueTimeNumOps", rpcMetrics);
+long rpcProcessingTimeNumOps = getLongCounter("RpcProcessingTimeNumOps",
+rpcMetrics);
+assertEquals(rpcQueueTimeNumOps, rpcProcessingTimeNumOps);

[hadoop] branch trunk updated: HDFS-16040. RpcQueueTime metric counts requeued calls as unique events. Contributed by Simbarashe Dzinamarira.

2021-05-27 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 8ce30f5  HDFS-16040. RpcQueueTime metric counts requeued calls as 
unique events. Contributed by Simbarashe Dzinamarira.
8ce30f5 is described below

commit 8ce30f51f999c0a80db53a2a96b5be5505d4d151
Author: Konstantin V Shvachko 
AuthorDate: Thu May 27 18:44:27 2021 -0700

HDFS-16040. RpcQueueTime metric counts requeued calls as unique events. 
Contributed by Simbarashe Dzinamarira.
---
 .../main/java/org/apache/hadoop/ipc/Server.java|  1 +
 .../namenode/ha/TestConsistentReadsObserver.java   | 54 ++
 2 files changed, 55 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index d37e4a1..77d580e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -2954,6 +2954,7 @@ public abstract class Server {
  */
 // Re-queue the call and continue
 requeueCall(call);
+call = null;
 continue;
   }
   if (LOG.isDebugEnabled()) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
index 854027a..18f987d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY;
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -28,6 +30,7 @@ import java.util.Collections;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -48,6 +51,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcScheduler;
 import org.apache.hadoop.ipc.Schedulable;
 import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -419,6 +423,56 @@ public class TestConsistentReadsObserver {
 }
   }
 
+  @Test
+  public void testRpcQueueTimeNumOpsMetrics() throws Exception {
+// 0 == not completed, 1 == succeeded, -1 == failed
+AtomicInteger readStatus = new AtomicInteger(0);
+
+// Making an uncoordinated call, which initialize the proxy
+// to Observer node.
+dfs.getClient().getHAServiceState();
+dfs.mkdir(testPath, FsPermission.getDefault());
+assertSentTo(0);
+
+Thread reader = new Thread(new Runnable() {
+  @Override
+  public void run() {
+try {
+  // this read will block until roll and tail edits happen.
+  dfs.getFileStatus(testPath);
+  readStatus.set(1);
+} catch (IOException e) {
+  e.printStackTrace();
+  readStatus.set(-1);
+}
+  }
+});
+
+reader.start();
+// the reader is still blocking, not succeeded yet.
+assertEquals(0, readStatus.get());
+dfsCluster.rollEditLogAndTail(0);
+// wait a while for all the change to be done
+GenericTestUtils.waitFor(new Supplier() {
+  @Override
+  public Boolean get() {
+return readStatus.get() != 0;
+  }
+}, 100, 1);
+// the reader should have succeed.
+assertEquals(1, readStatus.get());
+
+final int observerIdx = 2;
+NameNode observerNN = dfsCluster.getNameNode(observerIdx);
+MetricsRecordBuilder rpcMetrics =
+getMetrics("RpcActivityForPort"
++ observerNN.getNameNodeAddress().getPort());
+long rpcQueueTimeNumOps = getLongCounter("RpcQueueTimeNumOps", rpcMetrics);
+long rpcProcessingTimeNumOps = getLongCounter("RpcProcessingTimeNumOps",
+rpcMetrics);
+assertEquals(rpcQueueTimeNumOps, rpcProcessingTimeNumOps);
+  }
+
   private void assertSentTo(int nnIdx) throws
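Across all of the branches above the server-side change is the single call = null assignment after requeueCall(): a call that is re-queued (for example, an observer read waiting for edits to catch up) would otherwise be counted again when the handler updates its per-call metrics, which is exactly what the new testRpcQueueTimeNumOpsMetrics guards against by asserting that RpcQueueTimeNumOps equals RpcProcessingTimeNumOps. A minimal, self-contained sketch of the pattern, assuming the metric update is keyed off the handler's local reference; names are illustrative, not Hadoop's actual Server internals.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

/**
 * Sketch: nulling the local reference after a requeue keeps the same call
 * from being counted twice in the per-call metrics.
 */
public class RequeueMetricSketch {

  static final BlockingQueue<String> callQueue = new ArrayBlockingQueue<>(16);
  static long rpcQueueTimeNumOps = 0;

  static void handleOnce(boolean mustRequeue) throws InterruptedException {
    String call = callQueue.take();
    try {
      if (mustRequeue) {
        callQueue.put(call); // re-queue the call and continue
        call = null;         // forget it so the metric block below skips it
        return;
      }
      // ... process the call ...
    } finally {
      if (call != null) {
        rpcQueueTimeNumOps++; // each unique call is counted exactly once
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    callQueue.put("read /foo");
    handleOnce(true);   // requeued, not counted
    handleOnce(false);  // picked up again and processed, counted once
    System.out.println("RpcQueueTimeNumOps = " + rpcQueueTimeNumOps); // 1
  }
}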

[hadoop] branch branch-2.10 updated: HDFS-15915. Race condition with async edits logging due to updating txId outside of the namesystem log. Contributed by Konstantin V Shvachko.

2021-05-26 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 8715a18  HDFS-15915. Race condition with async edits logging due to 
updating txId outside of the namesystem log. Contributed by Konstantin V 
Shvachko.
8715a18 is described below

commit 8715a18f95129b482549f539322fe2241cd27ac3
Author: Konstantin V Shvachko 
AuthorDate: Wed May 26 12:07:13 2021 -0700

HDFS-15915. Race condition with async edits logging due to updating txId 
outside of the namesystem log. Contributed by Konstantin V Shvachko.

(cherry picked from commit 1abd03d68f4f236674ce929164cc460037730abb)

# Conflicts:
#   
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
#   
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
#   
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
---
 .../hdfs/server/namenode/EditLogOutputStream.java  |  11 ++
 .../hadoop/hdfs/server/namenode/FSEditLog.java |  43 ++--
 .../hdfs/server/namenode/FSEditLogAsync.java   |   7 +-
 .../hadoop/hdfs/server/namenode/JournalSet.java|  19 +++-
 .../hdfs/server/namenode/NameNodeAdapter.java  |  36 +-
 .../hdfs/server/namenode/TestEditLogRace.java  |  65 +++
 .../hadoop/hdfs/server/namenode/ha/HATestUtil.java |  12 ++
 .../hdfs/server/namenode/ha/TestObserverNode.java  | 121 -
 8 files changed, 276 insertions(+), 38 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
index b4ca2d6..dd931a9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
@@ -24,6 +24,7 @@ import static org.apache.hadoop.util.Time.monotonicNow;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 
 /**
  * A generic abstract class to support journaling of edits logs into 
@@ -41,6 +42,16 @@ public abstract class EditLogOutputStream implements 
Closeable {
   }
 
   /**
+   * Get the last txId journalled in the stream.
+   * The txId is recorded when FSEditLogOp is written to the stream.
+   * The default implementation is dummy.
+   * JournalSet tracks the txId uniformly for all underlying streams.
+   */
+  public long getLastJournalledTxId() {
+return HdfsServerConstants.INVALID_TXID;
+  };
+
+  /**
* Write edits log operation to the stream.
* 
* @param op operation
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 0986129..783ab22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -213,7 +213,10 @@ public class FSEditLog implements LogsPurgeable {
   private static final ThreadLocal<TransactionId> myTransactionId = new 
ThreadLocal<TransactionId>() {
 @Override
 protected synchronized TransactionId initialValue() {
-  return new TransactionId(Long.MAX_VALUE);
+  // If an RPC call did not generate any transactions,
+  // logSync() should exit without syncing
+  // Therefore the initial value of myTransactionId should be 0
+  return new TransactionId(0L);
 }
   };
 
@@ -456,6 +459,7 @@ public class FSEditLog implements LogsPurgeable {
   // wait if an automatic sync is scheduled
   waitIfAutoSyncScheduled();
 
+  beginTransaction(op);
   // check if it is time to schedule an automatic sync
   needsSync = doEditTransaction(op);
   if (needsSync) {
@@ -470,9 +474,13 @@ public class FSEditLog implements LogsPurgeable {
   }
 
   synchronized boolean doEditTransaction(final FSEditLogOp op) {
-long start = beginTransaction();
-op.setTransactionId(txid);
+if (LOG.isDebugEnabled()) {
+  LOG.debug("doEditTx() op=" + op + " txid=" + txid);
+}
+assert op.hasTransactionId() :
+  "Transaction id is not set for " + op + " EditLog.txId=" + txid;
 
+long start = monotonicNow();
 try {
   editLogStream.write(op);
 } catch (IOException ex) {
@@ -516,7 +524,7 @@ public class FSEditLo

[hadoop] branch branch-3.1 updated: HDFS-15915. Race condition with async edits logging due to updating txId outside of the namesystem log. Contributed by Konstantin V Shvachko.

2021-05-26 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 49490af  HDFS-15915. Race condition with async edits logging due to 
updating txId outside of the namesystem log. Contributed by Konstantin V 
Shvachko.
49490af is described below

commit 49490af23c75378871b5adf2d5f8f9eb7a511516
Author: Konstantin V Shvachko 
AuthorDate: Wed May 26 12:07:13 2021 -0700

HDFS-15915. Race condition with async edits logging due to updating txId 
outside of the namesystem log. Contributed by Konstantin V Shvachko.

(cherry picked from commit 1abd03d68f4f236674ce929164cc460037730abb)
# Conflicts:
#   
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
#   
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
---
 .../hdfs/server/namenode/EditLogOutputStream.java  |  11 ++
 .../hadoop/hdfs/server/namenode/FSEditLog.java |  39 +--
 .../hdfs/server/namenode/FSEditLogAsync.java   |   7 +-
 .../hadoop/hdfs/server/namenode/JournalSet.java|  19 +++-
 .../hdfs/server/namenode/NameNodeAdapter.java  |  39 ++-
 .../hdfs/server/namenode/TestEditLogRace.java  |  66 +++
 .../hadoop/hdfs/server/namenode/ha/HATestUtil.java |  12 ++
 .../hdfs/server/namenode/ha/TestObserverNode.java  | 121 -
 8 files changed, 274 insertions(+), 40 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
index 27733cf..6f43d73 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
@@ -24,6 +24,7 @@ import static org.apache.hadoop.util.Time.monotonicNow;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 
 /**
  * A generic abstract class to support journaling of edits logs into 
@@ -43,6 +44,16 @@ public abstract class EditLogOutputStream implements 
Closeable {
   }
 
   /**
+   * Get the last txId journalled in the stream.
+   * The txId is recorded when FSEditLogOp is written to the stream.
+   * The default implementation is dummy.
+   * JournalSet tracks the txId uniformly for all underlying streams.
+   */
+  public long getLastJournalledTxId() {
+return HdfsServerConstants.INVALID_TXID;
+  };
+
+  /**
* Write edits log operation to the stream.
* 
* @param op operation
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 2e7c247..e2a60fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -217,7 +217,10 @@ public class FSEditLog implements LogsPurgeable {
   private static final ThreadLocal<TransactionId> myTransactionId = new 
ThreadLocal<TransactionId>() {
 @Override
 protected synchronized TransactionId initialValue() {
-  return new TransactionId(Long.MAX_VALUE);
+  // If an RPC call did not generate any transactions,
+  // logSync() should exit without syncing
+  // Therefore the initial value of myTransactionId should be 0
+  return new TransactionId(0L);
 }
   };
 
@@ -461,6 +464,7 @@ public class FSEditLog implements LogsPurgeable {
   // wait if an automatic sync is scheduled
   waitIfAutoSyncScheduled();
 
+  beginTransaction(op);
   // check if it is time to schedule an automatic sync
   needsSync = doEditTransaction(op);
   if (needsSync) {
@@ -475,9 +479,11 @@ public class FSEditLog implements LogsPurgeable {
   }
 
   synchronized boolean doEditTransaction(final FSEditLogOp op) {
-long start = beginTransaction();
-op.setTransactionId(txid);
+LOG.debug("doEditTx() op={} txid={}", op, txid);
+assert op.hasTransactionId() :
+  "Transaction id is not set for " + op + " EditLog.txId=" + txid;
 
+long start = monotonicNow();
 try {
   editLogStream.write(op);
 } catch (IOException ex) {
@@ -521,7 +527,7 @@ public class FSEditLog implements LogsPurgeable {
 return editLogStream.shouldForceSync();
   }
   
-  private long beginTransaction() {
+  protected void beginTransaction(final FSEditLogOp op) 

[hadoop] branch branch-3.2 updated: HDFS-15915. Race condition with async edits logging due to updating txId outside of the namesystem log. Contributed by Konstantin V Shvachko.

2021-05-26 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 14fd477  HDFS-15915. Race condition with async edits logging due to 
updating txId outside of the namesystem log. Contributed by Konstantin V 
Shvachko.
14fd477 is described below

commit 14fd4776ae034ba3b2d941f1db344548fbd1f42a
Author: Konstantin V Shvachko 
AuthorDate: Wed May 26 12:07:13 2021 -0700

HDFS-15915. Race condition with async edits logging due to updating txId 
outside of the namesystem log. Contributed by Konstantin V Shvachko.

(cherry picked from commit 1abd03d68f4f236674ce929164cc460037730abb)
---
 .../hdfs/server/namenode/EditLogOutputStream.java  |  11 ++
 .../hadoop/hdfs/server/namenode/FSEditLog.java |  39 +--
 .../hdfs/server/namenode/FSEditLogAsync.java   |   7 +-
 .../hadoop/hdfs/server/namenode/JournalSet.java|  19 +++-
 .../hdfs/server/namenode/NameNodeAdapter.java  |  36 ++-
 .../hdfs/server/namenode/TestEditLogRace.java  |  66 +++-
 .../hadoop/hdfs/server/namenode/ha/HATestUtil.java |  12 +++
 .../hdfs/server/namenode/ha/TestObserverNode.java  | 118 +
 8 files changed, 270 insertions(+), 38 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
index 27733cf..6f43d73 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
@@ -24,6 +24,7 @@ import static org.apache.hadoop.util.Time.monotonicNow;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 
 /**
  * A generic abstract class to support journaling of edits logs into 
@@ -43,6 +44,16 @@ public abstract class EditLogOutputStream implements 
Closeable {
   }
 
   /**
+   * Get the last txId journalled in the stream.
+   * The txId is recorded when FSEditLogOp is written to the stream.
+   * The default implementation is dummy.
+   * JournalSet tracks the txId uniformly for all underlying streams.
+   */
+  public long getLastJournalledTxId() {
+return HdfsServerConstants.INVALID_TXID;
+  };
+
+  /**
* Write edits log operation to the stream.
* 
* @param op operation
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 7b3f6a0..a24e5dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -217,7 +217,10 @@ public class FSEditLog implements LogsPurgeable {
   private static final ThreadLocal<TransactionId> myTransactionId = new 
ThreadLocal<TransactionId>() {
 @Override
 protected synchronized TransactionId initialValue() {
-  return new TransactionId(Long.MAX_VALUE);
+  // If an RPC call did not generate any transactions,
+  // logSync() should exit without syncing
+  // Therefore the initial value of myTransactionId should be 0
+  return new TransactionId(0L);
 }
   };
 
@@ -462,6 +465,7 @@ public class FSEditLog implements LogsPurgeable {
   // wait if an automatic sync is scheduled
   waitIfAutoSyncScheduled();
 
+  beginTransaction(op);
   // check if it is time to schedule an automatic sync
   needsSync = doEditTransaction(op);
   if (needsSync) {
@@ -476,9 +480,11 @@ public class FSEditLog implements LogsPurgeable {
   }
 
   synchronized boolean doEditTransaction(final FSEditLogOp op) {
-long start = beginTransaction();
-op.setTransactionId(txid);
+LOG.debug("doEditTx() op={} txid={}", op, txid);
+assert op.hasTransactionId() :
+  "Transaction id is not set for " + op + " EditLog.txId=" + txid;
 
+long start = monotonicNow();
 try {
   editLogStream.write(op);
 } catch (IOException ex) {
@@ -522,7 +528,7 @@ public class FSEditLog implements LogsPurgeable {
 return editLogStream.shouldForceSync();
   }
   
-  private long beginTransaction() {
+  protected void beginTransaction(final FSEditLogOp op) {
 assert Thread.holdsLock(this);
 // get a new transactionId
 txid++;
@@ -532,7 +538,9 @@ public class FSEditLog implements LogsPurgeable {
 //
 TransactionId id = myTransactionId.get();
 id.txid = txid;
-return monotonicNow();

[hadoop] branch branch-3.3 updated: HDFS-15915. Race condition with async edits logging due to updating txId outside of the namesystem log. Contributed by Konstantin V Shvachko.

2021-05-26 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 5308d44  HDFS-15915. Race condition with async edits logging due to 
updating txId outside of the namesystem log. Contributed by Konstantin V 
Shvachko.
5308d44 is described below

commit 5308d44be44490c618d4f40bb4bbb2fb988d9119
Author: Konstantin V Shvachko 
AuthorDate: Wed May 26 12:07:13 2021 -0700

HDFS-15915. Race condition with async edits logging due to updating txId 
outside of the namesystem log. Contributed by Konstantin V Shvachko.

(cherry picked from commit 1abd03d68f4f236674ce929164cc460037730abb)
---
 .../hdfs/server/namenode/EditLogOutputStream.java  |  11 ++
 .../hadoop/hdfs/server/namenode/FSEditLog.java |  39 +--
 .../hdfs/server/namenode/FSEditLogAsync.java   |   7 +-
 .../hadoop/hdfs/server/namenode/JournalSet.java|  19 +++-
 .../hdfs/server/namenode/NameNodeAdapter.java  |  36 ++-
 .../hdfs/server/namenode/TestEditLogRace.java  |  66 +++-
 .../hadoop/hdfs/server/namenode/ha/HATestUtil.java |  12 +++
 .../hdfs/server/namenode/ha/TestObserverNode.java  | 118 +
 8 files changed, 270 insertions(+), 38 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
index 27733cf..6f43d73 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
@@ -24,6 +24,7 @@ import static org.apache.hadoop.util.Time.monotonicNow;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 
 /**
  * A generic abstract class to support journaling of edits logs into 
@@ -43,6 +44,16 @@ public abstract class EditLogOutputStream implements 
Closeable {
   }
 
   /**
+   * Get the last txId journalled in the stream.
+   * The txId is recorded when FSEditLogOp is written to the stream.
+   * The default implementation is dummy.
+   * JournalSet tracks the txId uniformly for all underlying streams.
+   */
+  public long getLastJournalledTxId() {
+return HdfsServerConstants.INVALID_TXID;
+  };
+
+  /**
* Write edits log operation to the stream.
* 
* @param op operation
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 0a97315..8b34dfe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -217,7 +217,10 @@ public class FSEditLog implements LogsPurgeable {
   private static final ThreadLocal<TransactionId> myTransactionId = new 
ThreadLocal<TransactionId>() {
 @Override
 protected synchronized TransactionId initialValue() {
-  return new TransactionId(Long.MAX_VALUE);
+  // If an RPC call did not generate any transactions,
+  // logSync() should exit without syncing
+  // Therefore the initial value of myTransactionId should be 0
+  return new TransactionId(0L);
 }
   };
 
@@ -462,6 +465,7 @@ public class FSEditLog implements LogsPurgeable {
   // wait if an automatic sync is scheduled
   waitIfAutoSyncScheduled();
 
+  beginTransaction(op);
   // check if it is time to schedule an automatic sync
   needsSync = doEditTransaction(op);
   if (needsSync) {
@@ -476,9 +480,11 @@ public class FSEditLog implements LogsPurgeable {
   }
 
   synchronized boolean doEditTransaction(final FSEditLogOp op) {
-long start = beginTransaction();
-op.setTransactionId(txid);
+LOG.debug("doEditTx() op={} txid={}", op, txid);
+assert op.hasTransactionId() :
+  "Transaction id is not set for " + op + " EditLog.txId=" + txid;
 
+long start = monotonicNow();
 try {
   editLogStream.write(op);
 } catch (IOException ex) {
@@ -522,7 +528,7 @@ public class FSEditLog implements LogsPurgeable {
 return editLogStream.shouldForceSync();
   }
   
-  private long beginTransaction() {
+  protected void beginTransaction(final FSEditLogOp op) {
 assert Thread.holdsLock(this);
 // get a new transactionId
 txid++;
@@ -532,7 +538,9 @@ public class FSEditLog implements LogsPurgeable {
 //
 TransactionId id = myTransactionId.get();
 id.txid = txid;
-return monotonicNow();

[hadoop] branch trunk updated: HDFS-15915. Race condition with async edits logging due to updating txId outside of the namesystem log. Contributed by Konstantin V Shvachko.

2021-05-26 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1abd03d  HDFS-15915. Race condition with async edits logging due to 
updating txId outside of the namesystem log. Contributed by Konstantin V 
Shvachko.
1abd03d is described below

commit 1abd03d68f4f236674ce929164cc460037730abb
Author: Konstantin V Shvachko 
AuthorDate: Wed May 26 12:07:13 2021 -0700

HDFS-15915. Race condition with async edits logging due to updating txId 
outside of the namesystem log. Contributed by Konstantin V Shvachko.
---
 .../hdfs/server/namenode/EditLogOutputStream.java  |  11 ++
 .../hadoop/hdfs/server/namenode/FSEditLog.java |  39 +--
 .../hdfs/server/namenode/FSEditLogAsync.java   |   7 +-
 .../hadoop/hdfs/server/namenode/JournalSet.java|  19 +++-
 .../hdfs/server/namenode/NameNodeAdapter.java  |  36 ++-
 .../hdfs/server/namenode/TestEditLogRace.java  |  66 +++-
 .../hadoop/hdfs/server/namenode/ha/HATestUtil.java |  12 +++
 .../hdfs/server/namenode/ha/TestObserverNode.java  | 118 +
 8 files changed, 270 insertions(+), 38 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
index 27733cf..6f43d73 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
@@ -24,6 +24,7 @@ import static org.apache.hadoop.util.Time.monotonicNow;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 
 /**
  * A generic abstract class to support journaling of edits logs into 
@@ -43,6 +44,16 @@ public abstract class EditLogOutputStream implements 
Closeable {
   }
 
   /**
+   * Get the last txId journalled in the stream.
+   * The txId is recorded when FSEditLogOp is written to the stream.
+   * The default implementation is dummy.
+   * JournalSet tracks the txId uniformly for all underlying streams.
+   */
+  public long getLastJournalledTxId() {
+return HdfsServerConstants.INVALID_TXID;
+  };
+
+  /**
* Write edits log operation to the stream.
* 
* @param op operation
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 79f039b..6048457 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -218,7 +218,10 @@ public class FSEditLog implements LogsPurgeable {
   private static final ThreadLocal<TransactionId> myTransactionId = new 
ThreadLocal<TransactionId>() {
 @Override
 protected synchronized TransactionId initialValue() {
-  return new TransactionId(Long.MAX_VALUE);
+  // If an RPC call did not generate any transactions,
+  // logSync() should exit without syncing
+  // Therefore the initial value of myTransactionId should be 0
+  return new TransactionId(0L);
 }
   };
 
@@ -463,6 +466,7 @@ public class FSEditLog implements LogsPurgeable {
   // wait if an automatic sync is scheduled
   waitIfAutoSyncScheduled();
 
+  beginTransaction(op);
   // check if it is time to schedule an automatic sync
   needsSync = doEditTransaction(op);
   if (needsSync) {
@@ -477,9 +481,11 @@ public class FSEditLog implements LogsPurgeable {
   }
 
   synchronized boolean doEditTransaction(final FSEditLogOp op) {
-long start = beginTransaction();
-op.setTransactionId(txid);
+LOG.debug("doEditTx() op={} txid={}", op, txid);
+assert op.hasTransactionId() :
+  "Transaction id is not set for " + op + " EditLog.txId=" + txid;
 
+long start = monotonicNow();
 try {
   editLogStream.write(op);
 } catch (IOException ex) {
@@ -523,7 +529,7 @@ public class FSEditLog implements LogsPurgeable {
 return editLogStream.shouldForceSync();
   }
   
-  private long beginTransaction() {
+  protected void beginTransaction(final FSEditLogOp op) {
 assert Thread.holdsLock(this);
 // get a new transactionId
 txid++;
@@ -533,7 +539,9 @@ public class FSEditLog implements LogsPurgeable {
 //
 TransactionId id = myTransactionId.get();
 id.txid = txid;
-return monotonicNow();
+if(op != null) {
+  op.setTransactionId(txid);
+}
   }
   
   private 
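The core of HDFS-15915 is visible in the FSEditLog hunks: beginTransaction(op) is now invoked from logEdit() while the edit is still ordered by the edit-log lock, and it stamps the transaction id onto the op right there; doEditTransaction(), which may later run on the asynchronous logging path, only asserts that the id is already set and writes the op. The change of myTransactionId's initial value from Long.MAX_VALUE to 0 likewise lets logSync() return immediately for handlers that never generated a transaction. A minimal sketch of the ordering guarantee, with illustrative names rather than the real FSEditLog types:

/**
 * Sketch: the transaction id is assigned in beginTransaction() under the
 * lock that ordered the namespace change, so journalled txIds cannot get out
 * of order with the edits themselves, no matter when the async writer runs.
 */
public class TxIdOrderingSketch {

  static class Op {
    long txid = -1;
    boolean hasTransactionId() { return txid >= 0; }
  }

  private long txid = 0;

  // Called while holding the lock that serializes namespace changes.
  synchronized void beginTransaction(Op op) {
    txid++;
    op.txid = txid;
  }

  // Possibly called later on an async thread: writes only, never assigns ids.
  void doEditTransaction(Op op) {
    assert op.hasTransactionId() : "txid must be set before journalling";
    // ... write op to the journal stream ...
  }

  public static void main(String[] args) {
    TxIdOrderingSketch log = new TxIdOrderingSketch();
    Op a = new Op();
    Op b = new Op();
    log.beginTransaction(a);
    log.beginTransaction(b);
    log.doEditTransaction(a);
    log.doEditTransaction(b);
    System.out.println("a.txid=" + a.txid + ", b.txid=" + b.txid); // 1, 2
  }
}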

[hadoop] 02/02: Add namespace key for INode. (shv)

2021-05-10 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9a60e53259ac257fd246c0141dc0061f2f5700b8
Author: Konstantin V Shvachko 
AuthorDate: Fri May 7 17:51:58 2021 -0700

Add namespace key for INode. (shv)
---
 .../org/apache/hadoop/util/PartitionedGSet.java| 80 ++
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |  3 +
 .../apache/hadoop/hdfs/server/namenode/INode.java  | 40 ++-
 .../hadoop/hdfs/server/namenode/INodeMap.java  | 71 +--
 4 files changed, 176 insertions(+), 18 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
index 4b0cdc9..7ebb1b3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -22,6 +22,7 @@ import java.util.Comparator;
 import java.util.Iterator;
 import java.util.Map.Entry;
 import java.util.NavigableMap;
+import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -44,7 +45,8 @@ import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
 @InterfaceAudience.Private
 public class PartitionedGSet implements GSet {
 
-  private static final int DEFAULT_PARTITION_CAPACITY = 2027;
+  private static final int DEFAULT_PARTITION_CAPACITY = 65536; // 4096; // 
5120; // 2048; // 1027;
+  private static final float DEFAULT_PARTITION_OVERFLOW = 1.8f;
 
   /**
* An ordered map of contiguous segments of elements.
@@ -81,8 +83,11 @@ public class PartitionedGSet implements 
GSet {
   final E rootKey) {
 this.partitions = new TreeMap(comparator);
 this.latchLock = latchLock;
-addNewPartition(rootKey).put(rootKey);
-this.size = 1;
+// addNewPartition(rootKey).put(rootKey);
+// this.size = 1;
+this.size = 0;
+LOG.info("Partition capacity = {}", DEFAULT_PARTITION_CAPACITY);
+LOG.info("Partition overflow factor = {}", DEFAULT_PARTITION_OVERFLOW);
   }
 
   /**
@@ -90,16 +95,19 @@ public class PartitionedGSet implements 
GSet {
* @param key
* @return
*/
-  private PartitionEntry addNewPartition(final K key) {
+  public PartitionEntry addNewPartition(final K key) {
+Entry lastEntry = partitions.lastEntry();
 PartitionEntry lastPart = null;
-if(size > 0)
-  lastPart = partitions.lastEntry().getValue();
+if(lastEntry != null)
+  lastPart = lastEntry.getValue();
 
 PartitionEntry newPart =
 new PartitionEntry(DEFAULT_PARTITION_CAPACITY);
 // assert size == 0 || newPart.partLock.isWriteTopLocked() :
 //  "Must hold write Lock: key = " + key;
-partitions.put(key, newPart);
+PartitionEntry oldPart = partitions.put(key, newPart);
+assert oldPart == null :
+  "RangeMap already has a partition associated with " + key;
 
 LOG.debug("Total GSet size = {}", size);
 LOG.debug("Number of partitions = {}", partitions.size());
@@ -173,7 +181,7 @@ public class PartitionedGSet implements 
GSet {
 
   private PartitionEntry addNewPartitionIfNeeded(
   PartitionEntry curPart, K key) {
-if(curPart.size() < DEFAULT_PARTITION_CAPACITY * 1.1
+if(curPart.size() < DEFAULT_PARTITION_CAPACITY * DEFAULT_PARTITION_OVERFLOW
 || curPart.contains(key)) {
   return curPart;
 }
@@ -197,12 +205,56 @@ public class PartitionedGSet implements 
GSet {
   public void clear() {
 LOG.error("Total GSet size = {}", size);
 LOG.error("Number of partitions = {}", partitions.size());
+printStats();
 // assert latchLock.hasWriteTopLock() : "Must hold write topLock";
 // SHV May need to clear all partitions?
 partitions.clear();
 size = 0;
   }
 
+  private void printStats() {
+int partSizeMin = Integer.MAX_VALUE, partSizeAvg = 0, partSizeMax = 0;
+long totalSize = 0;
+int numEmptyPartitions = 0, numFullPartitions = 0;
+Collection parts = partitions.values();
+Set> entries = partitions.entrySet();
+int i = 0;
+for(Entry e : entries) {
+  PartitionEntry part = e.getValue();
+  int s = part.size;
+  if(s == 0) numEmptyPartitions++;
+  if(s > DEFAULT_PARTITION_CAPACITY) numFullPartitions++;
+  totalSize += s;
+  partSizeMin = (s < partSizeMin ? s : partSizeMin);
+  partSizeMax = (partSizeMax < s ? s : partSizeMax);
+  Class inodeClass = e.getKey().getClass();
+  try {
+long[] key = (long[]) inodeClass.
+getMethod("getNamespaceKey", int.class).invoke(e.getKey(), 2);
+long[] firstKey = new long[0];
+if(part.iter
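The namespace key added in this commit is what lets PartitionedGSet keep related inodes in the same partition: printStats() above reflectively calls getNamespaceKey(..., 2) on each partition key, i.e. a short array of long ids. A sketch of the idea, reduced to a two-level {parentId, inodeId} key with a lexicographic comparator; this is purely illustrative, the real key derivation lives in INode and the exact layout here is an assumption.

import java.util.Arrays;
import java.util.Comparator;
import java.util.TreeMap;

/**
 * Sketch: keying inodes by an array of ancestor ids, compared
 * lexicographically, makes children of the same directory sort next to each
 * other, so they tend to land in the same partition of a partitioned set.
 */
public class NamespaceKeySketch {

  // Hypothetical two-level key: {parentId, inodeId}.
  static long[] getNamespaceKey(long parentId, long inodeId) {
    return new long[] {parentId, inodeId};
  }

  public static void main(String[] args) {
    Comparator<long[]> cmp = (a, b) -> {
      for (int i = 0; i < Math.min(a.length, b.length); i++) {
        int c = Long.compare(a[i], b[i]);
        if (c != 0) {
          return c;
        }
      }
      return Integer.compare(a.length, b.length);
    };

    TreeMap<long[], String> inodes = new TreeMap<>(cmp);
    inodes.put(getNamespaceKey(1, 10), "/a/x");
    inodes.put(getNamespaceKey(1, 11), "/a/y");
    inodes.put(getNamespaceKey(2, 12), "/b/z");

    // Siblings under parent 1 come out contiguously.
    inodes.keySet().forEach(k -> System.out.println(Arrays.toString(k)));
  }
}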

[hadoop] branch fgl created (now 9a60e53)

2021-05-10 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a change to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at 9a60e53  Add namespace key for INode. (shv)

This branch includes the following new commits:

 new 1f1a0c4  INodeMap with PartitionedGSet and per-partition locking.
 new 9a60e53  Add namespace key for INode. (shv)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 01/02: INodeMap with PartitionedGSet and per-partition locking.

2021-05-10 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 1f1a0c45fe44c3da0db9678417c4ff397a93
Author: Konstantin V Shvachko 
AuthorDate: Fri May 7 17:47:37 2021 -0700

INodeMap with PartitionedGSet and per-partition locking.
---
 .../java/org/apache/hadoop/util/LatchLock.java |  64 +
 .../org/apache/hadoop/util/PartitionedGSet.java| 263 +
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |  92 ++-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |   2 +-
 .../hadoop/hdfs/server/namenode/FSImage.java   |  29 ++-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |   9 +-
 .../hdfs/server/namenode/FSNamesystemLock.java |  96 +++-
 .../hadoop/hdfs/server/namenode/INodeMap.java  | 148 ++--
 .../java/org/apache/hadoop/hdfs/DFSTestUtil.java   |   2 +
 .../hadoop/hdfs/server/namenode/TestINodeFile.java |  39 ++-
 10 files changed, 682 insertions(+), 62 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
new file mode 100644
index 000..41e33da
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LatchLock.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+/**
+ * LatchLock controls two hierarchical Read/Write locks:
+ * the topLock and the childLock.
+ * Typically an operation starts with the topLock already acquired.
+ * To acquire child lock LatchLock will
+ * first acquire the childLock, and then release the topLock.
+ */
+public abstract class LatchLock {
+  // Interfaces methods to be defined for subclasses
+  /** @return true topLock is locked for read by any thread */
+  protected abstract boolean isReadTopLocked();
+  /** @return true topLock is locked for write by any thread */
+  protected abstract boolean isWriteTopLocked();
+  protected abstract void readTopdUnlock();
+  protected abstract void writeTopUnlock();
+
+  protected abstract boolean hasReadChildLock();
+  protected abstract void readChildLock();
+  protected abstract void readChildUnlock();
+
+  protected abstract boolean hasWriteChildLock();
+  protected abstract void writeChildLock();
+  protected abstract void writeChildUnlock();
+
+  protected abstract LatchLock clone();
+
+  // Public APIs to use with the class
+  public void readLock() {
+readChildLock();
+readTopdUnlock();
+  }
+
+  public void readUnlock() {
+readChildUnlock();
+  }
+
+  public void writeLock() {
+writeChildLock();
+writeTopUnlock();
+  }
+
+  public void writeUnlock() {
+writeChildUnlock();
+  }
+}
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
new file mode 100644
index 000..4b0cdc9
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PartitionedGSet.java
@@ -0,0 +1,263 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterat
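Together these two new classes form the fine-grained-locking substrate: PartitionedGSet keeps an ordered map of partitions keyed by the first key of each range, and LatchLock moves a thread from the global (top) lock onto a per-partition (child) lock by acquiring the child before releasing the top. A minimal sketch of the partition-lookup side, simplified to Integer keys and plain HashMap partitions; the real class takes a caller-supplied comparator and uses LightWeightGSet partitions, so this is an illustration only.

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

/**
 * Sketch: an element belongs to the partition whose start key is the
 * greatest key not larger than the element's key, found with floorEntry().
 */
public class PartitionLookupSketch {

  static final TreeMap<Integer, Map<Integer, String>> partitions = new TreeMap<>();

  static Map<Integer, String> getPartition(int key) {
    Map.Entry<Integer, Map<Integer, String>> e = partitions.floorEntry(key);
    return e == null ? null : e.getValue();
  }

  public static void main(String[] args) {
    partitions.put(0, new HashMap<>());     // covers keys >= 0
    partitions.put(1000, new HashMap<>());  // covers keys >= 1000

    getPartition(42).put(42, "inode-42");       // lands in the first partition
    getPartition(2048).put(2048, "inode-2048"); // lands in the second partition

    System.out.println(getPartition(42).keySet());   // [42]
    System.out.println(getPartition(2048).keySet()); // [2048]
  }
}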

[hadoop] branch branch-3.1 updated: HADOOP-17680. Allow ProtobufRpcEngine to be extensible (#2905) Contributed by Hector Chaverri.

2021-05-06 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 39bf9e2  HADOOP-17680. Allow ProtobufRpcEngine to be extensible 
(#2905) Contributed by Hector Chaverri.
39bf9e2 is described below

commit 39bf9e270e3a5dbcd3a7a2924f62bd0b2dee0a02
Author: hchaverr 
AuthorDate: Thu May 6 16:40:45 2021 -0700

HADOOP-17680. Allow ProtobufRpcEngine to be extensible (#2905) Contributed 
by Hector Chaverri.

(cherry picked from commit f40e3eb0590f85bb42d2471992bf5d524628fdd6)
---
 .../org/apache/hadoop/ipc/ProtobufRpcEngine.java   | 30 +-
 1 file changed, 24 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 959f701..670093f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -115,7 +115,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 factory)), false);
   }
 
-  private static class Invoker implements RpcInvocationHandler {
+  protected static class Invoker implements RpcInvocationHandler {
 private final Map returnTypes = 
 new ConcurrentHashMap();
 private boolean isClosed = false;
@@ -126,7 +126,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 private AtomicBoolean fallbackToSimpleAuth;
 private AlignmentContext alignmentContext;
 
-private Invoker(Class protocol, InetSocketAddress addr,
+protected Invoker(Class protocol, InetSocketAddress addr,
 UserGroupInformation ticket, Configuration conf, SocketFactory factory,
 int rpcTimeout, RetryPolicy connectionRetryPolicy,
 AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext)
@@ -141,7 +141,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 /**
  * This constructor takes a connectionId, instead of creating a new one.
  */
-private Invoker(Class protocol, Client.ConnectionId connId,
+protected Invoker(Class protocol, Client.ConnectionId connId,
 Configuration conf, SocketFactory factory) {
   this.remoteId = connId;
   this.client = CLIENTS.getClient(conf, factory, RpcWritable.Buffer.class);
@@ -218,8 +218,6 @@ public class ProtobufRpcEngine implements RpcEngine {
 traceScope = 
tracer.newScope(RpcClientUtil.methodToTraceString(method));
   }
 
-  RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
-  
   if (LOG.isTraceEnabled()) {
 LOG.trace(Thread.currentThread().getId() + ": Call -> " +
 remoteId + ": " + method.getName() +
@@ -231,7 +229,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   final RpcWritable.Buffer val;
   try {
 val = (RpcWritable.Buffer) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
-new RpcProtobufRequest(rpcRequestHeader, theRequest), remoteId,
+constructRpcRequest(method, theRequest), remoteId,
 fallbackToSimpleAuth, alignmentContext);
 
   } catch (Throwable e) {
@@ -276,6 +274,11 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 }
 
+protected Writable constructRpcRequest(Method method, Message theRequest) {
+  RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
+  return new RpcProtobufRequest(rpcRequestHeader, theRequest);
+}
+
 private Message getReturnMessage(final Method method,
 final RpcWritable.Buffer buf) throws ServiceException {
   Message prototype = null;
@@ -325,6 +328,14 @@ public class ProtobufRpcEngine implements RpcEngine {
 public ConnectionId getConnectionId() {
   return remoteId;
 }
+
+protected long getClientProtocolVersion() {
+  return clientProtocolVersion;
+}
+
+protected String getProtocolName() {
+  return protocolName;
+}
   }
 
   @VisibleForTesting
@@ -503,6 +514,13 @@ public class ProtobufRpcEngine implements RpcEngine {
 String declaringClassProtoName = 
 rpcRequest.getDeclaringClassProtocolName();
 long clientVersion = rpcRequest.getClientProtocolVersion();
+return call(server, connectionProtocolName, request, receiveTime,
+methodName, declaringClassProtoName, clientVersion);
+  }
+
+  protected Writable call(RPC.Server server, String connectionProtocolName,
+  RpcWritable.Buffer request, long receiveTime, String methodName,
+  String declaringClassProtoName, long clientVersion) throws Exception 
{
 if (server.verbose)
   LOG.info("

[hadoop] branch branch-3.2 updated: HADOOP-17680. Allow ProtobufRpcEngine to be extensible (#2905) Contributed by Hector Chaverri.

2021-05-06 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new f6af5bf  HADOOP-17680. Allow ProtobufRpcEngine to be extensible 
(#2905) Contributed by Hector Chaverri.
f6af5bf is described below

commit f6af5bff9e1e9411f38038d4b0a8dbf4e0cbbac0
Author: hchaverr 
AuthorDate: Thu May 6 16:40:45 2021 -0700

HADOOP-17680. Allow ProtobufRpcEngine to be extensible (#2905) Contributed 
by Hector Chaverri.

(cherry picked from commit f40e3eb0590f85bb42d2471992bf5d524628fdd6)
---
 .../org/apache/hadoop/ipc/ProtobufRpcEngine.java   | 30 +-
 1 file changed, 24 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 5db2cc1..25a4fd4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -115,7 +115,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 factory)), false);
   }
 
-  private static class Invoker implements RpcInvocationHandler {
+  protected static class Invoker implements RpcInvocationHandler {
 private final Map returnTypes = 
 new ConcurrentHashMap();
 private boolean isClosed = false;
@@ -126,7 +126,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 private AtomicBoolean fallbackToSimpleAuth;
 private AlignmentContext alignmentContext;
 
-private Invoker(Class protocol, InetSocketAddress addr,
+protected Invoker(Class protocol, InetSocketAddress addr,
 UserGroupInformation ticket, Configuration conf, SocketFactory factory,
 int rpcTimeout, RetryPolicy connectionRetryPolicy,
 AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext)
@@ -141,7 +141,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 /**
  * This constructor takes a connectionId, instead of creating a new one.
  */
-private Invoker(Class protocol, Client.ConnectionId connId,
+protected Invoker(Class protocol, Client.ConnectionId connId,
 Configuration conf, SocketFactory factory) {
   this.remoteId = connId;
   this.client = CLIENTS.getClient(conf, factory, RpcWritable.Buffer.class);
@@ -218,8 +218,6 @@ public class ProtobufRpcEngine implements RpcEngine {
 traceScope = 
tracer.newScope(RpcClientUtil.methodToTraceString(method));
   }
 
-  RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
-  
   if (LOG.isTraceEnabled()) {
 LOG.trace(Thread.currentThread().getId() + ": Call -> " +
 remoteId + ": " + method.getName() +
@@ -231,7 +229,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   final RpcWritable.Buffer val;
   try {
 val = (RpcWritable.Buffer) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
-new RpcProtobufRequest(rpcRequestHeader, theRequest), remoteId,
+constructRpcRequest(method, theRequest), remoteId,
 fallbackToSimpleAuth, alignmentContext);
 
   } catch (Throwable e) {
@@ -276,6 +274,11 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 }
 
+protected Writable constructRpcRequest(Method method, Message theRequest) {
+  RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
+  return new RpcProtobufRequest(rpcRequestHeader, theRequest);
+}
+
 private Message getReturnMessage(final Method method,
 final RpcWritable.Buffer buf) throws ServiceException {
   Message prototype = null;
@@ -325,6 +328,14 @@ public class ProtobufRpcEngine implements RpcEngine {
 public ConnectionId getConnectionId() {
   return remoteId;
 }
+
+protected long getClientProtocolVersion() {
+  return clientProtocolVersion;
+}
+
+protected String getProtocolName() {
+  return protocolName;
+}
   }
 
   @VisibleForTesting
@@ -504,6 +515,13 @@ public class ProtobufRpcEngine implements RpcEngine {
 String declaringClassProtoName = 
 rpcRequest.getDeclaringClassProtocolName();
 long clientVersion = rpcRequest.getClientProtocolVersion();
+return call(server, connectionProtocolName, request, receiveTime,
+methodName, declaringClassProtoName, clientVersion);
+  }
+
+  protected Writable call(RPC.Server server, String connectionProtocolName,
+  RpcWritable.Buffer request, long receiveTime, String methodName,
+  String declaringClassProtoName, long clientVersion) throws Exception 
{
 if (server.verbose)
   LOG.info("

[hadoop] branch branch-3.3 updated: HADOOP-17680. Allow ProtobufRpcEngine to be extensible (#2905) Contributed by Hector Chaverri.

2021-05-06 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new cedebf1  HADOOP-17680. Allow ProtobufRpcEngine to be extensible 
(#2905) Contributed by Hector Chaverri.
cedebf1 is described below

commit cedebf1c27361077d0a9c6a6ec7cc9bb8ec200e5
Author: hchaverr 
AuthorDate: Thu May 6 16:40:45 2021 -0700

HADOOP-17680. Allow ProtobufRpcEngine to be extensible (#2905) Contributed 
by Hector Chaverri.

(cherry picked from commit f40e3eb0590f85bb42d2471992bf5d524628fdd6)
---
 .../org/apache/hadoop/ipc/ProtobufRpcEngine.java   | 30 +-
 .../org/apache/hadoop/ipc/ProtobufRpcEngine2.java  | 30 +-
 2 files changed, 48 insertions(+), 12 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index b7b7ad4..d539bb2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -122,7 +122,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 factory)), false);
   }
 
-  private static class Invoker implements RpcInvocationHandler {
+  protected static class Invoker implements RpcInvocationHandler {
 private final Map returnTypes = 
 new ConcurrentHashMap();
 private boolean isClosed = false;
@@ -133,7 +133,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 private AtomicBoolean fallbackToSimpleAuth;
 private AlignmentContext alignmentContext;
 
-private Invoker(Class protocol, InetSocketAddress addr,
+protected Invoker(Class protocol, InetSocketAddress addr,
 UserGroupInformation ticket, Configuration conf, SocketFactory factory,
 int rpcTimeout, RetryPolicy connectionRetryPolicy,
 AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext)
@@ -148,7 +148,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 /**
  * This constructor takes a connectionId, instead of creating a new one.
  */
-private Invoker(Class protocol, Client.ConnectionId connId,
+protected Invoker(Class protocol, Client.ConnectionId connId,
 Configuration conf, SocketFactory factory) {
   this.remoteId = connId;
   this.client = CLIENTS.getClient(conf, factory, RpcWritable.Buffer.class);
@@ -225,8 +225,6 @@ public class ProtobufRpcEngine implements RpcEngine {
 traceScope = 
tracer.newScope(RpcClientUtil.methodToTraceString(method));
   }
 
-  RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
-  
   if (LOG.isTraceEnabled()) {
 LOG.trace(Thread.currentThread().getId() + ": Call -> " +
 remoteId + ": " + method.getName() +
@@ -238,7 +236,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   final RpcWritable.Buffer val;
   try {
 val = (RpcWritable.Buffer) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
-new RpcProtobufRequest(rpcRequestHeader, theRequest), remoteId,
+constructRpcRequest(method, theRequest), remoteId,
 fallbackToSimpleAuth, alignmentContext);
 
   } catch (Throwable e) {
@@ -283,6 +281,11 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 }
 
+protected Writable constructRpcRequest(Method method, Message theRequest) {
+  RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
+  return new RpcProtobufRequest(rpcRequestHeader, theRequest);
+}
+
 private Message getReturnMessage(final Method method,
 final RpcWritable.Buffer buf) throws ServiceException {
   Message prototype = null;
@@ -332,6 +335,14 @@ public class ProtobufRpcEngine implements RpcEngine {
 public ConnectionId getConnectionId() {
   return remoteId;
 }
+
+protected long getClientProtocolVersion() {
+  return clientProtocolVersion;
+}
+
+protected String getProtocolName() {
+  return protocolName;
+}
   }
 
   @VisibleForTesting
@@ -518,6 +529,13 @@ public class ProtobufRpcEngine implements RpcEngine {
 String declaringClassProtoName = 
 rpcRequest.getDeclaringClassProtocolName();
 long clientVersion = rpcRequest.getClientProtocolVersion();
+return call(server, connectionProtocolName, request, receiveTime,
+methodName, declaringClassProtoName, clientVersion);
+  }
+
+  protected Writable call(RPC.Server server, String connectionProtocolName,
+  RpcWritable.Buffer request, long receiveTime, String methodName,
+  String declaringClassProtoName, long clientVe

[hadoop] branch trunk updated: HADOOP-17680. Allow ProtobufRpcEngine to be extensible (#2905) Contributed by Hector Chaverri.

2021-05-06 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f40e3eb  HADOOP-17680. Allow ProtobufRpcEngine to be extensible 
(#2905) Contributed by Hector Chaverri.
f40e3eb is described below

commit f40e3eb0590f85bb42d2471992bf5d524628fdd6
Author: hchaverr 
AuthorDate: Thu May 6 16:40:45 2021 -0700

HADOOP-17680. Allow ProtobufRpcEngine to be extensible (#2905) Contributed 
by Hector Chaverri.
---
 .../org/apache/hadoop/ipc/ProtobufRpcEngine.java   | 30 +-
 .../org/apache/hadoop/ipc/ProtobufRpcEngine2.java  | 30 +-
 2 files changed, 48 insertions(+), 12 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index a1500d5..882cc14 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -122,7 +122,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 factory)), false);
   }
 
-  private static class Invoker implements RpcInvocationHandler {
+  protected static class Invoker implements RpcInvocationHandler {
 private final Map returnTypes = 
 new ConcurrentHashMap();
 private boolean isClosed = false;
@@ -133,7 +133,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 private AtomicBoolean fallbackToSimpleAuth;
 private AlignmentContext alignmentContext;
 
-private Invoker(Class protocol, InetSocketAddress addr,
+protected Invoker(Class protocol, InetSocketAddress addr,
 UserGroupInformation ticket, Configuration conf, SocketFactory factory,
 int rpcTimeout, RetryPolicy connectionRetryPolicy,
 AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext)
@@ -148,7 +148,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 /**
  * This constructor takes a connectionId, instead of creating a new one.
  */
-private Invoker(Class protocol, Client.ConnectionId connId,
+protected Invoker(Class protocol, Client.ConnectionId connId,
 Configuration conf, SocketFactory factory) {
   this.remoteId = connId;
   this.client = CLIENTS.getClient(conf, factory, RpcWritable.Buffer.class);
@@ -225,8 +225,6 @@ public class ProtobufRpcEngine implements RpcEngine {
 traceScope = 
tracer.newScope(RpcClientUtil.methodToTraceString(method));
   }
 
-  RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
-  
   if (LOG.isTraceEnabled()) {
 LOG.trace(Thread.currentThread().getId() + ": Call -> " +
 remoteId + ": " + method.getName() +
@@ -238,7 +236,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   final RpcWritable.Buffer val;
   try {
 val = (RpcWritable.Buffer) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
-new RpcProtobufRequest(rpcRequestHeader, theRequest), remoteId,
+constructRpcRequest(method, theRequest), remoteId,
 fallbackToSimpleAuth, alignmentContext);
 
   } catch (Throwable e) {
@@ -283,6 +281,11 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 }
 
+protected Writable constructRpcRequest(Method method, Message theRequest) {
+  RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
+  return new RpcProtobufRequest(rpcRequestHeader, theRequest);
+}
+
 private Message getReturnMessage(final Method method,
 final RpcWritable.Buffer buf) throws ServiceException {
   Message prototype = null;
@@ -332,6 +335,14 @@ public class ProtobufRpcEngine implements RpcEngine {
 public ConnectionId getConnectionId() {
   return remoteId;
 }
+
+protected long getClientProtocolVersion() {
+  return clientProtocolVersion;
+}
+
+protected String getProtocolName() {
+  return protocolName;
+}
   }
 
   @VisibleForTesting
@@ -518,6 +529,13 @@ public class ProtobufRpcEngine implements RpcEngine {
 String declaringClassProtoName = 
 rpcRequest.getDeclaringClassProtocolName();
 long clientVersion = rpcRequest.getClientProtocolVersion();
+return call(server, connectionProtocolName, request, receiveTime,
+methodName, declaringClassProtoName, clientVersion);
+  }
+
+  protected Writable call(RPC.Server server, String connectionProtocolName,
+  RpcWritable.Buffer request, long receiveTime, String methodName,
+  String declaringClassProtoName, long clientVersion) throws Exception 
{
 if (server.verbose)
   LOG.info("

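Taken together, the hooks introduced by HADOOP-17680 (the protected Invoker, the extracted constructRpcRequest(), the protected getProtocolName() and getClientProtocolVersion() accessors, and the protected server-side call() overload) let downstream code specialize the engine without copying it. The sketch below is hypothetical; the class names and the counter are invented for illustration, and only the client-side hook is shown: it delegates to the now-protected constructRpcRequest() after some bookkeeping. A complete engine would also override getProxy() so that proxies are backed by the custom Invoker; that wiring is omitted here.

import java.lang.reflect.Method;
import java.util.concurrent.atomic.AtomicLong;

import javax.net.SocketFactory;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.ProtobufRpcEngine;

import com.google.protobuf.Message;

// Hypothetical extension for illustration only.
public class CountingRpcEngine extends ProtobufRpcEngine {

  protected static class CountingInvoker extends Invoker {
    private final AtomicLong requestsBuilt = new AtomicLong();

    protected CountingInvoker(Class<?> protocol, Client.ConnectionId connId,
        Configuration conf, SocketFactory factory) {
      super(protocol, connId, conf, factory);
    }

    @Override
    protected Writable constructRpcRequest(Method method, Message theRequest) {
      // Hook point added by HADOOP-17680: observe (or wrap) the request
      // before it is handed off to the RPC client.
      requestsBuilt.incrementAndGet();
      return super.constructRpcRequest(method, theRequest);
    }

    long getRequestsBuilt() {
      return requestsBuilt.get();
    }
  }
}
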
[hadoop] branch branch-2.10 updated: HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

2021-04-29 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 77d9c6d  HDFS-15652. Make block size from NNThroughputBenchmark 
configurable (#2416)
77d9c6d is described below

commit 77d9c6d0f75ff5ca690d9aeb2ae6a5e27418b23c
Author: Hui Fei 
AuthorDate: Wed Oct 28 09:13:25 2020 +0800

HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

(cherry picked from commit 8a6d5b9151cd4a922372835b18a9a031c9d3475e)
---
 .../server/namenode/NNThroughputBenchmark.java | 41 +++---
 .../server/namenode/TestNNThroughputBenchmark.java |  8 -
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 2147129..b120d7a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -168,6 +168,7 @@ public class NNThroughputBenchmark implements Tool {
 
 protected final String baseDir;
 protected short replication;
+protected int blockSize;
 protected int  numThreads = 0;// number of threads
 protected int  numOpsRequired = 0;// number of operations requested
 protected int  numOpsExecuted = 0;// number of operations executed
@@ -229,6 +230,7 @@ public class NNThroughputBenchmark implements Tool {
 OperationStatsBase() {
   baseDir = BASE_DIR_NAME + "/" + getOpName();
   replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
3);
+  blockSize = config.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
   numOpsRequired = 10;
   numThreads = 3;
   logLevel = Level.ERROR;
@@ -516,7 +518,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_CREATE_NAME = "create";
 static final String OP_CREATE_USAGE = 
-  "-op create [-threads T] [-files N] [-filesPerDir P] [-close]";
+"-op create [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-close]";
 
 protected FileNameGenerator nameGenerator;
 protected String[][] fileNames;
@@ -541,6 +544,9 @@ public class NNThroughputBenchmark implements Tool {
 if(args.get(i).equals("-files")) {
   if(i+1 == args.size())  printUsage();
   numOpsRequired = Integer.parseInt(args.get(++i));
+} else if (args.get(i).equals("-blockSize")) {
+  if(i+1 == args.size())  printUsage();
+  blockSize = Integer.parseInt(args.get(++i));
 } else if(args.get(i).equals("-threads")) {
   if(i+1 == args.size())  printUsage();
   numThreads = Integer.parseInt(args.get(++i));
@@ -596,7 +602,7 @@ public class NNThroughputBenchmark implements Tool {
   clientProto.create(fileNames[daemonId][inputIdx], 
FsPermission.getDefault(),
   clientName, new EnumSetWritable(EnumSet
   .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, 
-  replication, BLOCK_SIZE, CryptoProtocolVersion.supported());
+  replication, blockSize, CryptoProtocolVersion.supported());
   long end = Time.now();
   for(boolean written = !closeUponCreate; !written; 
 written = clientProto.complete(fileNames[daemonId][inputIdx],
@@ -716,7 +722,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_OPEN_NAME = "open";
 static final String OP_USAGE_ARGS = 
-  " [-threads T] [-files N] [-filesPerDir P] [-useExisting]";
+" [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-useExisting]";
 static final String OP_OPEN_USAGE = 
   "-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
 
@@ -748,6 +755,7 @@ public class NNThroughputBenchmark implements Tool {
   "-op", "create", 
   "-threads", String.valueOf(this.numThreads), 
   "-files", String.valueOf(numOpsRequired),
+  "-blockSize", String.valueOf(blockSize),
   "-filesPerDir", 
   String.valueOf(nameGenerator.getFilesPerDirectory()),
   "-close"};
@@ -778,7 +786,8 @@ public class NNThroughputBenchmark implements Tool {
 long executeOp(int daemonId, int inputIdx, String ignore) 
 throws IOException {
   long start = Time.now();
-  

[hadoop] branch branch-3.1 updated: HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

2021-04-29 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new c4c7801  HDFS-15652. Make block size from NNThroughputBenchmark 
configurable (#2416)
c4c7801 is described below

commit c4c78016eb5aa87951ef9c357df8445e35cd0d77
Author: Hui Fei 
AuthorDate: Wed Oct 28 09:13:25 2020 +0800

HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

(cherry picked from commit 8a6d5b9151cd4a922372835b18a9a031c9d3475e)
---
 .../server/namenode/NNThroughputBenchmark.java | 41 +++---
 .../server/namenode/TestNNThroughputBenchmark.java |  8 -
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 4e0bce8..19a24f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -168,6 +168,7 @@ public class NNThroughputBenchmark implements Tool {
 
 protected final String baseDir;
 protected short replication;
+protected int blockSize;
 protected int  numThreads = 0;// number of threads
 protected int  numOpsRequired = 0;// number of operations requested
 protected int  numOpsExecuted = 0;// number of operations executed
@@ -229,6 +230,7 @@ public class NNThroughputBenchmark implements Tool {
 OperationStatsBase() {
   baseDir = BASE_DIR_NAME + "/" + getOpName();
   replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
3);
+  blockSize = config.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
   numOpsRequired = 10;
   numThreads = 3;
   logLevel = Level.ERROR;
@@ -516,7 +518,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_CREATE_NAME = "create";
 static final String OP_CREATE_USAGE = 
-  "-op create [-threads T] [-files N] [-filesPerDir P] [-close]";
+"-op create [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-close]";
 
 protected FileNameGenerator nameGenerator;
 protected String[][] fileNames;
@@ -541,6 +544,9 @@ public class NNThroughputBenchmark implements Tool {
 if(args.get(i).equals("-files")) {
   if(i+1 == args.size())  printUsage();
   numOpsRequired = Integer.parseInt(args.get(++i));
+} else if (args.get(i).equals("-blockSize")) {
+  if(i+1 == args.size())  printUsage();
+  blockSize = Integer.parseInt(args.get(++i));
 } else if(args.get(i).equals("-threads")) {
   if(i+1 == args.size())  printUsage();
   numThreads = Integer.parseInt(args.get(++i));
@@ -597,7 +603,7 @@ public class NNThroughputBenchmark implements Tool {
   FsPermission.getDefault(), clientName,
   new EnumSetWritable(EnumSet
   .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true,
-  replication, BLOCK_SIZE, CryptoProtocolVersion.supported(), null);
+  replication, blockSize, CryptoProtocolVersion.supported(), null);
   long end = Time.now();
   for (boolean written = !closeUponCreate; !written;
 written = clientProto.complete(fileNames[daemonId][inputIdx],
@@ -718,7 +724,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_OPEN_NAME = "open";
 static final String OP_USAGE_ARGS = 
-  " [-threads T] [-files N] [-filesPerDir P] [-useExisting]";
+" [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-useExisting]";
 static final String OP_OPEN_USAGE = 
   "-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
 
@@ -750,6 +757,7 @@ public class NNThroughputBenchmark implements Tool {
   "-op", "create", 
   "-threads", String.valueOf(this.numThreads), 
   "-files", String.valueOf(numOpsRequired),
+  "-blockSize", String.valueOf(blockSize),
   "-filesPerDir", 
   String.valueOf(nameGenerator.getFilesPerDirectory()),
   "-close"};
@@ -780,7 +788,8 @@ public class NNThroughputBenchmark implements Tool {
 long executeOp(int daemonId, int inputIdx, String ignore) 
 throws IOException {
   long start = Time.now();
-  clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L,

[hadoop] branch branch-3.2 updated: HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

2021-04-29 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new ab4e90c  HDFS-15652. Make block size from NNThroughputBenchmark 
configurable (#2416)
ab4e90c is described below

commit ab4e90cc28d8f022b8b14416830fde64ea3a06d4
Author: Hui Fei 
AuthorDate: Wed Oct 28 09:13:25 2020 +0800

HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

(cherry picked from commit 8a6d5b9151cd4a922372835b18a9a031c9d3475e)
---
 .../server/namenode/NNThroughputBenchmark.java | 41 +++---
 .../server/namenode/TestNNThroughputBenchmark.java |  8 -
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 1a5718a..b8045eb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -169,6 +169,7 @@ public class NNThroughputBenchmark implements Tool {
 
 protected final String baseDir;
 protected short replication;
+protected int blockSize;
 protected int  numThreads = 0;// number of threads
 protected int  numOpsRequired = 0;// number of operations requested
 protected int  numOpsExecuted = 0;// number of operations executed
@@ -230,6 +231,7 @@ public class NNThroughputBenchmark implements Tool {
 OperationStatsBase() {
   baseDir = BASE_DIR_NAME + "/" + getOpName();
   replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
3);
+  blockSize = config.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
   numOpsRequired = 10;
   numThreads = 3;
   logLevel = Level.ERROR;
@@ -517,7 +519,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_CREATE_NAME = "create";
 static final String OP_CREATE_USAGE = 
-  "-op create [-threads T] [-files N] [-filesPerDir P] [-close]";
+"-op create [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-close]";
 
 protected FileNameGenerator nameGenerator;
 protected String[][] fileNames;
@@ -542,6 +545,9 @@ public class NNThroughputBenchmark implements Tool {
 if(args.get(i).equals("-files")) {
   if(i+1 == args.size())  printUsage();
   numOpsRequired = Integer.parseInt(args.get(++i));
+} else if (args.get(i).equals("-blockSize")) {
+  if(i+1 == args.size())  printUsage();
+  blockSize = Integer.parseInt(args.get(++i));
 } else if(args.get(i).equals("-threads")) {
   if(i+1 == args.size())  printUsage();
   numThreads = Integer.parseInt(args.get(++i));
@@ -598,7 +604,7 @@ public class NNThroughputBenchmark implements Tool {
   FsPermission.getDefault(), clientName,
   new EnumSetWritable(EnumSet
   .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true,
-  replication, BLOCK_SIZE, CryptoProtocolVersion.supported(), null);
+  replication, blockSize, CryptoProtocolVersion.supported(), null);
   long end = Time.now();
   for (boolean written = !closeUponCreate; !written;
 written = clientProto.complete(fileNames[daemonId][inputIdx],
@@ -719,7 +725,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_OPEN_NAME = "open";
 static final String OP_USAGE_ARGS = 
-  " [-threads T] [-files N] [-filesPerDir P] [-useExisting]";
+" [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-useExisting]";
 static final String OP_OPEN_USAGE = 
   "-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
 
@@ -751,6 +758,7 @@ public class NNThroughputBenchmark implements Tool {
   "-op", "create", 
   "-threads", String.valueOf(this.numThreads), 
   "-files", String.valueOf(numOpsRequired),
+  "-blockSize", String.valueOf(blockSize),
   "-filesPerDir", 
   String.valueOf(nameGenerator.getFilesPerDirectory()),
   "-close"};
@@ -781,7 +789,8 @@ public class NNThroughputBenchmark implements Tool {
 long executeOp(int daemonId, int inputIdx, String ignore) 
 throws IOException {
   long start = Time.now();
-  clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L,

[hadoop] branch branch-3.3 updated: HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

2021-04-29 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 9aa6106  HDFS-15652. Make block size from NNThroughputBenchmark 
configurable (#2416)
9aa6106 is described below

commit 9aa610668902d60abceb8c78523c47f25b346c0a
Author: Hui Fei 
AuthorDate: Wed Oct 28 09:13:25 2020 +0800

HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

(cherry picked from commit 8a6d5b9151cd4a922372835b18a9a031c9d3475e)
---
 .../server/namenode/NNThroughputBenchmark.java | 41 +++---
 .../server/namenode/TestNNThroughputBenchmark.java |  8 -
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 245f5be..513c609 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -169,6 +169,7 @@ public class NNThroughputBenchmark implements Tool {
 
 protected final String baseDir;
 protected short replication;
+protected int blockSize;
 protected int  numThreads = 0;// number of threads
 protected int  numOpsRequired = 0;// number of operations requested
 protected int  numOpsExecuted = 0;// number of operations executed
@@ -230,6 +231,7 @@ public class NNThroughputBenchmark implements Tool {
 OperationStatsBase() {
   baseDir = BASE_DIR_NAME + "/" + getOpName();
   replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
3);
+  blockSize = config.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
   numOpsRequired = 10;
   numThreads = 3;
   logLevel = Level.ERROR;
@@ -517,7 +519,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_CREATE_NAME = "create";
 static final String OP_CREATE_USAGE = 
-  "-op create [-threads T] [-files N] [-filesPerDir P] [-close]";
+"-op create [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-close]";
 
 protected FileNameGenerator nameGenerator;
 protected String[][] fileNames;
@@ -542,6 +545,9 @@ public class NNThroughputBenchmark implements Tool {
 if(args.get(i).equals("-files")) {
   if(i+1 == args.size())  printUsage();
   numOpsRequired = Integer.parseInt(args.get(++i));
+} else if (args.get(i).equals("-blockSize")) {
+  if(i+1 == args.size())  printUsage();
+  blockSize = Integer.parseInt(args.get(++i));
 } else if(args.get(i).equals("-threads")) {
   if(i+1 == args.size())  printUsage();
   numThreads = Integer.parseInt(args.get(++i));
@@ -598,7 +604,7 @@ public class NNThroughputBenchmark implements Tool {
   FsPermission.getDefault(), clientName,
   new EnumSetWritable(EnumSet
   .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true,
-  replication, BLOCK_SIZE, CryptoProtocolVersion.supported(), null,
+  replication, blockSize, CryptoProtocolVersion.supported(), null,
   null);
   long end = Time.now();
   for (boolean written = !closeUponCreate; !written;
@@ -720,7 +726,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_OPEN_NAME = "open";
 static final String OP_USAGE_ARGS = 
-  " [-threads T] [-files N] [-filesPerDir P] [-useExisting]";
+" [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-useExisting]";
 static final String OP_OPEN_USAGE = 
   "-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
 
@@ -752,6 +759,7 @@ public class NNThroughputBenchmark implements Tool {
   "-op", "create", 
   "-threads", String.valueOf(this.numThreads), 
   "-files", String.valueOf(numOpsRequired),
+  "-blockSize", String.valueOf(blockSize),
   "-filesPerDir", 
   String.valueOf(nameGenerator.getFilesPerDirectory()),
   "-close"};
@@ -782,7 +790,8 @@ public class NNThroughputBenchmark implements Tool {
 long executeOp(int daemonId, int inputIdx, String ignore) 
 throws IOException {
   long start = Time.now();
-  clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L, 
BLOCK_SIZE);
+  clientProto.getBlockLo

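For anyone running the benchmark, the new option slots into the existing command line shown in the usage string above. The example below is illustrative only: the launcher, classpath (the benchmark lives in the hadoop-hdfs test jar), and values are assumptions. The block size defaults to dfs.blocksize from the configuration, and -blockSize S overrides it for the run:

  hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark \
    -op create -threads 3 -files 1000 -blockSize 1048576 -filesPerDir 100 -close
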
[hadoop] branch branch-2.10 updated: HDFS-15808. Add metrics for FSNamesystem read/write lock hold long time. (#2668) Contributed by tomscut.

2021-03-06 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 68cccba  HDFS-15808. Add metrics for FSNamesystem read/write lock hold 
long time. (#2668) Contributed by tomscut.
68cccba is described below

commit 68cccba8939cdf01923dc523d450df915f9931a4
Author: tomscut 
AuthorDate: Mon Mar 1 16:35:12 2021 -0800

HDFS-15808. Add metrics for FSNamesystem read/write lock hold long time. 
(#2668) Contributed by tomscut.

(cherry picked from commit 9cb51bf106802c78b1400fba9f1d1c7e772dd5e7)
---
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  | 14 +
 .../hdfs/server/namenode/FSNamesystemLock.java | 34 ++
 2 files changed, 48 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7394c60..9c6e1c8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4161,6 +4161,20 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return fsLock.getQueueLength();
   }
 
+  @Metric(value = {"ReadLockLongHoldCount", "The number of time " +
+  "the read lock has been held for longer than the threshold"},
+  type = Metric.Type.COUNTER)
+  public long getNumOfReadLockLongHold() {
+return fsLock.getNumOfReadLockLongHold();
+  }
+
+  @Metric(value = {"WriteLockLongHoldCount", "The number of time " +
+  "the write lock has been held for longer than the threshold"},
+  type = Metric.Type.COUNTER)
+  public long getNumOfWriteLockLongHold() {
+return fsLock.getNumOfWriteLockLongHold();
+  }
+
   int getNumberOfDatanodes(DatanodeReportType type) {
 readLock();
 try {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index 384bdee..e575ea8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -101,6 +101,16 @@ class FSNamesystemLock {
   private final AtomicLong timeStampOfLastReadLockReportMs = new AtomicLong(0);
   /** Longest time (ms) a read lock was held since the last report. */
   private final AtomicLong longestReadLockHeldIntervalMs = new AtomicLong(0);
+  /**
+   * The number of times the read lock
+   * has been held longer than the threshold.
+   */
+  private final AtomicLong numReadLockLongHold = new AtomicLong(0);
+  /**
+   * The number of times the write lock
+   * has been held for longer than the threshold.
+   */
+  private final AtomicLong numWriteLockLongHold = new AtomicLong(0);
 
   @VisibleForTesting
   static final String OP_NAME_OTHER = "OTHER";
@@ -168,6 +178,7 @@ class FSNamesystemLock {
 final long readLockIntervalMs =
 TimeUnit.NANOSECONDS.toMillis(readLockIntervalNanos);
 if (needReport && readLockIntervalMs >= this.readLockReportingThresholdMs) 
{
+  numReadLockLongHold.incrementAndGet();
   long localLongestReadLock;
   do {
 localLongestReadLock = longestReadLockHeldIntervalMs.get();
@@ -224,6 +235,7 @@ class FSNamesystemLock {
 LogAction logAction = LogThrottlingHelper.DO_NOT_LOG;
 if (needReport &&
 writeLockIntervalMs >= this.writeLockReportingThresholdMs) {
+  numWriteLockLongHold.incrementAndGet();
   logAction = writeLockReportLogger
   .record("write", currentTimeMs, writeLockIntervalMs);
 }
@@ -264,6 +276,28 @@ class FSNamesystemLock {
   }
 
   /**
+   * Returns the number of times the read lock
+   * has been held longer than the threshold.
+   *
+   * @return long - Number of times the read lock
+   * has been held longer than the threshold
+   */
+  public long getNumOfReadLockLongHold() {
+return numReadLockLongHold.get();
+  }
+
+  /**
+   * Returns the number of times the write lock
+   * has been held longer than the threshold.
+   *
+   * @return long - Number of times the write lock
+   * has been held longer than the threshold.
+   */
+  public long getNumOfWriteLockLongHold() {
+return numWriteLockLongHold.get();
+  }
+
+  /**
* Returns the QueueLength of waiting threads.
*
* A larger number indicates greater lock contention.



[hadoop] branch branch-3.1 updated: HDFS-15808. Add metrics for FSNamesystem read/write lock hold long time. (#2668) Contributed by tomscut.

2021-03-06 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 7b925bc  HDFS-15808. Add metrics for FSNamesystem read/write lock hold 
long time. (#2668) Contributed by tomscut.
7b925bc is described below

commit 7b925bc48660847aad67948017808768bf61474a
Author: tomscut 
AuthorDate: Mon Mar 1 16:35:12 2021 -0800

HDFS-15808. Add metrics for FSNamesystem read/write lock hold long time. 
(#2668) Contributed by tomscut.

(cherry picked from commit 9cb51bf106802c78b1400fba9f1d1c7e772dd5e7)
---
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  | 14 +
 .../hdfs/server/namenode/FSNamesystemLock.java | 34 ++
 2 files changed, 48 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b13ba0a..fd4fa65 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4379,6 +4379,20 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return fsLock.getQueueLength();
   }
 
+  @Metric(value = {"ReadLockLongHoldCount", "The number of time " +
+  "the read lock has been held for longer than the threshold"},
+  type = Metric.Type.COUNTER)
+  public long getNumOfReadLockLongHold() {
+return fsLock.getNumOfReadLockLongHold();
+  }
+
+  @Metric(value = {"WriteLockLongHoldCount", "The number of time " +
+  "the write lock has been held for longer than the threshold"},
+  type = Metric.Type.COUNTER)
+  public long getNumOfWriteLockLongHold() {
+return fsLock.getNumOfWriteLockLongHold();
+  }
+
   int getNumberOfDatanodes(DatanodeReportType type) {
 readLock();
 try {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index 8b1b44f..5eeb4cb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -101,6 +101,16 @@ class FSNamesystemLock {
   private final AtomicLong timeStampOfLastReadLockReportMs = new AtomicLong(0);
   /** Longest time (ms) a read lock was held since the last report. */
   private final AtomicLong longestReadLockHeldIntervalMs = new AtomicLong(0);
+  /**
+   * The number of times the read lock
+   * has been held longer than the threshold.
+   */
+  private final AtomicLong numReadLockLongHold = new AtomicLong(0);
+  /**
+   * The number of times the write lock
+   * has been held for longer than the threshold.
+   */
+  private final AtomicLong numWriteLockLongHold = new AtomicLong(0);
 
   @VisibleForTesting
   static final String OP_NAME_OTHER = "OTHER";
@@ -168,6 +178,7 @@ class FSNamesystemLock {
 final long readLockIntervalMs =
 TimeUnit.NANOSECONDS.toMillis(readLockIntervalNanos);
 if (needReport && readLockIntervalMs >= this.readLockReportingThresholdMs) 
{
+  numReadLockLongHold.incrementAndGet();
   long localLongestReadLock;
   do {
 localLongestReadLock = longestReadLockHeldIntervalMs.get();
@@ -245,6 +256,7 @@ class FSNamesystemLock {
 LogAction logAction = LogThrottlingHelper.DO_NOT_LOG;
 if (needReport &&
 writeLockIntervalMs >= this.writeLockReportingThresholdMs) {
+  numWriteLockLongHold.incrementAndGet();
   logAction = writeLockReportLogger
   .record("write", currentTimeMs, writeLockIntervalMs);
 }
@@ -283,6 +295,28 @@ class FSNamesystemLock {
   }
 
   /**
+   * Returns the number of times the read lock
+   * has been held longer than the threshold.
+   *
+   * @return long - Number of times the read lock
+   * has been held longer than the threshold
+   */
+  public long getNumOfReadLockLongHold() {
+return numReadLockLongHold.get();
+  }
+
+  /**
+   * Returns the number of times the write lock
+   * has been held longer than the threshold.
+   *
+   * @return long - Number of times the write lock
+   * has been held longer than the threshold.
+   */
+  public long getNumOfWriteLockLongHold() {
+return numWriteLockLongHold.get();
+  }
+
+  /**
* Returns the QueueLength of waiting threads.
*
* A larger number indicates greater lock contention.



[hadoop] branch branch-3.2 updated: HDFS-15808. Add metrics for FSNamesystem read/write lock hold long time. (#2668) Contributed by tomscut.

2021-03-06 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 03ac2e4  HDFS-15808. Add metrics for FSNamesystem read/write lock hold 
long time. (#2668) Contributed by tomscut.
03ac2e4 is described below

commit 03ac2e41c05be67f5e250fcaa477d7e25590b1e0
Author: tomscut 
AuthorDate: Mon Mar 1 16:35:12 2021 -0800

HDFS-15808. Add metrics for FSNamesystem read/write lock hold long time. 
(#2668) Contributed by tomscut.

(cherry picked from commit 9cb51bf106802c78b1400fba9f1d1c7e772dd5e7)
---
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  | 14 +
 .../hdfs/server/namenode/FSNamesystemLock.java | 34 ++
 2 files changed, 48 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 319a0c2..f66771d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4440,6 +4440,20 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return fsLock.getQueueLength();
   }
 
+  @Metric(value = {"ReadLockLongHoldCount", "The number of time " +
+  "the read lock has been held for longer than the threshold"},
+  type = Metric.Type.COUNTER)
+  public long getNumOfReadLockLongHold() {
+return fsLock.getNumOfReadLockLongHold();
+  }
+
+  @Metric(value = {"WriteLockLongHoldCount", "The number of time " +
+  "the write lock has been held for longer than the threshold"},
+  type = Metric.Type.COUNTER)
+  public long getNumOfWriteLockLongHold() {
+return fsLock.getNumOfWriteLockLongHold();
+  }
+
   int getNumberOfDatanodes(DatanodeReportType type) {
 readLock();
 try {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index 98b4e6d..ae549fe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -101,6 +101,16 @@ class FSNamesystemLock {
   private final AtomicLong timeStampOfLastReadLockReportMs = new AtomicLong(0);
   /** Longest time (ms) a read lock was held since the last report. */
   private final AtomicLong longestReadLockHeldIntervalMs = new AtomicLong(0);
+  /**
+   * The number of times the read lock
+   * has been held longer than the threshold.
+   */
+  private final AtomicLong numReadLockLongHold = new AtomicLong(0);
+  /**
+   * The number of times the write lock
+   * has been held for longer than the threshold.
+   */
+  private final AtomicLong numWriteLockLongHold = new AtomicLong(0);
 
   @VisibleForTesting
   static final String OP_NAME_OTHER = "OTHER";
@@ -168,6 +178,7 @@ class FSNamesystemLock {
 final long readLockIntervalMs =
 TimeUnit.NANOSECONDS.toMillis(readLockIntervalNanos);
 if (needReport && readLockIntervalMs >= this.readLockReportingThresholdMs) 
{
+  numReadLockLongHold.incrementAndGet();
   long localLongestReadLock;
   do {
 localLongestReadLock = longestReadLockHeldIntervalMs.get();
@@ -245,6 +256,7 @@ class FSNamesystemLock {
 LogAction logAction = LogThrottlingHelper.DO_NOT_LOG;
 if (needReport &&
 writeLockIntervalMs >= this.writeLockReportingThresholdMs) {
+  numWriteLockLongHold.incrementAndGet();
   logAction = writeLockReportLogger
   .record("write", currentTimeMs, writeLockIntervalMs);
 }
@@ -283,6 +295,28 @@ class FSNamesystemLock {
   }
 
   /**
+   * Returns the number of times the read lock
+   * has been held longer than the threshold.
+   *
+   * @return long - Number of times the read lock
+   * has been held longer than the threshold
+   */
+  public long getNumOfReadLockLongHold() {
+return numReadLockLongHold.get();
+  }
+
+  /**
+   * Returns the number of times the write lock
+   * has been held longer than the threshold.
+   *
+   * @return long - Number of times the write lock
+   * has been held longer than the threshold.
+   */
+  public long getNumOfWriteLockLongHold() {
+return numWriteLockLongHold.get();
+  }
+
+  /**
* Returns the QueueLength of waiting threads.
*
* A larger number indicates greater lock contention.



[hadoop] branch branch-3.3 updated: HDFS-15808. Add metrics for FSNamesystem read/write lock hold long time. (#2668) Contributed by tomscut.

2021-03-06 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new c498ef4  HDFS-15808. Add metrics for FSNamesystem read/write lock hold 
long time. (#2668) Contributed by tomscut.
c498ef4 is described below

commit c498ef4b6dc7960f1f4699cafea598b3d71ca885
Author: tomscut 
AuthorDate: Mon Mar 1 16:35:12 2021 -0800

HDFS-15808. Add metrics for FSNamesystem read/write lock hold long time. 
(#2668) Contributed by tomscut.

(cherry picked from commit 9cb51bf106802c78b1400fba9f1d1c7e772dd5e7)
---
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  | 14 +
 .../hdfs/server/namenode/FSNamesystemLock.java | 34 ++
 2 files changed, 48 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6a6405b..fae094a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4758,6 +4758,20 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return fsLock.getQueueLength();
   }
 
+  @Metric(value = {"ReadLockLongHoldCount", "The number of time " +
+  "the read lock has been held for longer than the threshold"},
+  type = Metric.Type.COUNTER)
+  public long getNumOfReadLockLongHold() {
+return fsLock.getNumOfReadLockLongHold();
+  }
+
+  @Metric(value = {"WriteLockLongHoldCount", "The number of time " +
+  "the write lock has been held for longer than the threshold"},
+  type = Metric.Type.COUNTER)
+  public long getNumOfWriteLockLongHold() {
+return fsLock.getNumOfWriteLockLongHold();
+  }
+
   int getNumberOfDatanodes(DatanodeReportType type) {
 readLock();
 try {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index 811cd23..6502c4c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -108,6 +108,16 @@ class FSNamesystemLock {
   private final AtomicReference longestReadLockHeldInfo =
   new AtomicReference<>(new LockHeldInfo(0, 0, null));
   private LockHeldInfo longestWriteLockHeldInfo = new LockHeldInfo(0, 0, null);
+  /**
+   * The number of times the read lock
+   * has been held longer than the threshold.
+   */
+  private final AtomicLong numReadLockLongHold = new AtomicLong(0);
+  /**
+   * The number of times the write lock
+   * has been held for longer than the threshold.
+   */
+  private final AtomicLong numWriteLockLongHold = new AtomicLong(0);
 
   @VisibleForTesting
   static final String OP_NAME_OTHER = "OTHER";
@@ -176,6 +186,7 @@ class FSNamesystemLock {
 final long readLockIntervalMs =
 TimeUnit.NANOSECONDS.toMillis(readLockIntervalNanos);
 if (needReport && readLockIntervalMs >= this.readLockReportingThresholdMs) 
{
+  numReadLockLongHold.incrementAndGet();
   LockHeldInfo localLockHeldInfo;
   do {
 localLockHeldInfo = longestReadLockHeldInfo.get();
@@ -253,6 +264,7 @@ class FSNamesystemLock {
 LogAction logAction = LogThrottlingHelper.DO_NOT_LOG;
 if (needReport &&
 writeLockIntervalMs >= this.writeLockReportingThresholdMs) {
+  numWriteLockLongHold.incrementAndGet();
   if (longestWriteLockHeldInfo.getIntervalMs() < writeLockIntervalMs) {
 longestWriteLockHeldInfo =
 new LockHeldInfo(currentTimeMs, writeLockIntervalMs,
@@ -303,6 +315,28 @@ class FSNamesystemLock {
   }
 
   /**
+   * Returns the number of times the read lock
+   * has been held longer than the threshold.
+   *
+   * @return long - Number of times the read lock
+   * has been held longer than the threshold
+   */
+  public long getNumOfReadLockLongHold() {
+return numReadLockLongHold.get();
+  }
+
+  /**
+   * Returns the number of times the write lock
+   * has been held longer than the threshold.
+   *
+   * @return long - Number of times the write lock
+   * has been held longer than the threshold.
+   */
+  public long getNumOfWriteLockLongHold() {
+return numWriteLockLongHold.get();
+  }
+
+  /**
* Returns the QueueLength of waiting threads.
*
* A larger number indicates greater lock contention.



[hadoop] branch trunk updated: HDFS-15808. Add metrics for FSNamesystem read/write lock hold long time. (#2668) Contributed by tomscut.

2021-03-06 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9cb51bf  HDFS-15808. Add metrics for FSNamesystem read/write lock hold 
long time. (#2668) Contributed by tomscut.
9cb51bf is described below

commit 9cb51bf106802c78b1400fba9f1d1c7e772dd5e7
Author: tomscut 
AuthorDate: Mon Mar 1 16:35:12 2021 -0800

HDFS-15808. Add metrics for FSNamesystem read/write lock hold long time. 
(#2668) Contributed by tomscut.
---
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  | 14 +
 .../hdfs/server/namenode/FSNamesystemLock.java | 34 ++
 2 files changed, 48 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 22b4b92..ff03d7b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4830,6 +4830,20 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return fsLock.getQueueLength();
   }
 
+  @Metric(value = {"ReadLockLongHoldCount", "The number of time " +
+  "the read lock has been held for longer than the threshold"},
+  type = Metric.Type.COUNTER)
+  public long getNumOfReadLockLongHold() {
+return fsLock.getNumOfReadLockLongHold();
+  }
+
+  @Metric(value = {"WriteLockLongHoldCount", "The number of times " +
+  "the write lock has been held for longer than the threshold"},
+  type = Metric.Type.COUNTER)
+  public long getNumOfWriteLockLongHold() {
+return fsLock.getNumOfWriteLockLongHold();
+  }
+
   int getNumberOfDatanodes(DatanodeReportType type) {
 readLock();
 try {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index c03cfd5..842c6b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -109,6 +109,16 @@ class FSNamesystemLock {
   private final AtomicReference longestReadLockHeldInfo =
   new AtomicReference<>(new LockHeldInfo());
   private LockHeldInfo longestWriteLockHeldInfo = new LockHeldInfo();
+  /**
+   * The number of times the read lock
+   * has been held longer than the threshold.
+   */
+  private final AtomicLong numReadLockLongHold = new AtomicLong(0);
+  /**
+   * The number of times the write lock
+   * has been held for longer than the threshold.
+   */
+  private final AtomicLong numWriteLockLongHold = new AtomicLong(0);
 
   @VisibleForTesting
   static final String OP_NAME_OTHER = "OTHER";
@@ -182,6 +192,7 @@ class FSNamesystemLock {
 final long readLockIntervalMs =
 TimeUnit.NANOSECONDS.toMillis(readLockIntervalNanos);
 if (needReport && readLockIntervalMs >= this.readLockReportingThresholdMs) 
{
+  numReadLockLongHold.incrementAndGet();
   String lockReportInfo = null;
   boolean done = false;
   while (!done) {
@@ -298,6 +309,7 @@ class FSNamesystemLock {
 LogAction logAction = LogThrottlingHelper.DO_NOT_LOG;
 if (needReport &&
 writeLockIntervalMs >= this.writeLockReportingThresholdMs) {
+  numWriteLockLongHold.incrementAndGet();
   if (longestWriteLockHeldInfo.getIntervalMs() <= writeLockIntervalMs) {
 String lockReportInfo = lockReportInfoSupplier != null ? " (" +
 lockReportInfoSupplier.get() + ")" : "";
@@ -363,6 +375,28 @@ class FSNamesystemLock {
   }
 
   /**
+   * Returns the number of times the read lock
+   * has been held longer than the threshold.
+   *
+   * @return long - Number of times the read lock
+   * has been held longer than the threshold
+   */
+  public long getNumOfReadLockLongHold() {
+return numReadLockLongHold.get();
+  }
+
+  /**
+   * Returns the number of times the write lock
+   * has been held longer than the threshold.
+   *
+   * @return long - Number of times the write lock
+   * has been held longer than the threshold.
+   */
+  public long getNumOfWriteLockLongHold() {
+return numWriteLockLongHold.get();
+  }
+
+  /**
* Add the lock hold time for a recent operation to the metrics.
* @param operationName Name of the operation for which to record the time
* @param value Length of time the lock was held (n
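
Because the two getters on FSNamesystem are annotated with @Metric, the
new counters surface through the NameNode metrics system and its JMX
beans. The sketch below reads them in-process; the bean name
Hadoop:service=NameNode,name=FSNamesystem and the attribute names
ReadLockLongHoldCount / WriteLockLongHoldCount are assumptions based on
Hadoop's usual metric-to-JMX naming and should be verified against a
running NameNode.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Reads the assumed counter attributes from the platform MBean server.
// This only works inside the NameNode JVM; remote reads need a JMXConnector
// or the NameNode's /jmx HTTP endpoint instead.
public class LockHoldMetricsReader {
  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    ObjectName fsns = new ObjectName("Hadoop:service=NameNode,name=FSNamesystem");
    Object readLongHolds = server.getAttribute(fsns, "ReadLockLongHoldCount");
    Object writeLongHolds = server.getAttribute(fsns, "WriteLockLongHoldCount");
    System.out.println("Read lock long holds:  " + readLongHolds);
    System.out.println("Write lock long holds: " + writeLongHolds);
  }
}
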

[hadoop] branch branch-2.10 updated: HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed by Qi Zhu.

2021-03-01 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new cdd3982  HDFS-15849. ExpiredHeartbeats metric should be of 
Type.COUNTER. Contributed by Qi Zhu.
cdd3982 is described below

commit cdd3982db42ede60fe2d5752951dfe95aada19c9
Author: Konstantin V Shvachko 
AuthorDate: Mon Mar 1 16:48:48 2021 -0800

HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed 
by Qi Zhu.

(cherry picked from commit 9501c698f4789188f744f3a1fba7f1a9bb9b8aa4)
---
 .../java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java  | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 24cf469..7394c60 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4007,7 +4007,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return blockManager.getMissingReplOneBlocksCount();
   }
   
-  @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
+  @Metric(value = {"ExpiredHeartbeats", "Number of expired heartbeats"},
+  type = Metric.Type.COUNTER)
   public int getExpiredHeartbeats() {
 return datanodeStatistics.getExpiredHeartbeats();
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index fea377f..1c42c70 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -17,7 +17,7 @@
 */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 
@@ -143,7 +143,7 @@ public class TestDatanodeReport {
   assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
 
   Thread.sleep(5000);
-  assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
+  assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
 } finally {
   cluster.shutdown();
 }
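
The functional change here is only the metric's declared type: marking
ExpiredHeartbeats as Metric.Type.COUNTER tells consumers the value is
monotonically increasing, which is why the test now reads it with
assertCounter instead of assertGauge. The stand-alone illustration below
uses an invented metrics source; the class, metric name, and context are
hypothetical and not part of HDFS.

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;

// Hypothetical metrics source showing the gauge-vs-counter distinction.
@Metrics(context = "example")
public class HeartbeatMetricsExample {
  private final AtomicInteger expired = new AtomicInteger(0);

  // COUNTER: monotonically increasing, so monitoring systems can derive rates.
  @Metric(value = {"ExpiredHeartbeatsExample", "Number of expired heartbeats"},
      type = Metric.Type.COUNTER)
  public int getExpiredHeartbeats() {
    return expired.get();
  }

  public void onHeartbeatExpired() {
    expired.incrementAndGet();
  }
}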


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed by Qi Zhu.

2021-03-01 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 78bd68a  HDFS-15849. ExpiredHeartbeats metric should be of 
Type.COUNTER. Contributed by Qi Zhu.
78bd68a is described below

commit 78bd68a0b8c09af8017cccdadaa6dd0edf3a9071
Author: Konstantin V Shvachko 
AuthorDate: Mon Mar 1 16:48:48 2021 -0800

HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed 
by Qi Zhu.

(cherry picked from commit 9501c698f4789188f744f3a1fba7f1a9bb9b8aa4)
---
 .../java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java  | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index edcbd0f..b13ba0a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4188,7 +4188,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return blockManager.getMissingReplOneBlocksCount();
   }
   
-  @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
+  @Metric(value = {"ExpiredHeartbeats", "Number of expired heartbeats"},
+  type = Metric.Type.COUNTER)
   public int getExpiredHeartbeats() {
 return datanodeStatistics.getExpiredHeartbeats();
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index fea377f..1c42c70 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -17,7 +17,7 @@
 */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 
@@ -143,7 +143,7 @@ public class TestDatanodeReport {
   assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
 
   Thread.sleep(5000);
-  assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
+  assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
 } finally {
   cluster.shutdown();
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed by Qi Zhu.

2021-03-01 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 1d48f3b  HDFS-15849. ExpiredHeartbeats metric should be of 
Type.COUNTER. Contributed by Qi Zhu.
1d48f3b is described below

commit 1d48f3b44c815193854e754b562dbf00a72f9b32
Author: Konstantin V Shvachko 
AuthorDate: Mon Mar 1 16:48:48 2021 -0800

HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed 
by Qi Zhu.

(cherry picked from commit 9501c698f4789188f744f3a1fba7f1a9bb9b8aa4)
---
 .../java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java  | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b543311..319a0c2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4249,7 +4249,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return blockManager.getMissingReplOneBlocksCount();
   }
   
-  @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
+  @Metric(value = {"ExpiredHeartbeats", "Number of expired heartbeats"},
+  type = Metric.Type.COUNTER)
   public int getExpiredHeartbeats() {
 return datanodeStatistics.getExpiredHeartbeats();
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index b4d6fc9..69dbf64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -17,7 +17,7 @@
 */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 
@@ -143,7 +143,7 @@ public class TestDatanodeReport {
   assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
 
   Thread.sleep(5000);
-  assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
+  assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
 } finally {
   cluster.shutdown();
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed by Qi Zhu.

2021-03-01 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 56679e8  HDFS-15849. ExpiredHeartbeats metric should be of 
Type.COUNTER. Contributed by Qi Zhu.
56679e8 is described below

commit 56679e83bf57dbb0c52858f5916d094d1e3853a5
Author: Konstantin V Shvachko 
AuthorDate: Mon Mar 1 16:48:48 2021 -0800

HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed 
by Qi Zhu.

(cherry picked from commit 9501c698f4789188f744f3a1fba7f1a9bb9b8aa4)
---
 .../java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java  | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b706e67..6a6405b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4560,7 +4560,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return blockManager.getMissingReplOneBlocksCount();
   }
   
-  @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
+  @Metric(value = {"ExpiredHeartbeats", "Number of expired heartbeats"},
+  type = Metric.Type.COUNTER)
   public int getExpiredHeartbeats() {
 return datanodeStatistics.getExpiredHeartbeats();
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index b4d6fc9..69dbf64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -17,7 +17,7 @@
 */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 
@@ -143,7 +143,7 @@ public class TestDatanodeReport {
   assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
 
   Thread.sleep(5000);
-  assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
+  assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
 } finally {
   cluster.shutdown();
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed by Qi Zhu.

2021-03-01 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9501c69  HDFS-15849. ExpiredHeartbeats metric should be of 
Type.COUNTER. Contributed by Qi Zhu.
9501c69 is described below

commit 9501c698f4789188f744f3a1fba7f1a9bb9b8aa4
Author: Konstantin V Shvachko 
AuthorDate: Mon Mar 1 16:48:48 2021 -0800

HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed 
by Qi Zhu.
---
 .../java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java  | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index e98a59d..22b4b92 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4632,7 +4632,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return blockManager.getMissingReplOneBlocksCount();
   }
   
-  @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
+  @Metric(value = {"ExpiredHeartbeats", "Number of expired heartbeats"},
+  type = Metric.Type.COUNTER)
   public int getExpiredHeartbeats() {
 return datanodeStatistics.getExpiredHeartbeats();
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index b4d6fc9..69dbf64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -17,7 +17,7 @@
 */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 
@@ -143,7 +143,7 @@ public class TestDatanodeReport {
   assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
 
   Thread.sleep(5000);
-  assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
+  assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
 } finally {
   cluster.shutdown();
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2.10 updated: HDFS-15632. AbstractContractDeleteTest should set recursive parameter to true for recursive test cases. Contributed by Anton Kutuzov.

2021-01-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 0ed997a  HDFS-15632. AbstractContractDeleteTest should set recursive 
parameter to true for recursive test cases. Contributed by Anton Kutuzov.
0ed997a is described below

commit 0ed997abababef26ac06623e1a663d806fdbb17f
Author: Anton Kutuzov 
AuthorDate: Wed Jan 20 18:38:02 2021 +0300

HDFS-15632. AbstractContractDeleteTest should set recursive parameter to 
true for recursive test cases. Contributed by Anton Kutuzov.

(cherry picked from commit 91d4ba57c5b85379303ac8fb2a1a03ba10b07d4e)
---
 .../org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java  | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
index 328c8e1..08df1d4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
@@ -49,18 +49,17 @@ public abstract class AbstractContractDeleteTest extends
 Path path = path("testDeleteNonexistentPathRecursive");
 assertPathDoesNotExist("leftover", path);
 ContractTestUtils.rejectRootOperation(path);
-assertFalse("Returned true attempting to delete"
+assertFalse("Returned true attempting to recursively delete"
 + " a nonexistent path " + path,
-getFileSystem().delete(path, false));
+getFileSystem().delete(path, true));
   }
 
-
   @Test
   public void testDeleteNonexistentPathNonRecursive() throws Throwable {
 Path path = path("testDeleteNonexistentPathNonRecursive");
 assertPathDoesNotExist("leftover", path);
 ContractTestUtils.rejectRootOperation(path);
-assertFalse("Returned true attempting to recursively delete"
+assertFalse("Returned true attempting to non recursively delete"
 + " a nonexistent path " + path,
 getFileSystem().delete(path, false));
   }
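
The fix matters because FileSystem.delete(Path, boolean) is specified to
return false for a path that does not exist regardless of the recursive
flag, and the recursive test case was accidentally exercising the
non-recursive branch. A small sketch of that contract against the local
file system follows; the temporary path is arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Demonstrates the delete(Path, boolean) contract the corrected test exercises.
public class DeleteContractExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path missing = new Path("/tmp/does-not-exist-" + System.nanoTime());

    // For a nonexistent path, delete() returns false for both variants.
    boolean recursive = fs.delete(missing, true);
    boolean nonRecursive = fs.delete(missing, false);
    System.out.println(recursive + " " + nonRecursive);  // prints: false false
  }
}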


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: HDFS-15632. AbstractContractDeleteTest should set recursive parameter to true for recursive test cases. Contributed by Anton Kutuzov.

2021-01-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 397ff30  HDFS-15632. AbstractContractDeleteTest should set recursive 
parameter to true for recursive test cases. Contributed by Anton Kutuzov.
397ff30 is described below

commit 397ff302b323d737909bb6717a849dcfa33d67a0
Author: Anton Kutuzov 
AuthorDate: Wed Jan 20 18:38:02 2021 +0300

HDFS-15632. AbstractContractDeleteTest should set recursive parameter to 
true for recursive test cases. Contributed by Anton Kutuzov.

(cherry picked from commit 91d4ba57c5b85379303ac8fb2a1a03ba10b07d4e)
---
 .../org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java  | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
index 328c8e1..08df1d4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
@@ -49,18 +49,17 @@ public abstract class AbstractContractDeleteTest extends
 Path path = path("testDeleteNonexistentPathRecursive");
 assertPathDoesNotExist("leftover", path);
 ContractTestUtils.rejectRootOperation(path);
-assertFalse("Returned true attempting to delete"
+assertFalse("Returned true attempting to recursively delete"
 + " a nonexistent path " + path,
-getFileSystem().delete(path, false));
+getFileSystem().delete(path, true));
   }
 
-
   @Test
   public void testDeleteNonexistentPathNonRecursive() throws Throwable {
 Path path = path("testDeleteNonexistentPathNonRecursive");
 assertPathDoesNotExist("leftover", path);
 ContractTestUtils.rejectRootOperation(path);
-assertFalse("Returned true attempting to recursively delete"
+assertFalse("Returned true attempting to non recursively delete"
 + " a nonexistent path " + path,
 getFileSystem().delete(path, false));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HDFS-15632. AbstractContractDeleteTest should set recursive parameter to true for recursive test cases. Contributed by Anton Kutuzov.

2021-01-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 16e908e  HDFS-15632. AbstractContractDeleteTest should set recursive 
parameter to true for recursive test cases. Contributed by Anton Kutuzov.
16e908e is described below

commit 16e908e7577d6aea1a7c227e65b96213a9b1
Author: Anton Kutuzov 
AuthorDate: Wed Jan 20 18:38:02 2021 +0300

HDFS-15632. AbstractContractDeleteTest should set recursive parameter to 
true for recursive test cases. Contributed by Anton Kutuzov.

(cherry picked from commit 91d4ba57c5b85379303ac8fb2a1a03ba10b07d4e)
---
 .../org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java  | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
index 328c8e1..08df1d4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
@@ -49,18 +49,17 @@ public abstract class AbstractContractDeleteTest extends
 Path path = path("testDeleteNonexistentPathRecursive");
 assertPathDoesNotExist("leftover", path);
 ContractTestUtils.rejectRootOperation(path);
-assertFalse("Returned true attempting to delete"
+assertFalse("Returned true attempting to recursively delete"
 + " a nonexistent path " + path,
-getFileSystem().delete(path, false));
+getFileSystem().delete(path, true));
   }
 
-
   @Test
   public void testDeleteNonexistentPathNonRecursive() throws Throwable {
 Path path = path("testDeleteNonexistentPathNonRecursive");
 assertPathDoesNotExist("leftover", path);
 ContractTestUtils.rejectRootOperation(path);
-assertFalse("Returned true attempting to recursively delete"
+assertFalse("Returned true attempting to non recursively delete"
 + " a nonexistent path " + path,
 getFileSystem().delete(path, false));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HDFS-15632. AbstractContractDeleteTest should set recursive parameter to true for recursive test cases. Contributed by Anton Kutuzov.

2021-01-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new dcf6d77  HDFS-15632. AbstractContractDeleteTest should set recursive 
parameter to true for recursive test cases. Contributed by Anton Kutuzov.
dcf6d77 is described below

commit dcf6d77279169386837179017dbd75290df17cc8
Author: Anton Kutuzov 
AuthorDate: Wed Jan 20 18:38:02 2021 +0300

HDFS-15632. AbstractContractDeleteTest should set recursive parameter to 
true for recursive test cases. Contributed by Anton Kutuzov.

(cherry picked from commit 91d4ba57c5b85379303ac8fb2a1a03ba10b07d4e)
---
 .../org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java  | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
index 328c8e1..08df1d4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
@@ -49,18 +49,17 @@ public abstract class AbstractContractDeleteTest extends
 Path path = path("testDeleteNonexistentPathRecursive");
 assertPathDoesNotExist("leftover", path);
 ContractTestUtils.rejectRootOperation(path);
-assertFalse("Returned true attempting to delete"
+assertFalse("Returned true attempting to recursively delete"
 + " a nonexistent path " + path,
-getFileSystem().delete(path, false));
+getFileSystem().delete(path, true));
   }
 
-
   @Test
   public void testDeleteNonexistentPathNonRecursive() throws Throwable {
 Path path = path("testDeleteNonexistentPathNonRecursive");
 assertPathDoesNotExist("leftover", path);
 ContractTestUtils.rejectRootOperation(path);
-assertFalse("Returned true attempting to recursively delete"
+assertFalse("Returned true attempting to non recursively delete"
 + " a nonexistent path " + path,
 getFileSystem().delete(path, false));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-15632. AbstractContractDeleteTest should set recursive parameter to true for recursive test cases. Contributed by Anton Kutuzov.

2021-01-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 91d4ba5  HDFS-15632. AbstractContractDeleteTest should set recursive 
parameter to true for recursive test cases. Contributed by Anton Kutuzov.
91d4ba5 is described below

commit 91d4ba57c5b85379303ac8fb2a1a03ba10b07d4e
Author: Anton Kutuzov 
AuthorDate: Wed Jan 20 18:38:02 2021 +0300

HDFS-15632. AbstractContractDeleteTest should set recursive parameter to 
true for recursive test cases. Contributed by Anton Kutuzov.
---
 .../org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java  | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
index 328c8e1..08df1d4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
@@ -49,18 +49,17 @@ public abstract class AbstractContractDeleteTest extends
 Path path = path("testDeleteNonexistentPathRecursive");
 assertPathDoesNotExist("leftover", path);
 ContractTestUtils.rejectRootOperation(path);
-assertFalse("Returned true attempting to delete"
+assertFalse("Returned true attempting to recursively delete"
 + " a nonexistent path " + path,
-getFileSystem().delete(path, false));
+getFileSystem().delete(path, true));
   }
 
-
   @Test
   public void testDeleteNonexistentPathNonRecursive() throws Throwable {
 Path path = path("testDeleteNonexistentPathNonRecursive");
 assertPathDoesNotExist("leftover", path);
 ContractTestUtils.rejectRootOperation(path);
-assertFalse("Returned true attempting to recursively delete"
+assertFalse("Returned true attempting to non recursively delete"
 + " a nonexistent path " + path,
 getFileSystem().delete(path, false));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2.10 updated: HADOOP-16208. Do Not Log InterruptedException in Client.

2020-12-22 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 16fd00d  HADOOP-16208. Do Not Log InterruptedException in Client.
16fd00d is described below

commit 16fd00d62de5ee42fc2abeafdcb186bbfbdff7af
Author: David Mollitor 
AuthorDate: Thu Apr 4 21:13:09 2019 +0100

HADOOP-16208. Do Not Log InterruptedException in Client.

Contributed by David Mollitor.

(cherry picked from commit c90736350ba158c7872a39426e7a29c5e5e0bb48)
---
 .../hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index bcabf00..ed9def1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1456,10 +1456,12 @@ public class Client implements AutoCloseable {
 connection.sendRpcRequest(call); // send the rpc 
request
   } catch (RejectedExecutionException e) {
 throw new IOException("connection has been closed", e);
-  } catch (InterruptedException e) {
+  } catch (InterruptedException ie) {
 Thread.currentThread().interrupt();
-LOG.warn("interrupted waiting to send rpc request to server", e);
-throw new IOException(e);
+IOException ioe = new InterruptedIOException(
+"Interrupted waiting to send RPC request to server");
+ioe.initCause(ie);
+throw ioe;
   }
 } catch(Exception e) {
   if (isAsynchronousMode()) {
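
The change replaces warn-and-wrap with the standard interrupt-handling
idiom: restore the thread's interrupt status and propagate an
InterruptedIOException so the caller decides how to react. The minimal,
self-contained sketch below shows the same idiom; the queue-based method
is illustrative and is not the actual Client code.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.BlockingQueue;

// Illustrative helper, not Hadoop code: translate InterruptedException into
// InterruptedIOException while preserving the thread's interrupt status.
public final class InterruptTranslation {
  private InterruptTranslation() {}

  public static <T> void enqueueRequest(BlockingQueue<T> sendQueue, T request)
      throws IOException {
    try {
      sendQueue.put(request);                 // may block and be interrupted
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();     // preserve the interrupt for callers
      IOException ioe = new InterruptedIOException(
          "Interrupted waiting to send RPC request to server");
      ioe.initCause(ie);
      throw ioe;                              // no logging; the caller decides
    }
  }
}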


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2.10 updated: HDFS-15623. Respect configured values of rpc.engine (#2403) Contributed by Hector Chaverri.

2020-11-06 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 9160b0e  HDFS-15623. Respect configured values of rpc.engine (#2403) 
Contributed by Hector Chaverri.
9160b0e is described below

commit 9160b0e8c8e2b0e4cfd61c397814b71449d11448
Author: hchaverr 
AuthorDate: Thu Oct 22 10:32:28 2020 -0700

HDFS-15623. Respect configured values of rpc.engine (#2403) Contributed by 
Hector Chaverri.

(cherry picked from commit 6eacaffeea21e7e9286497ee17f89fd939d2eead)
---
 .../src/main/java/org/apache/hadoop/ipc/RPC.java |  8 ++--
 .../src/test/java/org/apache/hadoop/ipc/TestRPC.java | 12 
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 07dcbdd..6186740 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -191,14 +191,18 @@ public class RPC {
   private static final String ENGINE_PROP = "rpc.engine";
 
   /**
-   * Set a protocol to use a non-default RpcEngine.
+   * Set a protocol to use a non-default RpcEngine if one
+   * is not specified in the configuration.
* @param conf configuration to use
* @param protocol the protocol interface
* @param engine the RpcEngine impl
*/
   public static void setProtocolEngine(Configuration conf,
 Class protocol, Class engine) {
-conf.setClass(ENGINE_PROP+"."+protocol.getName(), engine, RpcEngine.class);
+if (conf.get(ENGINE_PROP+"."+protocol.getName()) == null) {
+  conf.setClass(ENGINE_PROP+"."+protocol.getName(), engine,
+RpcEngine.class);
+}
   }
 
   // return the RpcEngine configured to handle a protocol
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 2034a2e..ac99900 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -1518,6 +1518,18 @@ public class TestRPC extends TestRpcBase {
 }
   }
 
+  @Test
+  public void testSetProtocolEngine() {
+Configuration conf = new Configuration();
+RPC.setProtocolEngine(conf, StoppedProtocol.class, StoppedRpcEngine.class);
+RpcEngine rpcEngine = RPC.getProtocolEngine(StoppedProtocol.class, conf);
+assertTrue(rpcEngine instanceof StoppedRpcEngine);
+
+RPC.setProtocolEngine(conf, StoppedProtocol.class, 
ProtobufRpcEngine.class);
+rpcEngine = RPC.getProtocolEngine(StoppedProtocol.class, conf);
+assertTrue(rpcEngine instanceof StoppedRpcEngine);
+  }
+
   public static void main(String[] args) throws Exception {
 new TestRPC().testCallsInternal(conf);
   }
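
In effect, setProtocolEngine() now treats an existing
rpc.engine.<protocol> entry as authoritative instead of overwriting it.
The sketch below shows the resulting precedence; MyProtocol and the
com.example engine class name are placeholders introduced only for
illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;

public class RpcEnginePrecedence {
  // Placeholder protocol interface, not a real Hadoop protocol.
  interface MyProtocol {}

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    String key = "rpc.engine." + MyProtocol.class.getName();

    // Value an operator could have set in core-site.xml (hypothetical engine class).
    conf.set(key, "com.example.CustomRpcEngine");

    // With HDFS-15623 this call no longer overwrites the existing key.
    RPC.setProtocolEngine(conf, MyProtocol.class, ProtobufRpcEngine.class);

    System.out.println(conf.get(key));  // still com.example.CustomRpcEngine
  }
}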


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: HDFS-15623. Respect configured values of rpc.engine (#2403) Contributed by Hector Chaverri.

2020-11-06 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 3443d7594 HDFS-15623. Respect configured values of rpc.engine (#2403) 
Contributed by Hector Chaverri.
3443d7594 is described below

commit 3443d759468e7f0c7450a713e360238289a6378b
Author: hchaverr 
AuthorDate: Thu Oct 22 10:32:28 2020 -0700

HDFS-15623. Respect configured values of rpc.engine (#2403) Contributed by 
Hector Chaverri.

(cherry picked from commit 6eacaffeea21e7e9286497ee17f89fd939d2eead)
---
 .../src/main/java/org/apache/hadoop/ipc/RPC.java |  8 ++--
 .../src/test/java/org/apache/hadoop/ipc/TestRPC.java | 12 
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 9287b5a..9f87864 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -193,14 +193,18 @@ public class RPC {
   private static final String ENGINE_PROP = "rpc.engine";
 
   /**
-   * Set a protocol to use a non-default RpcEngine.
+   * Set a protocol to use a non-default RpcEngine if one
+   * is not specified in the configuration.
* @param conf configuration to use
* @param protocol the protocol interface
* @param engine the RpcEngine impl
*/
   public static void setProtocolEngine(Configuration conf,
 Class protocol, Class engine) {
-conf.setClass(ENGINE_PROP+"."+protocol.getName(), engine, RpcEngine.class);
+if (conf.get(ENGINE_PROP+"."+protocol.getName()) == null) {
+  conf.setClass(ENGINE_PROP+"."+protocol.getName(), engine,
+RpcEngine.class);
+}
   }
 
   // return the RpcEngine configured to handle a protocol
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 3d723c9..2d50902 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -1501,6 +1501,18 @@ public class TestRPC extends TestRpcBase {
 }
   }
 
+  @Test
+  public void testSetProtocolEngine() {
+Configuration conf = new Configuration();
+RPC.setProtocolEngine(conf, StoppedProtocol.class, StoppedRpcEngine.class);
+RpcEngine rpcEngine = RPC.getProtocolEngine(StoppedProtocol.class, conf);
+assertTrue(rpcEngine instanceof StoppedRpcEngine);
+
+RPC.setProtocolEngine(conf, StoppedProtocol.class, 
ProtobufRpcEngine.class);
+rpcEngine = RPC.getProtocolEngine(StoppedProtocol.class, conf);
+assertTrue(rpcEngine instanceof StoppedRpcEngine);
+  }
+
   public static void main(String[] args) throws Exception {
 new TestRPC().testCallsInternal(conf);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HDFS-15623. Respect configured values of rpc.engine (#2403) Contributed by Hector Chaverri.

2020-11-06 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 92db25d  HDFS-15623. Respect configured values of rpc.engine (#2403) 
Contributed by Hector Chaverri.
92db25d is described below

commit 92db25d64bf3a902f0b34853f522cd5de863a541
Author: hchaverr 
AuthorDate: Thu Oct 22 10:32:28 2020 -0700

HDFS-15623. Respect configured values of rpc.engine (#2403) Contributed by 
Hector Chaverri.

(cherry picked from commit 6eacaffeea21e7e9286497ee17f89fd939d2eead)
---
 .../src/main/java/org/apache/hadoop/ipc/RPC.java |  8 ++--
 .../src/test/java/org/apache/hadoop/ipc/TestRPC.java | 12 
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 0be5cb5..d892997 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -195,14 +195,18 @@ public class RPC {
   private static final String ENGINE_PROP = "rpc.engine";
 
   /**
-   * Set a protocol to use a non-default RpcEngine.
+   * Set a protocol to use a non-default RpcEngine if one
+   * is not specified in the configuration.
* @param conf configuration to use
* @param protocol the protocol interface
* @param engine the RpcEngine impl
*/
   public static void setProtocolEngine(Configuration conf,
 Class protocol, Class engine) {
-conf.setClass(ENGINE_PROP+"."+protocol.getName(), engine, RpcEngine.class);
+if (conf.get(ENGINE_PROP+"."+protocol.getName()) == null) {
+  conf.setClass(ENGINE_PROP+"."+protocol.getName(), engine,
+RpcEngine.class);
+}
   }
 
   // return the RpcEngine configured to handle a protocol
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 931bbf6..f0d9baf 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -1553,6 +1553,18 @@ public class TestRPC extends TestRpcBase {
 }
   }
 
+  @Test
+  public void testSetProtocolEngine() {
+Configuration conf = new Configuration();
+RPC.setProtocolEngine(conf, StoppedProtocol.class, StoppedRpcEngine.class);
+RpcEngine rpcEngine = RPC.getProtocolEngine(StoppedProtocol.class, conf);
+assertTrue(rpcEngine instanceof StoppedRpcEngine);
+
+RPC.setProtocolEngine(conf, StoppedProtocol.class, 
ProtobufRpcEngine.class);
+rpcEngine = RPC.getProtocolEngine(StoppedProtocol.class, conf);
+assertTrue(rpcEngine instanceof StoppedRpcEngine);
+  }
+
   public static void main(String[] args) throws Exception {
 new TestRPC().testCallsInternal(conf);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HDFS-15623. Respect configured values of rpc.engine (#2403) Contributed by Hector Chaverri.

2020-11-06 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 043cca0  HDFS-15623. Respect configured values of rpc.engine (#2403) 
Contributed by Hector Chaverri.
043cca0 is described below

commit 043cca01b1cc169d26a82945bfb9c2ccfb1fdf19
Author: hchaverr 
AuthorDate: Thu Oct 22 10:32:28 2020 -0700

HDFS-15623. Respect configured values of rpc.engine (#2403) Contributed by 
Hector Chaverri.

(cherry picked from commit 6eacaffeea21e7e9286497ee17f89fd939d2eead)
---
 .../src/main/java/org/apache/hadoop/ipc/RPC.java |  8 ++--
 .../src/test/java/org/apache/hadoop/ipc/TestRPC.java | 12 
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index e794cb9..ad3628d01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -195,14 +195,18 @@ public class RPC {
   private static final String ENGINE_PROP = "rpc.engine";
 
   /**
-   * Set a protocol to use a non-default RpcEngine.
+   * Set a protocol to use a non-default RpcEngine if one
+   * is not specified in the configuration.
* @param conf configuration to use
* @param protocol the protocol interface
* @param engine the RpcEngine impl
*/
   public static void setProtocolEngine(Configuration conf,
 Class protocol, Class engine) {
-conf.setClass(ENGINE_PROP+"."+protocol.getName(), engine, RpcEngine.class);
+if (conf.get(ENGINE_PROP+"."+protocol.getName()) == null) {
+  conf.setClass(ENGINE_PROP+"."+protocol.getName(), engine,
+RpcEngine.class);
+}
   }
 
   // return the RpcEngine configured to handle a protocol
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index cd2433a..628c044 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -1554,6 +1554,18 @@ public class TestRPC extends TestRpcBase {
 }
   }
 
+  @Test
+  public void testSetProtocolEngine() {
+Configuration conf = new Configuration();
+RPC.setProtocolEngine(conf, StoppedProtocol.class, StoppedRpcEngine.class);
+RpcEngine rpcEngine = RPC.getProtocolEngine(StoppedProtocol.class, conf);
+assertTrue(rpcEngine instanceof StoppedRpcEngine);
+
+RPC.setProtocolEngine(conf, StoppedProtocol.class, 
ProtobufRpcEngine.class);
+rpcEngine = RPC.getProtocolEngine(StoppedProtocol.class, conf);
+assertTrue(rpcEngine instanceof StoppedRpcEngine);
+  }
+
   public static void main(String[] args) throws Exception {
 new TestRPC().testCallsInternal(conf);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-15623. Respect configured values of rpc.engine (#2403) Contributed by Hector Chaverri.

2020-11-06 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6eacaff  HDFS-15623. Respect configured values of rpc.engine (#2403) 
Contributed by Hector Chaverri.
6eacaff is described below

commit 6eacaffeea21e7e9286497ee17f89fd939d2eead
Author: hchaverr 
AuthorDate: Thu Oct 22 10:32:28 2020 -0700

HDFS-15623. Respect configured values of rpc.engine (#2403) Contributed by 
Hector Chaverri.
---
 .../src/main/java/org/apache/hadoop/ipc/RPC.java |  8 ++--
 .../src/test/java/org/apache/hadoop/ipc/TestRPC.java | 12 
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index e794cb9..ad3628d01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -195,14 +195,18 @@ public class RPC {
   private static final String ENGINE_PROP = "rpc.engine";
 
   /**
-   * Set a protocol to use a non-default RpcEngine.
+   * Set a protocol to use a non-default RpcEngine if one
+   * is not specified in the configuration.
* @param conf configuration to use
* @param protocol the protocol interface
* @param engine the RpcEngine impl
*/
   public static void setProtocolEngine(Configuration conf,
 Class protocol, Class engine) {
-conf.setClass(ENGINE_PROP+"."+protocol.getName(), engine, RpcEngine.class);
+if (conf.get(ENGINE_PROP+"."+protocol.getName()) == null) {
+  conf.setClass(ENGINE_PROP+"."+protocol.getName(), engine,
+RpcEngine.class);
+}
   }
 
   // return the RpcEngine configured to handle a protocol
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index cd2433a..628c044 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -1554,6 +1554,18 @@ public class TestRPC extends TestRpcBase {
 }
   }
 
+  @Test
+  public void testSetProtocolEngine() {
+Configuration conf = new Configuration();
+RPC.setProtocolEngine(conf, StoppedProtocol.class, StoppedRpcEngine.class);
+RpcEngine rpcEngine = RPC.getProtocolEngine(StoppedProtocol.class, conf);
+assertTrue(rpcEngine instanceof StoppedRpcEngine);
+
+RPC.setProtocolEngine(conf, StoppedProtocol.class, 
ProtobufRpcEngine.class);
+rpcEngine = RPC.getProtocolEngine(StoppedProtocol.class, conf);
+assertTrue(rpcEngine instanceof StoppedRpcEngine);
+  }
+
   public static void main(String[] args) throws Exception {
 new TestRPC().testCallsInternal(conf);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2.10 updated: HDFS-15665. Balancer logging improvements. Contributed by Konstantin V Shvachko.

2020-11-03 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new a9bf374  HDFS-15665. Balancer logging improvements. Contributed by 
Konstantin V Shvachko.
a9bf374 is described below

commit a9bf3743801558f7b453e5f6db86ca8e935e3763
Author: Konstantin V Shvachko 
AuthorDate: Tue Nov 3 12:01:30 2020 -0800

HDFS-15665. Balancer logging improvements. Contributed by Konstantin V 
Shvachko.

(cherry picked from commit d07dc7afb4aa0d6cc9f9be530802e54610776a4d)
---
 .../hadoop/hdfs/server/balancer/Balancer.java  | 42 +-
 .../hadoop/hdfs/server/balancer/Dispatcher.java| 27 ++
 .../hdfs/server/balancer/NameNodeConnector.java| 14 
 .../hadoop/hdfs/server/balancer/TestBalancer.java  |  2 +-
 .../TestBalancerWithMultipleNameNodes.java | 12 +++
 5 files changed, 68 insertions(+), 29 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index be72eb4..bb4b697 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -34,8 +34,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -64,7 +62,8 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import static com.google.common.base.Preconditions.checkArgument;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 
@@ -171,7 +170,7 @@ import com.google.common.base.Preconditions;
 
 @InterfaceAudience.Private
 public class Balancer {
-  static final Log LOG = LogFactory.getLog(Balancer.class);
+  static final Logger LOG = LoggerFactory.getLogger(Balancer.class);
 
   static final Path BALANCER_ID_PATH = new Path("/system/balancer.id");
 
@@ -261,6 +260,9 @@ public class Balancer {
*/
   Balancer(NameNodeConnector theblockpool, BalancerParameters p,
   Configuration conf) {
+// NameNode configuration parameters for balancing
+getInt(conf, DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_KEY,
+DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_DEFAULT);
 final long movedWinWidth = getLong(conf,
 DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY,
 DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT);
@@ -270,10 +272,6 @@ public class Balancer {
 final int dispatcherThreads = getInt(conf,
 DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_KEY,
 DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_DEFAULT);
-final int maxConcurrentMovesPerNode = getInt(conf,
-DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
-DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
-
 final long getBlocksSize = getLongBytes(conf,
 DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_KEY,
 DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_DEFAULT);
@@ -290,6 +288,13 @@ public class Balancer {
 DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY,
 DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_DEFAULT);
 
+// DataNode configuration parameters for balancing
+final int maxConcurrentMovesPerNode = getInt(conf,
+DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
+getLongBytes(conf, DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY,
+DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT);
+
 this.nnc = theblockpool;
 this.dispatcher =
 new Dispatcher(theblockpool, p.getIncludedNodes(),
@@ -582,12 +587,13 @@ public class Balancer {
   this.bytesAlreadyMoved = bytesAlreadyMoved;
 }
 
-void print(int iteration, PrintStream out) {
-  out.printf("%-24s %10d  %19s  %18s  %17s%n",
+void print(int iteration, NameNodeConnector nnc, PrintStream out) {
+  out.printf("%-24s %10d  %19s  %18s  %17s  %s%n",
   DateFormat.getDateTimeInstance().format(new Date()), iteration,
   StringUtils.byteDesc(bytesAlreadyMoved),
   StringUtils.byteDesc(bytesLeftToMove),
-  StringUtils.byteDesc(bytesBeingMoved));
+  StringUtils.byteDesc(bytesBeingMoved),
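
Part of this change is a logging cleanup: Balancer moves from
commons-logging (Log/LogFactory) to the SLF4J API used elsewhere in HDFS.
A minimal sketch of the migrated style is below; the class is
illustrative, not Balancer itself.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative class showing the SLF4J style the Balancer now uses.
public class BalancerStyleLogging {
  static final Logger LOG = LoggerFactory.getLogger(BalancerStyleLogging.class);

  public static void main(String[] args) {
    long bytesMoved = 1024L * 1024L;
    // Parameterized messages avoid string concatenation when the level is disabled.
    LOG.info("Moved {} bytes in this iteration", bytesMoved);
  }
}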

[hadoop] branch branch-3.1 updated: HDFS-15665. Balancer logging improvements. Contributed by Konstantin V Shvachko.

2020-11-03 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 079f1bb  HDFS-15665. Balancer logging improvements. Contributed by 
Konstantin V Shvachko.
079f1bb is described below

commit 079f1bb2f91c0e8448d47f1d82fe5ac106065121
Author: Konstantin V Shvachko 
AuthorDate: Tue Nov 3 12:01:30 2020 -0800

HDFS-15665. Balancer logging improvements. Contributed by Konstantin V 
Shvachko.

(cherry picked from commit d07dc7afb4aa0d6cc9f9be530802e54610776a4d)
---
 .../hadoop/hdfs/server/balancer/Balancer.java  | 41 ++
 .../hadoop/hdfs/server/balancer/Dispatcher.java| 26 +++---
 .../hdfs/server/balancer/NameNodeConnector.java| 14 
 .../hadoop/hdfs/server/balancer/TestBalancer.java  |  2 +-
 .../TestBalancerWithMultipleNameNodes.java | 12 +++
 5 files changed, 68 insertions(+), 27 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 0aea7ff..a1b7105 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -36,8 +36,6 @@ import java.util.List;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -66,6 +64,8 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 
@@ -172,7 +172,7 @@ import com.google.common.base.Preconditions;
 
 @InterfaceAudience.Private
 public class Balancer {
-  static final Log LOG = LogFactory.getLog(Balancer.class);
+  static final Logger LOG = LoggerFactory.getLogger(Balancer.class);
 
   static final Path BALANCER_ID_PATH = new Path("/system/balancer.id");
 
@@ -264,6 +264,9 @@ public class Balancer {
*/
   Balancer(NameNodeConnector theblockpool, BalancerParameters p,
   Configuration conf) {
+// NameNode configuration parameters for balancing
+getInt(conf, DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_KEY,
+DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_DEFAULT);
 final long movedWinWidth = getLong(conf,
 DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY,
 DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT);
@@ -273,10 +276,6 @@ public class Balancer {
 final int dispatcherThreads = getInt(conf,
 DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_KEY,
 DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_DEFAULT);
-final int maxConcurrentMovesPerNode = getInt(conf,
-DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
-DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
-
 final long getBlocksSize = getLongBytes(conf,
 DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_KEY,
 DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_DEFAULT);
@@ -293,6 +292,13 @@ public class Balancer {
 DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY,
 DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_DEFAULT);
 
+// DataNode configuration parameters for balancing
+final int maxConcurrentMovesPerNode = getInt(conf,
+DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
+getLongBytes(conf, DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY,
+DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT);
+
 this.nnc = theblockpool;
 this.dispatcher =
 new Dispatcher(theblockpool, p.getIncludedNodes(),
@@ -585,12 +591,13 @@ public class Balancer {
   this.bytesAlreadyMoved = bytesAlreadyMoved;
 }
 
-void print(int iteration, PrintStream out) {
-  out.printf("%-24s %10d  %19s  %18s  %17s%n",
+void print(int iteration, NameNodeConnector nnc, PrintStream out) {
+  out.printf("%-24s %10d  %19s  %18s  %17s  %s%n",
   DateFormat.getDateTimeInstance().format(new Date()), iteration,
   StringUtils.byteDesc(bytesAlreadyMoved),
   StringUtils.byteDesc(bytesLeftToMove),
-  StringUtils.byteDesc(bytesBeingMoved));
+  StringUtils.byteDesc(bytesBeingMoved),
+  nnc.getNameNodeUri());
 }
   }
 
@@ -636,8 +6
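
For context on the diff above (editorial note, not part of the patch): the branch-3.1 backport also switches Balancer from commons-logging (Log/LogFactory) to SLF4J (Logger/LoggerFactory) and uses parameterized messages. Below is a minimal, standalone Java sketch of that logging style; the class name, message text, and values are illustrative only and are not taken from the Hadoop source.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jStyleSketch {
  // SLF4J logger, as the diff above adopts in place of commons-logging.
  private static final Logger LOG = LoggerFactory.getLogger(Slf4jStyleSketch.class);

  public static void main(String[] args) {
    String bytesBeingMoved = "10 GB";              // made-up value
    String nameNodeUri = "hdfs://example-nn:8020"; // hypothetical URI
    // Parameterized message: arguments are only formatted if INFO is enabled.
    LOG.info("Will move {} in this iteration for {}", bytesBeingMoved, nameNodeUri);
  }
}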

[hadoop] branch branch-3.2 updated: HDFS-15665. Balancer logging improvements. Contributed by Konstantin V Shvachko.

2020-11-03 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 23dcd8ed HDFS-15665. Balancer logging improvements. Contributed by 
Konstantin V Shvachko.
23dcd8ed is described below

commit 23dcd8edb68fadc95c5903ac8249e3e3c4ea7424
Author: Konstantin V Shvachko 
AuthorDate: Tue Nov 3 12:01:30 2020 -0800

HDFS-15665. Balancer logging improvements. Contributed by Konstantin V 
Shvachko.

(cherry picked from commit d07dc7afb4aa0d6cc9f9be530802e54610776a4d)
---
 .../hadoop/hdfs/server/balancer/Balancer.java  | 33 ++
 .../hadoop/hdfs/server/balancer/Dispatcher.java| 20 +++--
 .../hdfs/server/balancer/NameNodeConnector.java| 14 +
 .../hadoop/hdfs/server/balancer/TestBalancer.java  |  2 +-
 4 files changed, 55 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index d9b50c3..03c49d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -264,6 +264,9 @@ public class Balancer {
*/
   Balancer(NameNodeConnector theblockpool, BalancerParameters p,
   Configuration conf) {
+// NameNode configuration parameters for balancing
+getInt(conf, DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_KEY,
+DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_DEFAULT);
 final long movedWinWidth = getLong(conf,
 DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY,
 DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT);
@@ -273,10 +276,6 @@ public class Balancer {
 final int dispatcherThreads = getInt(conf,
 DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_KEY,
 DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_DEFAULT);
-final int maxConcurrentMovesPerNode = getInt(conf,
-DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
-DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
-
 final long getBlocksSize = getLongBytes(conf,
 DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_KEY,
 DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_DEFAULT);
@@ -293,6 +292,13 @@ public class Balancer {
 DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY,
 DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_DEFAULT);
 
+// DataNode configuration parameters for balancing
+final int maxConcurrentMovesPerNode = getInt(conf,
+DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
+getLongBytes(conf, DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY,
+DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT);
+
 this.nnc = theblockpool;
 this.dispatcher =
 new Dispatcher(theblockpool, p.getIncludedNodes(),
@@ -585,12 +591,13 @@ public class Balancer {
   this.bytesAlreadyMoved = bytesAlreadyMoved;
 }
 
-void print(int iteration, PrintStream out) {
-  out.printf("%-24s %10d  %19s  %18s  %17s%n",
+void print(int iteration, NameNodeConnector nnc, PrintStream out) {
+  out.printf("%-24s %10d  %19s  %18s  %17s  %s%n",
   DateFormat.getDateTimeInstance().format(new Date()), iteration,
   StringUtils.byteDesc(bytesAlreadyMoved),
   StringUtils.byteDesc(bytesLeftToMove),
-  StringUtils.byteDesc(bytesBeingMoved));
+  StringUtils.byteDesc(bytesBeingMoved),
+  nnc.getNameNodeUri());
 }
   }
 
@@ -636,8 +643,10 @@ public class Balancer {
 System.out.println("No block can be moved. Exiting...");
 return newResult(ExitStatus.NO_MOVE_BLOCK, bytesLeftToMove, 
bytesBeingMoved);
   } else {
-LOG.info( "Will move " + StringUtils.byteDesc(bytesBeingMoved) +
-" in this iteration");
+LOG.info("Will move {}  in this iteration for {}",
+StringUtils.byteDesc(bytesBeingMoved), nnc.toString());
+LOG.info("Total target DataNodes in this iteration: {}",
+dispatcher.moveTasksTotal());
   }
 
   /* For each pair of , start a thread that repeatedly 
@@ -687,7 +696,9 @@ public class Balancer {
 LOG.info("excluded nodes = " + p.getExcludedNodes());
 LOG.info("source nodes = " + p.getSourceNodes());
 checkKeytabAndInit(conf);
-System.out.println("Time Stamp   Iteration#  Bytes Already 
Moved  Bytes Left To Move  Bytes Being Moved");
+System.out.println("Time

[hadoop] branch branch-3.3 updated: HDFS-15665. Balancer logging improvements. Contributed by Konstantin V Shvachko.

2020-11-03 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new e48dd9d  HDFS-15665. Balancer logging improvements. Contributed by 
Konstantin V Shvachko.
e48dd9d is described below

commit e48dd9daeafc091ea85ce41139e9af362b67433a
Author: Konstantin V Shvachko 
AuthorDate: Tue Nov 3 12:01:30 2020 -0800

HDFS-15665. Balancer logging improvements. Contributed by Konstantin V 
Shvachko.

(cherry picked from commit d07dc7afb4aa0d6cc9f9be530802e54610776a4d)
---
 .../hadoop/hdfs/server/balancer/Balancer.java  | 33 ++
 .../hadoop/hdfs/server/balancer/Dispatcher.java| 20 +++--
 .../hdfs/server/balancer/NameNodeConnector.java| 14 +
 .../hadoop/hdfs/server/balancer/TestBalancer.java  |  2 +-
 4 files changed, 55 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 753a270..48938bf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -282,6 +282,9 @@ public class Balancer {
*/
   Balancer(NameNodeConnector theblockpool, BalancerParameters p,
   Configuration conf) {
+// NameNode configuration parameters for balancing
+getInt(conf, DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_KEY,
+DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_DEFAULT);
 final long movedWinWidth = getLong(conf,
 DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY,
 DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT);
@@ -291,10 +294,6 @@ public class Balancer {
 final int dispatcherThreads = getInt(conf,
 DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_KEY,
 DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_DEFAULT);
-final int maxConcurrentMovesPerNode = getInt(conf,
-DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
-DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
-
 final long getBlocksSize = getLongBytes(conf,
 DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_KEY,
 DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_DEFAULT);
@@ -311,6 +310,13 @@ public class Balancer {
 DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY,
 DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_DEFAULT);
 
+// DataNode configuration parameters for balancing
+final int maxConcurrentMovesPerNode = getInt(conf,
+DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
+getLongBytes(conf, DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY,
+DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT);
+
 this.nnc = theblockpool;
 this.dispatcher =
 new Dispatcher(theblockpool, p.getIncludedNodes(),
@@ -603,12 +609,13 @@ public class Balancer {
   this.bytesAlreadyMoved = bytesAlreadyMoved;
 }
 
-void print(int iteration, PrintStream out) {
-  out.printf("%-24s %10d  %19s  %18s  %17s%n",
+void print(int iteration, NameNodeConnector nnc, PrintStream out) {
+  out.printf("%-24s %10d  %19s  %18s  %17s  %s%n",
   DateFormat.getDateTimeInstance().format(new Date()), iteration,
   StringUtils.byteDesc(bytesAlreadyMoved),
   StringUtils.byteDesc(bytesLeftToMove),
-  StringUtils.byteDesc(bytesBeingMoved));
+  StringUtils.byteDesc(bytesBeingMoved),
+  nnc.getNameNodeUri());
 }
   }
 
@@ -653,8 +660,10 @@ public class Balancer {
 System.out.println("No block can be moved. Exiting...");
 return newResult(ExitStatus.NO_MOVE_BLOCK, bytesLeftToMove, 
bytesBeingMoved);
   } else {
-LOG.info( "Will move " + StringUtils.byteDesc(bytesBeingMoved) +
-" in this iteration");
+LOG.info("Will move {}  in this iteration for {}",
+StringUtils.byteDesc(bytesBeingMoved), nnc.toString());
+LOG.info("Total target DataNodes in this iteration: {}",
+dispatcher.moveTasksTotal());
   }
 
   /* For each pair of , start a thread that repeatedly 
@@ -705,7 +714,9 @@ public class Balancer {
 LOG.info("excluded nodes = " + p.getExcludedNodes());
 LOG.info("source nodes = " + p.getSourceNodes());
 checkKeytabAndInit(conf);
-System.out.println("Time Stamp   Iteration#  Bytes Already 
Moved  Bytes Left To Move  Bytes Being Moved");
+System.out.println("Time

[hadoop] branch trunk updated: HDFS-15665. Balancer logging improvements. Contributed by Konstantin V Shvachko.

2020-11-03 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d07dc7a  HDFS-15665. Balancer logging improvements. Contributed by 
Konstantin V Shvachko.
d07dc7a is described below

commit d07dc7afb4aa0d6cc9f9be530802e54610776a4d
Author: Konstantin V Shvachko 
AuthorDate: Tue Nov 3 12:01:30 2020 -0800

HDFS-15665. Balancer logging improvements. Contributed by Konstantin V 
Shvachko.
---
 .../hadoop/hdfs/server/balancer/Balancer.java  | 33 ++
 .../hadoop/hdfs/server/balancer/Dispatcher.java| 20 +++--
 .../hdfs/server/balancer/NameNodeConnector.java| 14 +
 .../hadoop/hdfs/server/balancer/TestBalancer.java  |  2 +-
 4 files changed, 55 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 2de1cd6..33b5fa4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -282,6 +282,9 @@ public class Balancer {
*/
   Balancer(NameNodeConnector theblockpool, BalancerParameters p,
   Configuration conf) {
+// NameNode configuration parameters for balancing
+getInt(conf, DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_KEY,
+DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_DEFAULT);
 final long movedWinWidth = getLong(conf,
 DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY,
 DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT);
@@ -291,10 +294,6 @@ public class Balancer {
 final int dispatcherThreads = getInt(conf,
 DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_KEY,
 DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_DEFAULT);
-final int maxConcurrentMovesPerNode = getInt(conf,
-DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
-DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
-
 final long getBlocksSize = getLongBytes(conf,
 DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_KEY,
 DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_DEFAULT);
@@ -311,6 +310,13 @@ public class Balancer {
 DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY,
 DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_DEFAULT);
 
+// DataNode configuration parameters for balancing
+final int maxConcurrentMovesPerNode = getInt(conf,
+DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
+getLongBytes(conf, DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY,
+DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT);
+
 this.nnc = theblockpool;
 this.dispatcher =
 new Dispatcher(theblockpool, p.getIncludedNodes(),
@@ -603,12 +609,13 @@ public class Balancer {
   this.bytesAlreadyMoved = bytesAlreadyMoved;
 }
 
-void print(int iteration, PrintStream out) {
-  out.printf("%-24s %10d  %19s  %18s  %17s%n",
+void print(int iteration, NameNodeConnector nnc, PrintStream out) {
+  out.printf("%-24s %10d  %19s  %18s  %17s  %s%n",
   DateFormat.getDateTimeInstance().format(new Date()), iteration,
   StringUtils.byteDesc(bytesAlreadyMoved),
   StringUtils.byteDesc(bytesLeftToMove),
-  StringUtils.byteDesc(bytesBeingMoved));
+  StringUtils.byteDesc(bytesBeingMoved),
+  nnc.getNameNodeUri());
 }
   }
 
@@ -653,8 +660,10 @@ public class Balancer {
 System.out.println("No block can be moved. Exiting...");
 return newResult(ExitStatus.NO_MOVE_BLOCK, bytesLeftToMove, 
bytesBeingMoved);
   } else {
-LOG.info( "Will move " + StringUtils.byteDesc(bytesBeingMoved) +
-" in this iteration");
+LOG.info("Will move {}  in this iteration for {}",
+StringUtils.byteDesc(bytesBeingMoved), nnc.toString());
+LOG.info("Total target DataNodes in this iteration: {}",
+dispatcher.moveTasksTotal());
   }
 
   /* For each pair of , start a thread that repeatedly 
@@ -705,7 +714,9 @@ public class Balancer {
 LOG.info("excluded nodes = " + p.getExcludedNodes());
 LOG.info("source nodes = " + p.getSourceNodes());
 checkKeytabAndInit(conf);
-System.out.println("Time Stamp   Iteration#  Bytes Already 
Moved  Bytes Left To Move  Bytes Being Moved");
+System.out.println("Time Stamp   Iteration#"
++ "  Bytes Already Moved  Bytes Left T
