Hexiaoqiao commented on a change in pull request #2981:
URL: https://github.com/apache/hadoop/pull/2981#discussion_r631534832



##########
File path: 
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
##########
@@ -171,7 +179,9 @@ private String getUsage(String cmd) {
       return "\t[-clrQuota <path>]";
     } else if (cmd.equals("-clrStorageTypeQuota")) {
       return "\t[-clrStorageTypeQuota <path>]";
-    } else if (cmd.equals("-safemode")) {
+    }  else if (cmd.equals("-initViewFsToMountTable")) {
+      return "\t[-initViewFsToMountTable <clusterName>] | allClusters";

Review comment:
       Should this be `\t[-initViewFsToMountTable <clusterName> | allClusters]`?
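      For example, a minimal sketch of the corrected branch, in the same style as the other usage strings above:

          } else if (cmd.equals("-initViewFsToMountTable")) {
            // bracket the whole argument group so the usage reads as one optional unit
            return "\t[-initViewFsToMountTable <clusterName> | allClusters]";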

##########
File path: 
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
##########
@@ -384,6 +398,13 @@ public int run(String[] argv) throws Exception {
         getDisabledNameservices();
       } else if ("-refresh".equals(cmd)) {
         refresh(address);
+      } else if ("-initViewFsToMountTable".equals(cmd)) {
+        if (initViewFsToMountTable(argv[i])) {
+          System.out.println("Successfully init ViewFs mapping to router " +
+              argv[i]);
+        } else {

Review comment:
      If this command fails, we should print error information to stderr, for example `System.err.println("Failed to execute command 'initViewFsToMountTable'")` or something similar.

##########
File path: 
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
##########
@@ -1036,6 +1057,83 @@ private boolean updateQuota(String mount, long nsQuota, long ssQuota)
     return updateResponse.getStatus();
   }
 
+  /**
+   * Initialize the ViewFS mount point to the Router,
+   * either to specify a cluster or to initialize it all.
+   * @param clusterName The specified cluster to initialize,
+   * AllCluster was then all clusters.
+   * @return If the quota was updated.
+   * @throws IOException Error adding the mount point.
+   */
+  public boolean initViewFsToMountTable(String clusterName)
+      throws IOException {
+    // fs.viewfs.mounttable.ClusterX.link./data
+    final String mountTablePrefix;
+    if (clusterName.equals(ALL_CLUSTERS)) {
+      mountTablePrefix =
+          Constants.CONFIG_VIEWFS_PREFIX + ".*" +
+              Constants.CONFIG_VIEWFS_LINK + ".";
+    } else {
+      mountTablePrefix =
+          Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." +
+              Constants.CONFIG_VIEWFS_LINK + ".";
+    }
+    final String rootPath = "/";
+    Map<String, String> viewFsMap = getConf().getValByRegex(
+        mountTablePrefix  + rootPath);
+    if (viewFsMap.isEmpty()) {
+      System.out.println("There is no ViewFs mapping to initialize.");
+      return true;
+    }
+    for (Entry<String, String> entry : viewFsMap.entrySet()) {
+      Path path = new Path(entry.getValue());
+      URI destUri = path.toUri();
+      String mountKey = entry.getKey();
+      DestinationOrder order = DestinationOrder.HASH;
+      String mount = mountKey.replaceAll(mountTablePrefix, "");
+      if (!destUri.getScheme().equals("hdfs")) {

Review comment:
       Would it be better to use `HdfsConstants#HDFS_URI_SCHEME` here instead of the `"hdfs"` literal?
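      For example, a small sketch of the check using the shared constant (with the corresponding import of `org.apache.hadoop.hdfs.protocol.HdfsConstants`); flipping the comparison also guards against a null scheme:

          // compare against the shared constant rather than a hard-coded "hdfs" string
          if (!HdfsConstants.HDFS_URI_SCHEME.equals(destUri.getScheme())) {
            System.out.println("Only supports HDFS, added mount point failed: " + mountKey);
          }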

##########
File path: 
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
##########
@@ -1036,6 +1057,83 @@ private boolean updateQuota(String mount, long nsQuota, long ssQuota)
     return updateResponse.getStatus();
   }
 
+  /**
+   * Initialize the ViewFS mount point to the Router,
+   * either to specify a cluster or to initialize it all.
+   * @param clusterName The specified cluster to initialize,
+   * AllCluster was then all clusters.
+   * @return If the quota was updated.
+   * @throws IOException Error adding the mount point.
+   */
+  public boolean initViewFsToMountTable(String clusterName)
+      throws IOException {
+    // fs.viewfs.mounttable.ClusterX.link./data
+    final String mountTablePrefix;
+    if (clusterName.equals(ALL_CLUSTERS)) {
+      mountTablePrefix =
+          Constants.CONFIG_VIEWFS_PREFIX + ".*" +
+              Constants.CONFIG_VIEWFS_LINK + ".";
+    } else {
+      mountTablePrefix =
+          Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." +
+              Constants.CONFIG_VIEWFS_LINK + ".";
+    }
+    final String rootPath = "/";
+    Map<String, String> viewFsMap = getConf().getValByRegex(
+        mountTablePrefix  + rootPath);
+    if (viewFsMap.isEmpty()) {
+      System.out.println("There is no ViewFs mapping to initialize.");
+      return true;
+    }
+    for (Entry<String, String> entry : viewFsMap.entrySet()) {
+      Path path = new Path(entry.getValue());
+      URI destUri = path.toUri();
+      String mountKey = entry.getKey();
+      DestinationOrder order = DestinationOrder.HASH;
+      String mount = mountKey.replaceAll(mountTablePrefix, "");
+      if (!destUri.getScheme().equals("hdfs")) {
+        System.out.println("Only supports HDFS, " +
+            "added Mount Point failed , " + mountKey);
+      }
+      if (!mount.startsWith(rootPath) ||
+          !destUri.getPath().startsWith(rootPath)) {
+        System.out.println("Added Mount Point failed " + mountKey);
+        continue;
+      }
+      String[] nss = new String[]{destUri.getAuthority()};
+      boolean added = addMount(
+          mount, nss, destUri.getPath(), false,
+          false, order, getACLEntityFormHdfsPath(path, getConf()));
+      if (added) {
+        System.out.println("Added mount point " + mount);
+      }
+    }
+    return true;
+  }
+
+  /**
+   * Returns ACLEntity according to a HDFS pat.
+   * @param path A path of HDFS.
+   */
+  static public ACLEntity getACLEntityFormHdfsPath(
+      Path path, Configuration conf) {
+    String owner = null;
+    String group = null;
+    FsPermission mode = null;
+    try {
+      FileSystem fs = path.getFileSystem(conf);
+      if (fs.exists(path)) {
+        FileStatus fileStatus = fs.getFileStatus(path);
+        owner = fileStatus.getOwner();
+        group = fileStatus.getGroup();
+        mode = fileStatus.getPermission();
+      }
+    } catch (IOException e) {
+      System.err.println("Exception encountered " + e);

Review comment:
       just suggest to throw exception rather than just print the error 
information.
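      For example, a sketch of the same helper with the try/catch removed, assuming `ACLEntity` exposes an (owner, group, mode) constructor:

          /**
           * Returns the ACLEntity for an HDFS path.
           * @param path A path on HDFS.
           * @throws IOException If the path cannot be resolved or checked.
           */
          static public ACLEntity getACLEntityFormHdfsPath(
              Path path, Configuration conf) throws IOException {
            String owner = null;
            String group = null;
            FsPermission mode = null;
            // no try/catch here: let the IOException propagate to the caller
            // instead of silently swallowing it
            FileSystem fs = path.getFileSystem(conf);
            if (fs.exists(path)) {
              FileStatus fileStatus = fs.getFileStatus(path);
              owner = fileStatus.getOwner();
              group = fileStatus.getGroup();
              mode = fileStatus.getPermission();
            }
            // assumption: ACLEntity offers an (owner, group, mode) constructor
            return new ACLEntity(owner, group, mode);
          }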

##########
File path: 
hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
##########
@@ -241,6 +241,26 @@ Mount table permission can be set by following command:
 
 The option mode is UNIX-style permissions for the mount table. Permissions are 
specified in octal, e.g. 0755. By default, this is set to 0755.
 
+#### Init ViewFs To Router
+Router supports initializing the [ViewFs](../hadoop-hdfs/ViewFs.html) mount 
point to the Router. The mapping directory protocol of ViewFS must be HDFS, and 
the initializer only supports one-to-one mapping.
+
+For example, use the following [ViewFs](../hadoop-hdfs/ViewFs.html) to 
configure the initial mount table to the router.
+
+    <configuration>
+      <property>
+        <name>fs.viewfs.mounttable.ClusterX.link./data</name>
+        <value>hdfs://nn1-clusterx.example.com:8020/data</value>
+      </property>
+      <property>
+        <name>fs.viewfs.mounttable.ClusterY.link./project</name>
+        <value>hdfs://nn1-clustery.example.com:8020/project</value>
+      </property>
+    </configuration>
+
+The [ViewFs](../hadoop-hdfs/ViewFs.html) mount table can be initialized to the 
Router by using the following command:
+
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -initViewFsToMountTable 
<clusterName>] | allClusters

Review comment:
       `$HADOOP_HOME/bin/hdfs dfsrouteradmin -initViewFsToMountTable <clusterName>] | allClusters`
   to
       `$HADOOP_HOME/bin/hdfs dfsrouteradmin -initViewFsToMountTable [<clusterName> | allClusters]`

##########
File path: 
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
##########
@@ -1036,6 +1057,83 @@ private boolean updateQuota(String mount, long nsQuota, long ssQuota)
     return updateResponse.getStatus();
   }
 
+  /**
+   * Initialize the ViewFS mount point to the Router,
+   * either to specify a cluster or to initialize it all.
+   * @param clusterName The specified cluster to initialize,
+   * AllCluster was then all clusters.
+   * @return If the quota was updated.
+   * @throws IOException Error adding the mount point.
+   */
+  public boolean initViewFsToMountTable(String clusterName)
+      throws IOException {
+    // fs.viewfs.mounttable.ClusterX.link./data
+    final String mountTablePrefix;
+    if (clusterName.equals(ALL_CLUSTERS)) {
+      mountTablePrefix =
+          Constants.CONFIG_VIEWFS_PREFIX + ".*" +
+              Constants.CONFIG_VIEWFS_LINK + ".";
+    } else {
+      mountTablePrefix =
+          Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." +
+              Constants.CONFIG_VIEWFS_LINK + ".";
+    }
+    final String rootPath = "/";
+    Map<String, String> viewFsMap = getConf().getValByRegex(
+        mountTablePrefix  + rootPath);
+    if (viewFsMap.isEmpty()) {
+      System.out.println("There is no ViewFs mapping to initialize.");
+      return true;
+    }
+    for (Entry<String, String> entry : viewFsMap.entrySet()) {
+      Path path = new Path(entry.getValue());
+      URI destUri = path.toUri();
+      String mountKey = entry.getKey();
+      DestinationOrder order = DestinationOrder.HASH;
+      String mount = mountKey.replaceAll(mountTablePrefix, "");
+      if (!destUri.getScheme().equals("hdfs")) {
+        System.out.println("Only supports HDFS, " +
+            "added Mount Point failed , " + mountKey);
+      }
+      if (!mount.startsWith(rootPath) ||
+          !destUri.getPath().startsWith(rootPath)) {
+        System.out.println("Added Mount Point failed " + mountKey);
+        continue;
+      }
+      String[] nss = new String[]{destUri.getAuthority()};
+      boolean added = addMount(
+          mount, nss, destUri.getPath(), false,
+          false, order, getACLEntityFormHdfsPath(path, getConf()));

Review comment:
       If we specify `clusterName`, it may not be proper to invoke this directly, right?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


