HDFS-12577. Rename Router tooling. Contributed by Inigo Goiri.

(cherry picked from commit 53e8d0d030525e4c7f3875e23807c6dbe778890f)
(cherry picked from commit 5d63a388d1c3ec8a658cb2fd9b34c240bddf15a0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/629b88b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/629b88b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/629b88b4

Branch: refs/heads/branch-2
Commit: 629b88b4dd355002d5b6d0cce7525c1f20b8592f
Parents: 27295ee
Author: Inigo Goiri <inigo...@apache.org>
Authored: Fri Oct 6 17:31:53 2017 -0700
Committer: Inigo Goiri <inigo...@apache.org>
Committed: Thu Oct 19 17:39:27 2017 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/src/main/bin/hdfs.cmd           | 11 +--
 .../server/federation/router/DFSRouter.java     | 76 ++++++++++++++++++++
 .../hdfs/server/federation/router/Router.java   | 39 ----------
 .../src/site/markdown/HDFSRouterFederation.md   | 12 ++--
 4 files changed, 88 insertions(+), 50 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/629b88b4/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index 53bdf70..a9a7852 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
     )
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto router federation debug
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto dfsrouter dfsrouteradmin debug
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -179,12 +179,12 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
   goto :eof
 
-:router
-  set CLASS=org.apache.hadoop.hdfs.server.federation.router.Router
+:dfsrouter
+  set CLASS=org.apache.hadoop.hdfs.server.federation.router.DFSRouter
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
   goto :eof
 
-:federation
+:dfsrouteradmin
   set CLASS=org.apache.hadoop.hdfs.tools.federation.RouterAdmin
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
   goto :eof
@@ -229,7 +229,8 @@ goto :eof
   @echo   secondarynamenode    run the DFS secondary namenode
   @echo   namenode             run the DFS namenode
   @echo   journalnode          run the DFS journalnode
-  @echo   router               run the DFS router
+  @echo   dfsrouter            run the DFS router
+  @echo   dfsrouteradmin       manage Router-based federation
   @echo   zkfc                 run the ZK Failover Controller daemon
   @echo   datanode             run a DFS datanode
   @echo   dfsadmin             run a DFS admin client

http://git-wip-us.apache.org/repos/asf/hadoop/blob/629b88b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/DFSRouter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/DFSRouter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/DFSRouter.java
new file mode 100644
index 0000000..a2ac258
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/DFSRouter.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.service.CompositeService.CompositeServiceShutdownHook;
+import org.apache.hadoop.util.ShutdownHookManager;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tool to start the {@link Router} for Router-based federation.
+ */
+public final class DFSRouter {
+
+  private static final Logger LOG = LoggerFactory.getLogger(DFSRouter.class);
+
+
+  /** Usage string for help message. */
+  private static final String USAGE = "Usage: hdfs dfsrouter";
+
+  /** Priority of the Router shutdown hook. */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 30;
+
+
+  private DFSRouter() {
+    // This is just a class to trigger the Router
+  }
+
+  /**
+   * Main run loop for the router.
+   *
+   * @param argv parameters.
+   */
+  public static void main(String[] argv) {
+    if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
+      System.exit(0);
+    }
+
+    try {
+      StringUtils.startupShutdownMessage(Router.class, argv, LOG);
+
+      Router router = new Router();
+
+      ShutdownHookManager.get().addShutdownHook(
+          new CompositeServiceShutdownHook(router), SHUTDOWN_HOOK_PRIORITY);
+
+      Configuration conf = new HdfsConfiguration();
+      router.init(conf);
+      router.start();
+    } catch (Throwable e) {
+      LOG.error("Failed to start router", e);
+      terminate(1, e);
+    }
+  }
+}
\ No newline at end of file
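
The new DFSRouter class is a thin launcher: it handles the help argument, registers a CompositeService shutdown hook, and runs the standard init/start lifecycle on a Router built from an HdfsConfiguration. As a rough sketch of driving the same startup sequence from user code (for example a test harness), assuming the Router constructor is accessible from outside its package, something like the snippet below would do it; the class name EmbeddedRouterExample and its package are hypothetical.

    // Hypothetical example, not part of this patch: embed and start a Router
    // programmatically, mirroring DFSRouter.main above.
    package org.example.federation;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.federation.router.DFSRouter;
    import org.apache.hadoop.hdfs.server.federation.router.Router;
    import org.apache.hadoop.service.CompositeService.CompositeServiceShutdownHook;
    import org.apache.hadoop.util.ShutdownHookManager;

    public final class EmbeddedRouterExample {
      public static void main(String[] args) {
        // Same configuration type the daemon constructs (picks up hdfs-site.xml).
        Configuration conf = new HdfsConfiguration();

        // Build the Router service and make sure it is stopped on JVM exit,
        // reusing the shutdown-hook priority exported by DFSRouter.
        Router router = new Router();
        ShutdownHookManager.get().addShutdownHook(
            new CompositeServiceShutdownHook(router),
            DFSRouter.SHUTDOWN_HOOK_PRIORITY);

        // Standard CompositeService lifecycle: init wires up the sub-services,
        // start brings them up (RPC server, resolvers, metrics).
        router.init(conf);
        router.start();
      }
    }

On Windows, the :dfsrouter label in hdfs.cmd now resolves to this same class, and the documentation change further down switches the daemon commands to start/stop dfsrouter accordingly.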

http://git-wip-us.apache.org/repos/asf/hadoop/blob/629b88b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
index df5549c..443b9a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.federation.router;
 
 import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.newActiveNamenodeResolver;
 import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.newFileSubclusterResolver;
-import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import java.io.IOException;
 import java.net.InetAddress;
@@ -35,7 +34,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.federation.metrics.FederationMetrics;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
@@ -44,8 +42,6 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.JvmPauseMonitor;
-import org.apache.hadoop.util.ShutdownHookManager;
-import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -110,12 +106,6 @@ public class Router extends CompositeService {
   private JvmPauseMonitor pauseMonitor;
 
 
-  /** Usage string for help message. */
-  private static final String USAGE = "Usage: java Router";
-
-  /** Priority of the Router shutdown hook. */
-  public static final int SHUTDOWN_HOOK_PRIORITY = 30;
-
 
   /////////////////////////////////////////////////////////
   // Constructor
@@ -250,35 +240,6 @@ public class Router extends CompositeService {
     }.start();
   }
 
-  /**
-   * Main run loop for the router.
-   *
-   * @param argv parameters.
-   */
-  public static void main(String[] argv) {
-    if (DFSUtil.parseHelpArgument(argv, Router.USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
-    try {
-      StringUtils.startupShutdownMessage(Router.class, argv, LOG);
-
-      Router router = new Router();
-
-      ShutdownHookManager.get().addShutdownHook(
-          new CompositeServiceShutdownHook(router), SHUTDOWN_HOOK_PRIORITY);
-
-      Configuration conf = new HdfsConfiguration();
-      router.init(conf);
-      router.start();
-    } catch (Throwable e) {
-      LOG.error("Failed to start router", e);
-      terminate(1, e);
-    }
-  }
-
-
-
   /////////////////////////////////////////////////////////
   // RPC Server
   /////////////////////////////////////////////////////////

http://git-wip-us.apache.org/repos/asf/hadoop/blob/629b88b4/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index 1cea7f6..2cad0f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -164,11 +164,11 @@ The rest of the options are documented in [hdfs-default.xml](./hdfs-default.xml)
 
 Once the Router is configured, it can be started:
 
-    [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs start router
+    [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs start dfsrouter
 
 And to stop it:
 
-    [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs stop router
+    [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs stop dfsrouter
 
 ### Mount table management
 
@@ -179,10 +179,10 @@ For example, if we to mount `/data/app1` in the federated namespace, it is recom
 The federation admin tool supports managing the mount table.
 For example, to create three mount points and list them:
 
-    [hdfs]$ $HADOOP_HOME/bin/hdfs federation -add /tmp ns1 /tmp
-    [hdfs]$ $HADOOP_HOME/bin/hdfs federation -add /data/app1 ns2 /data/app1
-    [hdfs]$ $HADOOP_HOME/bin/hdfs federation -add /data/app2 ns3 /data/app2
-    [hdfs]$ $HADOOP_HOME/bin/hdfs federation -ls
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /tmp ns1 /tmp
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /data/app1 ns2 /data/app1
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /data/app2 ns3 /data/app2
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -ls
 
 If a mount point is not set, the Router will map it to the default namespace `dfs.federation.router.default.nameserviceId`.
 

