goiri commented on code in PR #4763:
URL: https://github.com/apache/hadoop/pull/4763#discussion_r951889394


##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java:
##########
@@ -1242,14 +1243,95 @@ public void setBalancerBandwidth(long bandwidth) throws IOException {
     rpcClient.invokeConcurrent(nss, method, true, false);
   }
 
+  /**
+   * Recursively get all the locations for the path.
+   * For example, there are some mount points:
+   *   /a -> ns0 -> /a
+   *   /a/b -> ns1 -> /a/b
+   *   /a/b/c -> ns2 -> /a/b/c
+   * When the path is '/a', the result of locations should be
+   * {ns0 -> [RemoteLocation(/a)], ns1 -> [RemoteLocation(/a/b)], ns2 -> [RemoteLocation(/a/b/c)]}
+   * @param path the path to get the locations.
+   * @param locations a map to store all the locations and key is namespace id.
+   * @throws IOException
+   */
+  @VisibleForTesting
+  void getAllLocations(String path, Map<String, List<RemoteLocation>> locations)
+      throws IOException {
+    try {
+      List<RemoteLocation> parentLocations =
+          rpcServer.getLocationsForPath(path, false, false);
+      parentLocations.forEach(
+          l -> locations.computeIfAbsent(l.getNameserviceId(), k -> new ArrayList<>()).add(l));
+    } catch (NoLocationException | RouterResolveException e) {
+      LOG.debug("Cannot find locations for {}.", path);
+    }
+
+    final List<String> children = subclusterResolver.getMountPoints(path);
+    if (children != null) {
+      for (String child : children) {
+        Path childPath = new Path(path, child);
+        getAllLocations(childPath.toUri().getPath(), locations);
+      }
+    }
+  }
+
+  /**
+   * Get all the locations of the path for {@link this#getContentSummary(String)}.
+   * For example, there are some mount points:
+   *   /a -> ns0 -> /a
+   *   /a/b -> ns0 -> /a/b
+   *   /a/b/c -> ns1 -> /a/b/c
+   * When the path is '/a', the result of locations should be
+   * [RemoteLocation('/a', ns0, '/a'), RemoteLocation('/a/b/c', ns1, '/a/b/c')]
+   * When the path is '/b', will throw NoLocationException.
+   * @param path the path to get content summary
+   * @return one list contains all the remote location
+   * @throws IOException
+   */
+  @VisibleForTesting
+  List<RemoteLocation> getLocationsForContentSummary(String path) throws IOException {
+    final Map<String, List<RemoteLocation>> ns2Locations = new HashMap<>();
+    final List<RemoteLocation> locations = new ArrayList<>();
+
+    // Try to get all the locations of the path.
+    getAllLocations(path, ns2Locations);
+
+    if (ns2Locations.isEmpty()) {
+      throw new NoLocationException(path, subclusterResolver.getClass());
+    }
+
+    // remove the redundancy remoteLocation order by destination.
+    ns2Locations.forEach((k, v) -> {
+      List<RemoteLocation> sortedList = v.stream().sorted().collect(Collectors.toList());
+      int size = sortedList.size();
+      for (int i = size - 1; i > -1; i--) {
+        RemoteLocation currentLocation = sortedList.get(i);
+        if (i == 0) {
+          locations.add(currentLocation);
+          continue;
+        }
+
+        RemoteLocation preLocation = sortedList.get(i - 1);
+        if (!currentLocation.getDest().startsWith(preLocation.getDest() + Path.SEPARATOR)) {
+          locations.add(currentLocation);
+        } else {
+          LOG.debug("Ignore the redundancy location {}, because there is an ancestor location {}",

Review Comment:
   "Ignore redundant location"



##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java:
##########
@@ -1242,14 +1243,95 @@ public void setBalancerBandwidth(long bandwidth) throws IOException {
     rpcClient.invokeConcurrent(nss, method, true, false);
   }
 
+  /**
+   * Recursively get all the locations for the path.
+   * For example, there are some mount points:
+   *   /a -> ns0 -> /a
+   *   /a/b -> ns1 -> /a/b
+   *   /a/b/c -> ns2 -> /a/b/c
+   * When the path is '/a', the result of locations should be
+   * {ns0 -> [RemoteLocation(/a)], ns1 -> [RemoteLocation(/a/b)], ns2 -> [RemoteLocation(/a/b/c)]}
+   * @param path the path to get the locations.
+   * @param locations a map to store all the locations and key is namespace id.
+   * @throws IOException
+   */
+  @VisibleForTesting
+  void getAllLocations(String path, Map<String, List<RemoteLocation>> locations)
+      throws IOException {
+    try {
+      List<RemoteLocation> parentLocations =
+          rpcServer.getLocationsForPath(path, false, false);
+      parentLocations.forEach(
+          l -> locations.computeIfAbsent(l.getNameserviceId(), k -> new ArrayList<>()).add(l));
+    } catch (NoLocationException | RouterResolveException e) {
+      LOG.debug("Cannot find locations for {}.", path);
+    }
+
+    final List<String> children = subclusterResolver.getMountPoints(path);
+    if (children != null) {
+      for (String child : children) {
+        Path childPath = new Path(path, child);
+        getAllLocations(childPath.toUri().getPath(), locations);
+      }
+    }
+  }
+
+  /**
+   * Get all the locations of the path for {@link this#getContentSummary(String)}.
+   * For example, there are some mount points:
+   *   /a -> ns0 -> /a
+   *   /a/b -> ns0 -> /a/b
+   *   /a/b/c -> ns1 -> /a/b/c
+   * When the path is '/a', the result of locations should be
+   * [RemoteLocation('/a', ns0, '/a'), RemoteLocation('/a/b/c', ns1, '/a/b/c')]
+   * When the path is '/b', will throw NoLocationException.
+   * @param path the path to get content summary
+   * @return one list contains all the remote location
+   * @throws IOException
+   */
+  @VisibleForTesting
+  List<RemoteLocation> getLocationsForContentSummary(String path) throws IOException {
+    final Map<String, List<RemoteLocation>> ns2Locations = new HashMap<>();
+    final List<RemoteLocation> locations = new ArrayList<>();
+
+    // Try to get all the locations of the path.
+    getAllLocations(path, ns2Locations);

Review Comment:
   ```
   final Map<String, List<RemoteLocation>> ns2Locations = getAllLocations(path);
   ```
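A minimal sketch of the caller side under that suggestion, assuming `getAllLocations` is refactored to build and return the map itself (the no-argument-map signature is hypothetical; the rest reuses the PR's own names):

```
// Hypothetical: getAllLocations(String) returns the per-namespace map.
final Map<String, List<RemoteLocation>> ns2Locations = getAllLocations(path);
if (ns2Locations.isEmpty()) {
  throw new NoLocationException(path, subclusterResolver.getClass());
}
```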



##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java:
##########
@@ -1242,14 +1243,95 @@ public void setBalancerBandwidth(long bandwidth) throws IOException {
     rpcClient.invokeConcurrent(nss, method, true, false);
   }
 
+  /**
+   * Recursively get all the locations for the path.
+   * For example, there are some mount points:
+   *   /a -> ns0 -> /a
+   *   /a/b -> ns1 -> /a/b
+   *   /a/b/c -> ns2 -> /a/b/c
+   * When the path is '/a', the result of locations should be
+   * {ns0 -> [RemoteLocation(/a)], ns1 -> [RemoteLocation(/a/b)], ns2 -> [RemoteLocation(/a/b/c)]}
+   * @param path the path to get the locations.
+   * @param locations a map to store all the locations and key is namespace id.
+   * @throws IOException
+   */
+  @VisibleForTesting
+  void getAllLocations(String path, Map<String, List<RemoteLocation>> locations)
+      throws IOException {
+    try {
+      List<RemoteLocation> parentLocations =

Review Comment:
   Single line.
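Joined into a single statement, that would read roughly:

```
List<RemoteLocation> parentLocations = rpcServer.getLocationsForPath(path, false, false);
```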



##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableWithoutDefaultNS.java:
##########
@@ -0,0 +1,241 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Test a router end-to-end including the MountTable without default nameservice.
+ */
+public class TestRouterMountTableWithoutDefaultNS {
+  private static StateStoreDFSCluster cluster;
+  private static RouterContext routerContext;
+  private static MountTableResolver mountTable;
+  private static FileSystem nnFs0;
+  private static FileSystem nnFs1;
+
+  @BeforeClass
+  public static void globalSetUp() throws Exception {
+    // Build and start a federated cluster
+    cluster = new StateStoreDFSCluster(false, 2);
+    Configuration conf = new RouterConfigBuilder()
+        .stateStore()
+        .admin()
+        .rpc()
+        .build();
+    conf.setInt(RBFConfigKeys.DFS_ROUTER_ADMIN_MAX_COMPONENT_LENGTH_KEY, 20);
+    conf.setBoolean(RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE, false);
+    cluster.addRouterOverrides(conf);
+    cluster.startCluster();
+    cluster.startRouters();
+    cluster.waitClusterUp();
+
+    nnFs0 = cluster.getNamenode("ns0", null).getFileSystem();
+    nnFs1 = cluster.getNamenode("ns1", null).getFileSystem();
+    routerContext = cluster.getRandomRouter();
+
+    // Get the end points
+    routerContext = cluster.getRandomRouter();
+    Router router = routerContext.getRouter();
+    mountTable = (MountTableResolver) router.getSubclusterResolver();
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    if (cluster != null) {
+      cluster.stopRouter(routerContext);
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  @After
+  public void clearMountTable() throws IOException {
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTableManager = client.getMountTableManager();
+    GetMountTableEntriesRequest req1 = GetMountTableEntriesRequest.newInstance("/");
+    GetMountTableEntriesResponse response = mountTableManager.getMountTableEntries(req1);
+    for (MountTable entry : response.getEntries()) {
+      RemoveMountTableEntryRequest req2 =
+          RemoveMountTableEntryRequest.newInstance(entry.getSourcePath());
+      mountTableManager.removeMountTableEntry(req2);
+    }
+  }
+
+  /**
+   * Add a mount table entry to the mount table through the admin API.
+   * @param entry Mount table entry to add.
+   * @return If it was succesfully added.
+   * @throws IOException Problems adding entries.
+   */
+  private boolean addMountTable(final MountTable entry) throws IOException {
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTableManager = client.getMountTableManager();
+    AddMountTableEntryRequest addRequest = AddMountTableEntryRequest.newInstance(entry);
+    AddMountTableEntryResponse addResponse = mountTableManager.addMountTableEntry(addRequest);
+
+    // Reload the Router cache
+    mountTable.loadCache(true);
+
+    return addResponse.getStatus();
+  }
+
+  /**
+   * Verify that RBF that disable default nameservice should support
+   * get information about ancestor mount points.
+   */
+  @Test
+  public void testGetContentSummaryWithSubMountPoint() throws IOException {
+    MountTable addEntry = MountTable.newInstance("/testdir/1/2",
+        Collections.singletonMap("ns0", "/testdir/1/2"));
+    assertTrue(addMountTable(addEntry));
+
+    try {
+      writeData(nnFs0, new Path("/testdir/1/2/3"), 10 * 1024 * 1024);
+
+      RouterRpcServer routerRpcServer = routerContext.getRouterRpcServer();
+      ContentSummary summaryFromRBF = routerRpcServer.getContentSummary("/testdir");
+      assertNotNull(summaryFromRBF);
+      assertEquals(1, summaryFromRBF.getFileCount());
+      assertEquals(10 * 1024 * 1024, summaryFromRBF.getLength());
+    } finally {
+      nnFs0.delete(new Path("/testdir"), true);
+    }
+  }
+
+  @Test
+  public void testGetAllLocations() throws IOException {
+    // Add mount table entry.
+    MountTable addEntry = MountTable.newInstance("/testA",
+        Collections.singletonMap("ns0", "/testA"));
+    assertTrue(addMountTable(addEntry));
+    addEntry = MountTable.newInstance("/testA/testB",
+        Collections.singletonMap("ns1", "/testA/testB"));
+    assertTrue(addMountTable(addEntry));
+    addEntry = MountTable.newInstance("/testA/testB/testC",
+        Collections.singletonMap("ns2", "/testA/testB/testC"));
+    assertTrue(addMountTable(addEntry));
+
+    Map<String, List<RemoteLocation>> locations = new HashMap<>();
+    RouterClientProtocol protocol = routerContext.getRouterRpcServer().getClientProtocolModule();
+    protocol.getAllLocations("/testA", locations);
+    assertEquals(3, locations.size());
+  }
+
+  @Test
+  public void testGetLocationsForContentSummary() throws Exception {
+    // Add mount table entry.
+    MountTable addEntry = MountTable.newInstance("/testA/testB",
+        Collections.singletonMap("ns0", "/testA/testB"));
+    assertTrue(addMountTable(addEntry));
+    addEntry = MountTable.newInstance("/testA/testB/testC",
+        Collections.singletonMap("ns1", "/testA/testB/testC"));
+    assertTrue(addMountTable(addEntry));
+
+    RouterClientProtocol protocol = routerContext.getRouterRpcServer().getClientProtocolModule();
+    List<RemoteLocation> locations = protocol.getLocationsForContentSummary("/testA");
+    assertEquals(2, locations.size());
+
+    for (RemoteLocation location : locations) {
+      String nsId = location.getNameserviceId();
+      if ("ns0".equals(nsId)) {
+        assertEquals("/testA/testB", location.getDest());
+      } else if ("ns1".equals(nsId)) {
+        assertEquals("/testA/testB/testC", location.getDest());
+      } else {
+        fail("Unexpected NS " + nsId);
+      }
+    }
+
+    LambdaTestUtils.intercept(NoLocationException.class,
+        () -> protocol.getLocationsForContentSummary("/testB"));
+  }
+
+  @Test
+  public void testGetContentSummary() throws Exception {
+    try {
+      // Add mount table entry.
+      MountTable addEntry = MountTable.newInstance("/testA",
+          Collections.singletonMap("ns0", "/testA"));
+      assertTrue(addMountTable(addEntry));
+      addEntry = MountTable.newInstance("/testA/testB",
+          Collections.singletonMap("ns0", "/testA/testB"));
+      assertTrue(addMountTable(addEntry));
+      addEntry = MountTable.newInstance("/testA/testB/testC",
+          Collections.singletonMap("ns1", "/testA/testB/testC"));
+      assertTrue(addMountTable(addEntry));
+
+      writeData(nnFs0, new Path("/testA/testB/file1"), 1024 * 1024);
+      writeData(nnFs1, new Path("/testA/testB/testC/file2"), 1024 * 1024);
+      writeData(nnFs1, new Path("/testA/testB/testC/file3"), 1024 * 1024);
+
+      RouterRpcServer routerRpcServer = routerContext.getRouterRpcServer();
+      ContentSummary summary = routerRpcServer.getContentSummary("/testA");
+      assertEquals(3, summary.getFileCount());
+      assertEquals(1024 * 1024 * 3, summary.getLength());
+
+      LambdaTestUtils.intercept(NoLocationException.class,
+          () -> routerRpcServer.getContentSummary("/testB"));
+    } finally {
+      nnFs0.delete(new Path("/testA"), true);
+      nnFs1.delete(new Path("/testA"), true);
+    }
+  }
+
+  void writeData(FileSystem fs, Path path, int fileLength) throws IOException {
+    try (FSDataOutputStream outputStream = fs.create(path)) {
+      int writeSize = 0;
+      while (writeSize < fileLength) {
+        outputStream.write(writeSize);
+        writeSize++;

Review Comment:
   You could do it in a for loop otherwise:
   ```
   for (int writeSize = 0; writeSize < fileLength; writeSize++) {
   ```
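A minimal sketch of that variant, keeping the same write pattern as the PR's while loop:

```
void writeData(FileSystem fs, Path path, int fileLength) throws IOException {
  try (FSDataOutputStream outputStream = fs.create(path)) {
    // Write fileLength bytes; each byte written is the low 8 bits of the counter.
    for (int writeSize = 0; writeSize < fileLength; writeSize++) {
      outputStream.write(writeSize);
    }
  }
}
```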



##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java:
##########
@@ -1242,14 +1243,95 @@ public void setBalancerBandwidth(long bandwidth) throws IOException {
     rpcClient.invokeConcurrent(nss, method, true, false);
   }
 
+  /**
+   * Recursively get all the locations for the path.
+   * For example, there are some mount points:
+   *   /a -> ns0 -> /a
+   *   /a/b -> ns1 -> /a/b
+   *   /a/b/c -> ns2 -> /a/b/c
+   * When the path is '/a', the result of locations should be
+   * {ns0 -> [RemoteLocation(/a)], ns1 -> [RemoteLocation(/a/b)], ns2 -> [RemoteLocation(/a/b/c)]}
+   * @param path the path to get the locations.
+   * @param locations a map to store all the locations and key is namespace id.
+   * @throws IOException
+   */
+  @VisibleForTesting
+  void getAllLocations(String path, Map<String, List<RemoteLocation>> locations)

Review Comment:
   I think it would be cleaner to define the output for this method instead of 
using the argument.
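A hedged sketch of what that refactor could look like, reusing the PR's own logic but returning the map instead of mutating an argument (the merge step for child results is an assumption, not the PR's code):

```
@VisibleForTesting
Map<String, List<RemoteLocation>> getAllLocations(String path) throws IOException {
  final Map<String, List<RemoteLocation>> locations = new HashMap<>();
  try {
    List<RemoteLocation> parentLocations = rpcServer.getLocationsForPath(path, false, false);
    parentLocations.forEach(
        l -> locations.computeIfAbsent(l.getNameserviceId(), k -> new ArrayList<>()).add(l));
  } catch (NoLocationException | RouterResolveException e) {
    LOG.debug("Cannot find locations for {}.", path);
  }
  final List<String> children = subclusterResolver.getMountPoints(path);
  if (children != null) {
    for (String child : children) {
      Path childPath = new Path(path, child);
      // Merge the recursive results for the child mount point into this map.
      getAllLocations(childPath.toUri().getPath()).forEach((ns, locs) ->
          locations.computeIfAbsent(ns, k -> new ArrayList<>()).addAll(locs));
    }
  }
  return locations;
}
```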



##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java:
##########
@@ -1242,14 +1243,95 @@ public void setBalancerBandwidth(long bandwidth) throws IOException {
     rpcClient.invokeConcurrent(nss, method, true, false);
   }
 
+  /**
+   * Recursively get all the locations for the path.
+   * For example, there are some mount points:
+   *   /a -> ns0 -> /a
+   *   /a/b -> ns1 -> /a/b
+   *   /a/b/c -> ns2 -> /a/b/c
+   * When the path is '/a', the result of locations should be
+   * {ns0 -> [RemoteLocation(/a)], ns1 -> [RemoteLocation(/a/b)], ns2 -> [RemoteLocation(/a/b/c)]}
+   * @param path the path to get the locations.
+   * @param locations a map to store all the locations and key is namespace id.
+   * @throws IOException
+   */
+  @VisibleForTesting
+  void getAllLocations(String path, Map<String, List<RemoteLocation>> locations)
+      throws IOException {
+    try {
+      List<RemoteLocation> parentLocations =
+          rpcServer.getLocationsForPath(path, false, false);
+      parentLocations.forEach(
+          l -> locations.computeIfAbsent(l.getNameserviceId(), k -> new ArrayList<>()).add(l));
+    } catch (NoLocationException | RouterResolveException e) {
+      LOG.debug("Cannot find locations for {}.", path);
+    }
+
+    final List<String> children = subclusterResolver.getMountPoints(path);
+    if (children != null) {
+      for (String child : children) {
+        Path childPath = new Path(path, child);
+        getAllLocations(childPath.toUri().getPath(), locations);
+      }
+    }
+  }
+
+  /**
+   * Get all the locations of the path for {@link this#getContentSummary(String)}.
+   * For example, there are some mount points:
+   *   /a -> ns0 -> /a
+   *   /a/b -> ns0 -> /a/b
+   *   /a/b/c -> ns1 -> /a/b/c
+   * When the path is '/a', the result of locations should be
+   * [RemoteLocation('/a', ns0, '/a'), RemoteLocation('/a/b/c', ns1, '/a/b/c')]
+   * When the path is '/b', will throw NoLocationException.
+   * @param path the path to get content summary
+   * @return one list contains all the remote location
+   * @throws IOException
+   */
+  @VisibleForTesting
+  List<RemoteLocation> getLocationsForContentSummary(String path) throws IOException {
+    final Map<String, List<RemoteLocation>> ns2Locations = new HashMap<>();
+    final List<RemoteLocation> locations = new ArrayList<>();
+
+    // Try to get all the locations of the path.
+    getAllLocations(path, ns2Locations);
+
+    if (ns2Locations.isEmpty()) {
+      throw new NoLocationException(path, subclusterResolver.getClass());
+    }
+
+    // remove the redundancy remoteLocation order by destination.
+    ns2Locations.forEach((k, v) -> {
+      List<RemoteLocation> sortedList = v.stream().sorted().collect(Collectors.toList());
+      int size = sortedList.size();
+      for (int i = size - 1; i > -1; i--) {
+        RemoteLocation currentLocation = sortedList.get(i);
+        if (i == 0) {
+          locations.add(currentLocation);
+          continue;

Review Comment:
   This is very personal but I would prefer to use an else instead of a 
continue.
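For reference, a sketch of the loop body with the `else` (the `LOG.debug` arguments are assumed, since the diff is cut off at that line):

```
for (int i = size - 1; i > -1; i--) {
  RemoteLocation currentLocation = sortedList.get(i);
  if (i == 0) {
    locations.add(currentLocation);
  } else {
    RemoteLocation preLocation = sortedList.get(i - 1);
    if (!currentLocation.getDest().startsWith(preLocation.getDest() + Path.SEPARATOR)) {
      locations.add(currentLocation);
    } else {
      // Argument list assumed; the message text is the PR's as shown in the diff.
      LOG.debug("Ignore the redundancy location {}, because there is an ancestor location {}",
          currentLocation, preLocation);
    }
  }
}
```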



##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableWithoutDefaultNS.java:
##########
@@ -0,0 +1,241 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Test a router end-to-end including the MountTable without default nameservice.
+ */
+public class TestRouterMountTableWithoutDefaultNS {
+  private static StateStoreDFSCluster cluster;
+  private static RouterContext routerContext;
+  private static MountTableResolver mountTable;
+  private static FileSystem nnFs0;
+  private static FileSystem nnFs1;
+
+  @BeforeClass
+  public static void globalSetUp() throws Exception {
+    // Build and start a federated cluster
+    cluster = new StateStoreDFSCluster(false, 2);
+    Configuration conf = new RouterConfigBuilder()
+        .stateStore()
+        .admin()
+        .rpc()
+        .build();
+    conf.setInt(RBFConfigKeys.DFS_ROUTER_ADMIN_MAX_COMPONENT_LENGTH_KEY, 20);
+    conf.setBoolean(RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE, false);
+    cluster.addRouterOverrides(conf);
+    cluster.startCluster();
+    cluster.startRouters();
+    cluster.waitClusterUp();
+
+    nnFs0 = cluster.getNamenode("ns0", null).getFileSystem();
+    nnFs1 = cluster.getNamenode("ns1", null).getFileSystem();
+    routerContext = cluster.getRandomRouter();
+
+    // Get the end points
+    routerContext = cluster.getRandomRouter();
+    Router router = routerContext.getRouter();
+    mountTable = (MountTableResolver) router.getSubclusterResolver();
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    if (cluster != null) {
+      cluster.stopRouter(routerContext);
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  @After
+  public void clearMountTable() throws IOException {
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTableManager = client.getMountTableManager();
+    GetMountTableEntriesRequest req1 = GetMountTableEntriesRequest.newInstance("/");
+    GetMountTableEntriesResponse response = mountTableManager.getMountTableEntries(req1);
+    for (MountTable entry : response.getEntries()) {
+      RemoveMountTableEntryRequest req2 =
+          RemoveMountTableEntryRequest.newInstance(entry.getSourcePath());
+      mountTableManager.removeMountTableEntry(req2);
+    }
+  }
+
+  /**
+   * Add a mount table entry to the mount table through the admin API.
+   * @param entry Mount table entry to add.
+   * @return If it was succesfully added.
+   * @throws IOException Problems adding entries.
+   */
+  private boolean addMountTable(final MountTable entry) throws IOException {
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTableManager = client.getMountTableManager();
+    AddMountTableEntryRequest addRequest = AddMountTableEntryRequest.newInstance(entry);
+    AddMountTableEntryResponse addResponse = mountTableManager.addMountTableEntry(addRequest);
+
+    // Reload the Router cache
+    mountTable.loadCache(true);
+
+    return addResponse.getStatus();
+  }
+
+  /**
+   * Verify that RBF that disable default nameservice should support
+   * get information about ancestor mount points.
+   */
+  @Test
+  public void testGetContentSummaryWithSubMountPoint() throws IOException {
+    MountTable addEntry = MountTable.newInstance("/testdir/1/2",
+        Collections.singletonMap("ns0", "/testdir/1/2"));
+    assertTrue(addMountTable(addEntry));
+
+    try {
+      writeData(nnFs0, new Path("/testdir/1/2/3"), 10 * 1024 * 1024);
+
+      RouterRpcServer routerRpcServer = routerContext.getRouterRpcServer();
+      ContentSummary summaryFromRBF = routerRpcServer.getContentSummary("/testdir");
+      assertNotNull(summaryFromRBF);
+      assertEquals(1, summaryFromRBF.getFileCount());
+      assertEquals(10 * 1024 * 1024, summaryFromRBF.getLength());
+    } finally {
+      nnFs0.delete(new Path("/testdir"), true);
+    }
+  }
+
+  @Test
+  public void testGetAllLocations() throws IOException {
+    // Add mount table entry.
+    MountTable addEntry = MountTable.newInstance("/testA",
+        Collections.singletonMap("ns0", "/testA"));
+    assertTrue(addMountTable(addEntry));
+    addEntry = MountTable.newInstance("/testA/testB",
+        Collections.singletonMap("ns1", "/testA/testB"));
+    assertTrue(addMountTable(addEntry));
+    addEntry = MountTable.newInstance("/testA/testB/testC",
+        Collections.singletonMap("ns2", "/testA/testB/testC"));
+    assertTrue(addMountTable(addEntry));
+
+    Map<String, List<RemoteLocation>> locations = new HashMap<>();
+    RouterClientProtocol protocol = routerContext.getRouterRpcServer().getClientProtocolModule();
+    protocol.getAllLocations("/testA", locations);
+    assertEquals(3, locations.size());
+  }
+
+  @Test
+  public void testGetLocationsForContentSummary() throws Exception {
+    // Add mount table entry.
+    MountTable addEntry = MountTable.newInstance("/testA/testB",
+        Collections.singletonMap("ns0", "/testA/testB"));
+    assertTrue(addMountTable(addEntry));
+    addEntry = MountTable.newInstance("/testA/testB/testC",
+        Collections.singletonMap("ns1", "/testA/testB/testC"));
+    assertTrue(addMountTable(addEntry));
+
+    RouterClientProtocol protocol = routerContext.getRouterRpcServer().getClientProtocolModule();
+    List<RemoteLocation> locations = protocol.getLocationsForContentSummary("/testA");
+    assertEquals(2, locations.size());
+
+    for (RemoteLocation location : locations) {
+      String nsId = location.getNameserviceId();
+      if ("ns0".equals(nsId)) {
+        assertEquals("/testA/testB", location.getDest());
+      } else if ("ns1".equals(nsId)) {
+        assertEquals("/testA/testB/testC", location.getDest());
+      } else {
+        fail("Unexpected NS " + nsId);
+      }
+    }
+
+    LambdaTestUtils.intercept(NoLocationException.class,
+        () -> protocol.getLocationsForContentSummary("/testB"));
+  }
+
+  @Test
+  public void testGetContentSummary() throws Exception {
+    try {
+      // Add mount table entry.
+      MountTable addEntry = MountTable.newInstance("/testA",
+          Collections.singletonMap("ns0", "/testA"));
+      assertTrue(addMountTable(addEntry));
+      addEntry = MountTable.newInstance("/testA/testB",
+          Collections.singletonMap("ns0", "/testA/testB"));
+      assertTrue(addMountTable(addEntry));
+      addEntry = MountTable.newInstance("/testA/testB/testC",
+          Collections.singletonMap("ns1", "/testA/testB/testC"));
+      assertTrue(addMountTable(addEntry));
+
+      writeData(nnFs0, new Path("/testA/testB/file1"), 1024 * 1024);
+      writeData(nnFs1, new Path("/testA/testB/testC/file2"), 1024 * 1024);
+      writeData(nnFs1, new Path("/testA/testB/testC/file3"), 1024 * 1024);
+
+      RouterRpcServer routerRpcServer = routerContext.getRouterRpcServer();
+      ContentSummary summary = routerRpcServer.getContentSummary("/testA");
+      assertEquals(3, summary.getFileCount());
+      assertEquals(1024 * 1024 * 3, summary.getLength());
+
+      LambdaTestUtils.intercept(NoLocationException.class,
+          () -> routerRpcServer.getContentSummary("/testB"));
+    } finally {
+      nnFs0.delete(new Path("/testA"), true);
+      nnFs1.delete(new Path("/testA"), true);
+    }
+  }
+
+  void writeData(FileSystem fs, Path path, int fileLength) throws IOException {
+    try (FSDataOutputStream outputStream = fs.create(path)) {
+      int writeSize = 0;
+      while (writeSize < fileLength) {
+        outputStream.write(writeSize);
+        writeSize++;

Review Comment:
   ```
   writeSize += outputStream.write(writeSize);
   ```
   Is this a thing we can do?
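Note that `java.io.OutputStream#write(int)`, which `FSDataOutputStream` inherits, returns `void`, so the write call cannot feed the counter directly; the for-loop form suggested in the earlier comment would be the usual alternative.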



##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java:
##########
@@ -1242,14 +1243,95 @@ public void setBalancerBandwidth(long bandwidth) throws IOException {
     rpcClient.invokeConcurrent(nss, method, true, false);
   }
 
+  /**
+   * Recursively get all the locations for the path.
+   * For example, there are some mount points:
+   *   /a -> ns0 -> /a
+   *   /a/b -> ns1 -> /a/b
+   *   /a/b/c -> ns2 -> /a/b/c
+   * When the path is '/a', the result of locations should be
+   * {ns0 -> [RemoteLocation(/a)], ns1 -> [RemoteLocation(/a/b)], ns2 -> [RemoteLocation(/a/b/c)]}
+   * @param path the path to get the locations.
+   * @param locations a map to store all the locations and key is namespace id.
+   * @throws IOException
+   */
+  @VisibleForTesting
+  void getAllLocations(String path, Map<String, List<RemoteLocation>> locations)
+      throws IOException {
+    try {
+      List<RemoteLocation> parentLocations =
+          rpcServer.getLocationsForPath(path, false, false);
+      parentLocations.forEach(
+          l -> locations.computeIfAbsent(l.getNameserviceId(), k -> new ArrayList<>()).add(l));
+    } catch (NoLocationException | RouterResolveException e) {
+      LOG.debug("Cannot find locations for {}.", path);
+    }
+
+    final List<String> children = subclusterResolver.getMountPoints(path);
+    if (children != null) {
+      for (String child : children) {
+        Path childPath = new Path(path, child);
+        getAllLocations(childPath.toUri().getPath(), locations);
+      }
+    }
+  }
+
+  /**
+   * Get all the locations of the path for {@link this#getContentSummary(String)}.
+   * For example, there are some mount points:
+   *   /a -> ns0 -> /a
+   *   /a/b -> ns0 -> /a/b
+   *   /a/b/c -> ns1 -> /a/b/c
+   * When the path is '/a', the result of locations should be
+   * [RemoteLocation('/a', ns0, '/a'), RemoteLocation('/a/b/c', ns1, '/a/b/c')]
+   * When the path is '/b', will throw NoLocationException.
+   * @param path the path to get content summary
+   * @return one list contains all the remote location
+   * @throws IOException
+   */
+  @VisibleForTesting
+  List<RemoteLocation> getLocationsForContentSummary(String path) throws IOException {
+    final Map<String, List<RemoteLocation>> ns2Locations = new HashMap<>();
+    final List<RemoteLocation> locations = new ArrayList<>();

Review Comment:
   Define this before the loop so it is easy to see where we do what.
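A compact sketch of that ordering, with the list declared right where the dedup loop starts (the loop body itself is unchanged from the PR and elided here):

```
// Try to get all the locations of the path.
getAllLocations(path, ns2Locations);

if (ns2Locations.isEmpty()) {
  throw new NoLocationException(path, subclusterResolver.getClass());
}

// Declared next to the loop that fills it.
final List<RemoteLocation> locations = new ArrayList<>();
ns2Locations.forEach((k, v) -> {
  // ... dedup by destination as in the PR ...
});
return locations;
```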



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
