This is an automated email from the ASF dual-hosted git repository.

inigoiri pushed a commit to branch HDFS-13891
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit dca3b2edf2ac77019c9d6c7d76ca35f2f451327c
Author: Yiqun Lin <yq...@apache.org>
AuthorDate: Tue Oct 23 14:34:29 2018 +0800

    HDFS-14011. RBF: Add more information to HdfsFileStatus for a mount point. Contributed by Akira Ajisaka.
---
 .../resolver/FileSubclusterResolver.java           |  6 ++-
 .../federation/router/RouterClientProtocol.java    | 30 +++++++++---
 .../router/RouterQuotaUpdateService.java           |  9 ++--
 .../hdfs/server/federation/MockResolver.java       | 17 +++----
 .../federation/router/TestRouterMountTable.java    | 55 +++++++++++++++++++++-
 .../router/TestRouterRpcMultiDestination.java      |  5 +-
 6 files changed, 97 insertions(+), 25 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
index 5aa5ec9..6432bb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
@@ -61,8 +61,10 @@ public interface FileSubclusterResolver {
    * cache.
    *
    * @param path Path to get the mount points under.
-   * @return List of mount points present at this path or zero-length list if
-   *         none are found.
+   * @return List of mount points present at this path. Return zero-length
+   *         list if the path is a mount point but there are no mount points
+   *         under the path. Return null if the path is not a mount point
+   *         and there are no mount points under the path.
    * @throws IOException Throws exception if the data is not available.
    */
   List<String> getMountPoints(String path) throws IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index 344401f..9e2979b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -720,6 +720,9 @@ public class RouterClientProtocol implements ClientProtocol {
           date = dates.get(src);
         }
         ret = getMountPointStatus(src, children.size(), date);
+      } else if (children != null) {
+        // The src is a mount point, but there are no files or directories
+        ret = getMountPointStatus(src, 0, 0);
       }
     }
 
@@ -1728,13 +1731,26 @@ public class RouterClientProtocol implements ClientProtocol {
     FsPermission permission = FsPermission.getDirDefault();
     String owner = this.superUser;
     String group = this.superGroup;
-    try {
-      // TODO support users, it should be the user for the pointed folder
-      UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
-      owner = ugi.getUserName();
-      group = ugi.getPrimaryGroupName();
-    } catch (IOException e) {
-      LOG.error("Cannot get the remote user: {}", e.getMessage());
+    if (subclusterResolver instanceof MountTableResolver) {
+      try {
+        MountTableResolver mountTable = (MountTableResolver) subclusterResolver;
+        MountTable entry = mountTable.getMountPoint(name);
+        if (entry != null) {
+          permission = entry.getMode();
+          owner = entry.getOwnerName();
+          group = entry.getGroupName();
+        }
+      } catch (IOException e) {
+        LOG.error("Cannot get mount point: {}", e.getMessage());
+      }
+    } else {
+      try {
+        UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
+        owner = ugi.getUserName();
+        group = ugi.getPrimaryGroupName();
+      } catch (IOException e) {
+        LOG.error("Cannot get remote user: {}", e.getMessage());
+      }
     }
     long inodeId = 0;
     return new HdfsFileStatus.Builder()
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
index 4813b53..9bfd705 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
@@ -87,11 +87,12 @@ public class RouterQuotaUpdateService extends PeriodicService {
 
         QuotaUsage currentQuotaUsage = null;
 
-        // Check whether destination path exists in filesystem. If destination
-        // is not present, reset the usage. For other mount entry get current
-        // quota usage
+        // Check whether destination path exists in filesystem. When the
+        // mtime is zero, the destination is not present and reset the usage.
+        // This is because mount table does not have mtime.
+        // For other mount entry get current quota usage
         HdfsFileStatus ret = this.rpcServer.getFileInfo(src);
-        if (ret == null) {
+        if (ret == null || ret.getModificationTime() == 0) {
           currentQuotaUsage = new RouterQuotaUsage.Builder()
               .fileAndDirectoryCount(0)
               .quota(nsQuota)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index f5636ce..9bff007 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -303,15 +303,16 @@ public class MockResolver
 
   @Override
   public List<String> getMountPoints(String path) throws IOException {
+    // Mounts only supported under root level
+    if (!path.equals("/")) {
+      return null;
+    }
     List<String> mounts = new ArrayList<>();
-    if (path.equals("/")) {
-      // Mounts only supported under root level
-      for (String mount : this.locations.keySet()) {
-        if (mount.length() > 1) {
-          // Remove leading slash, this is the behavior of the mount tree,
-          // return only names.
-          mounts.add(mount.replace("/", ""));
-        }
+    for (String mount : this.locations.keySet()) {
+      if (mount.length() > 1) {
+        // Remove leading slash, this is the behavior of the mount tree,
+        // return only names.
+        mounts.add(mount.replace("/", ""));
       }
     }
     return mounts;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
index 4d8ffe1..d2b78d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -43,8 +44,12 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.util.Time;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -59,9 +64,11 @@ public class TestRouterMountTable {
   private static RouterContext routerContext;
   private static MountTableResolver mountTable;
   private static ClientProtocol routerProtocol;
+  private static long startTime;
 
   @BeforeClass
   public static void globalSetUp() throws Exception {
+    startTime = Time.now();
 
     // Build and start a federated cluster
     cluster = new StateStoreDFSCluster(false, 1);
@@ -92,6 +99,21 @@ public class TestRouterMountTable {
     }
   }
 
+  @After
+  public void clearMountTable() throws IOException {
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTableManager = client.getMountTableManager();
+    GetMountTableEntriesRequest req1 =
+        GetMountTableEntriesRequest.newInstance("/");
+    GetMountTableEntriesResponse response =
+        mountTableManager.getMountTableEntries(req1);
+    for (MountTable entry : response.getEntries()) {
+      RemoveMountTableEntryRequest req2 =
+          RemoveMountTableEntryRequest.newInstance(entry.getSourcePath());
+      mountTableManager.removeMountTableEntry(req2);
+    }
+  }
+
   @Test
   public void testReadOnly() throws Exception {
 
@@ -157,7 +179,6 @@ public class TestRouterMountTable {
    */
   @Test
   public void testListFilesTime() throws Exception {
-    Long beforeCreatingTime = Time.now();
     // Add mount table entry
     MountTable addEntry = MountTable.newInstance(
         "/testdir", Collections.singletonMap("ns0", "/testdir"));
@@ -211,10 +232,40 @@ public class TestRouterMountTable {
       Long expectedTime = pathModTime.get(currentFile);
 
       assertEquals(currentFile, fileName);
-      assertTrue(currentTime > beforeCreatingTime);
+      assertTrue(currentTime > startTime);
       assertEquals(currentTime, expectedTime);
     }
     // Verify the total number of results found/matched
     assertEquals(pathModTime.size(), listing.getPartialListing().length);
   }
+
+  /**
+   * Verify that the file listing contains correct permission.
+   */
+  @Test
+  public void testMountTablePermissions() throws Exception {
+    // Add mount table entries
+    MountTable addEntry = MountTable.newInstance(
+        "/testdir1", Collections.singletonMap("ns0", "/testdir1"));
+    addEntry.setGroupName("group1");
+    addEntry.setOwnerName("owner1");
+    addEntry.setMode(FsPermission.createImmutable((short)0775));
+    assertTrue(addMountTable(addEntry));
+    addEntry = MountTable.newInstance(
+        "/testdir2", Collections.singletonMap("ns0", "/testdir2"));
+    addEntry.setGroupName("group2");
+    addEntry.setOwnerName("owner2");
+    addEntry.setMode(FsPermission.createImmutable((short)0755));
+    assertTrue(addMountTable(addEntry));
+
+    HdfsFileStatus fs = routerProtocol.getFileInfo("/testdir1");
+    assertEquals("group1", fs.getGroup());
+    assertEquals("owner1", fs.getOwner());
+    assertEquals((short) 0775, fs.getPermission().toShort());
+
+    fs = routerProtocol.getFileInfo("/testdir2");
+    assertEquals("group2", fs.getGroup());
+    assertEquals("owner2", fs.getOwner());
+    assertEquals((short) 0755, fs.getPermission().toShort());
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
index 7e09760..94b712f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
@@ -123,8 +123,9 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
     RouterContext rc = getRouterContext();
     Router router = rc.getRouter();
     FileSubclusterResolver subclusterResolver = router.getSubclusterResolver();
-    for (String mount : subclusterResolver.getMountPoints(path)) {
-      requiredPaths.add(mount);
+    List<String> mountList = subclusterResolver.getMountPoints(path);
+    if (mountList != null) {
+      requiredPaths.addAll(mountList);
     }
 
     // Get files/dirs from the Namenodes


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to