[ 
https://issues.apache.org/jira/browse/HDFS-17263?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17790091#comment-17790091
 ] 

ASF GitHub Bot commented on HDFS-17263:
---------------------------------------

Hexiaoqiao commented on code in PR #6291:
URL: https://github.com/apache/hadoop/pull/6291#discussion_r1406206626


##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterTrash.java:
##########
@@ -227,6 +234,61 @@ public void testMoveToTrashWithKerberosUser() throws 
IOException,
     assertEquals(1, fileStatuses.length);
   }
 
+  @Test
+  public void testMultipleMountPoint() throws IOException,
+      URISyntaxException, InterruptedException {
+    MountTable addEntry = MountTable.newInstance(MOUNT_POINT,
+        Collections.singletonMap(ns0, MOUNT_POINT));
+    MountTable addEntry1 = MountTable.newInstance(MOUNT_POINT1,
+        Collections.singletonMap(ns1, MOUNT_POINT1));
+    MountTable addEntry2 = MountTable.newInstance(MOUNT_POINT2,
+        Collections.singletonMap(ns1, MOUNT_POINT2));
+    assertTrue(addMountTable(addEntry));
+    assertTrue(addMountTable(addEntry1));
+    assertTrue(addMountTable(addEntry2));
+
+    // current user client
+    DFSClient client = nnContext.getClient();
+    client.setOwner("/", TEST_USER, TEST_USER);
+
+    DFSClient client1 = nnContext1.getClient();
+    client1.setOwner("/", TEST_USER, TEST_USER);
+
+
+    UserGroupInformation ugi = UserGroupInformation.
+        createRemoteUser(TEST_USER);
+    // test user client
+    client = nnContext.getClient(ugi);
+    client.mkdirs(MOUNT_POINT, new FsPermission("777"), true);
+    client.create(FILE, true);
+
+    client1 = nnContext1.getClient(ugi);
+    client1.mkdirs("/user", new FsPermission("777"), true);
+    client1.mkdirs(MOUNT_POINT1, new FsPermission("777"), true);
+    client1.create(FILE1, true);
+    client1.mkdirs(MOUNT_POINT2, new FsPermission("777"), true);
+
+    // move to Trash
+    Configuration routerConf = routerContext.getConf();
+    FileSystem fs =
+        DFSTestUtil.getFileSystemAs(ugi, routerConf);
+
+    Trash trash = new Trash(fs, routerConf);

Review Comment:
   Please fix the checkstyle.



##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterTrash.java:
##########
@@ -227,6 +234,61 @@ public void testMoveToTrashWithKerberosUser() throws 
IOException,
     assertEquals(1, fileStatuses.length);
   }
 
+  @Test
+  public void testMultipleMountPoint() throws IOException,
+      URISyntaxException, InterruptedException {
+    MountTable addEntry = MountTable.newInstance(MOUNT_POINT,
+        Collections.singletonMap(ns0, MOUNT_POINT));
+    MountTable addEntry1 = MountTable.newInstance(MOUNT_POINT1,
+        Collections.singletonMap(ns1, MOUNT_POINT1));
+    MountTable addEntry2 = MountTable.newInstance(MOUNT_POINT2,
+        Collections.singletonMap(ns1, MOUNT_POINT2));
+    assertTrue(addMountTable(addEntry));
+    assertTrue(addMountTable(addEntry1));
+    assertTrue(addMountTable(addEntry2));
+
+    // current user client

Review Comment:
   Please format the comments: start with an uppercase letter and end with a period.



##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterTrash.java:
##########
@@ -227,6 +234,61 @@ public void testMoveToTrashWithKerberosUser() throws 
IOException,
     assertEquals(1, fileStatuses.length);
   }
 
+  @Test
+  public void testMultipleMountPoint() throws IOException,
+      URISyntaxException, InterruptedException {
+    MountTable addEntry = MountTable.newInstance(MOUNT_POINT,
+        Collections.singletonMap(ns0, MOUNT_POINT));
+    MountTable addEntry1 = MountTable.newInstance(MOUNT_POINT1,
+        Collections.singletonMap(ns1, MOUNT_POINT1));
+    MountTable addEntry2 = MountTable.newInstance(MOUNT_POINT2,
+        Collections.singletonMap(ns1, MOUNT_POINT2));
+    assertTrue(addMountTable(addEntry));
+    assertTrue(addMountTable(addEntry1));
+    assertTrue(addMountTable(addEntry2));
+
+    // current user client
+    DFSClient client = nnContext.getClient();
+    client.setOwner("/", TEST_USER, TEST_USER);
+
+    DFSClient client1 = nnContext1.getClient();
+    client1.setOwner("/", TEST_USER, TEST_USER);
+
+
+    UserGroupInformation ugi = UserGroupInformation.
+        createRemoteUser(TEST_USER);
+    // test user client
+    client = nnContext.getClient(ugi);
+    client.mkdirs(MOUNT_POINT, new FsPermission("777"), true);
+    client.create(FILE, true);
+
+    client1 = nnContext1.getClient(ugi);
+    client1.mkdirs("/user", new FsPermission("777"), true);
+    client1.mkdirs(MOUNT_POINT1, new FsPermission("777"), true);
+    client1.create(FILE1, true);
+    client1.mkdirs(MOUNT_POINT2, new FsPermission("777"), true);
+
+    // move to Trash
+    Configuration routerConf = routerContext.getConf();
+    FileSystem fs =
+        DFSTestUtil.getFileSystemAs(ugi, routerConf);
+
+    Trash trash = new Trash(fs, routerConf);
+    assertTrue(trash.moveToTrash(new Path(FILE)));
+    assertTrue(trash.moveToTrash(new Path(FILE1)));
+
+
+    // Client users see the global trash view, so we should see all three mount points.
+    FileStatus[] fileStatuses = fs.listStatus(new 
Path("/user/test-trash/.Trash/Current/"));
+    assertEquals(3, fileStatuses.length);
+
+    // This should return fileStatuses rather than a FileNotFoundException.
+    fileStatuses = fs.listStatus(new 
Path("/user/test-trash/.Trash/Current/"+MOUNT_POINT2));
+    assertEquals(0, fileStatuses.length);
+
+    client1.delete("/user", true);

Review Comment:
   Please fix the checkstyle.





> RBF: Fix client ls trash path cannot get except default nameservices trash 
> path
> -------------------------------------------------------------------------------
>
>                 Key: HDFS-17263
>                 URL: https://issues.apache.org/jira/browse/HDFS-17263
>             Project: Hadoop HDFS
>          Issue Type: Improvement
>            Reporter: liuguanghua
>            Priority: Major
>              Labels: pull-request-available
>
> With HDFS-16024, renaming data to the Trash is based on the src locations. 
> That is great for my usage. After a period of use, however, I found that 
> this causes an issue.
> There are two nameservices ns0   ns1,  and ns0 is the default nameservice.
> (1) Add moutTable 
> /home/data -> (ns0, /home/data)
> /data1/test1 -> (ns1, /data1/test1 )
> /data2/test2 -> (ns1, /data2/test2 )
> (2)mv file to trash
> ns0:   /user/test-user/.Trash/Current/home/data/file1
> ns1:   /user/test-user/.Trash/Current/data1/test1/file1
> (3) A client ls via DFSRouter will not see 
> /user/test-user/.Trash/Current/data1.
> (4) A client ls of /user/test-user/.Trash/Current/data2/test2 will return an 
> exception.
>  
>  



--
This message was sent by Atlassian Jira
(v8.20.10#820010)

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to