[ 
https://issues.apache.org/jira/browse/HDFS-16570?focusedWorklogId=770645&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-770645
 ]

ASF GitHub Bot logged work on HDFS-16570:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 16/May/22 02:52
            Start Date: 16/May/22 02:52
    Worklog Time Spent: 10m 
      Work Description: zhangxiping1 commented on code in PR #4269:
URL: https://github.com/apache/hadoop/pull/4269#discussion_r873288398


##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterTrashMultipleDestinationMountTableResolver.java:
##########
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Tests moving data to the Trash through the Router when using
+ * MultipleDestinationMountTableResolver.
+ */
+public class TestRouterTrashMultipleDestinationMountTableResolver {
+
+  // Mini federated cluster backed by a state store.
+  private static StateStoreDFSCluster cluster;
+  // Router under test and its mount table resolver.
+  private static MiniRouterDFSCluster.RouterContext routerContext;
+  private static MountTableResolver resolver;
+  // Direct contexts and filesystems for each of the two nameservices,
+  // used to inspect the subclusters without going through the router.
+  private static MiniRouterDFSCluster.NamenodeContext nnContextNs0;
+  private static MiniRouterDFSCluster.NamenodeContext nnContextNs1;
+  private static FileSystem nnFsNs0;
+  private static FileSystem nnFsNs1;
+
+  // Nameservice ids, resolved once the cluster is started.
+  private static String ns0;
+  private static String ns1;
+  // Test user plus the mount point and per-nameservice file layout.
+  private static final String TEST_USER = "test-trash";
+  private static final String MOUNT_POINT = "/home/data";
+  private static final String MOUNT_POINT_CHILD_DIR = MOUNT_POINT + "/test";
+  private static final String FILE_NS0 = MOUNT_POINT_CHILD_DIR + "/fileNs0";
+  private static final String FILE_NS1 = MOUNT_POINT_CHILD_DIR + "/fileNs1";
+  // Trash root and "Current" checkpoint path for TEST_USER.
+  private static final String TRASH_ROOT = "/user/" + TEST_USER + "/.Trash";
+  private static final String CURRENT = "/Current";
+
+  @BeforeClass
+  public static void globalSetUp() throws Exception {
+    // Build and start a federated cluster
+    cluster = new StateStoreDFSCluster(false, 2,
+        MultipleDestinationMountTableResolver.class);
+    Configuration routerConf =
+        new RouterConfigBuilder().stateStore().admin().quota().rpc().build();
+
+    // Namenode-side overrides: enable ACLs and set a non-zero trash
+    // interval (1440 minutes) so moves to trash are enabled.
+    Configuration hdfsConf = new Configuration(false);
+    hdfsConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    hdfsConf.set("fs.trash.interval", "1440");
+    hdfsConf.set("fs.trash.checkpoint.interval", "1440");
+    cluster.addRouterOverrides(routerConf);
+    cluster.addNamenodeOverrides(hdfsConf);
+    // Start the namenodes first, then the routers, then block until up.
+    cluster.startCluster();
+    cluster.startRouters();
+    cluster.waitClusterUp();
+
+    // Cache the two nameservice ids and direct handles to each
+    // namenode so tests can assert on subcluster state directly.
+    ns0 = cluster.getNameservices().get(0);
+    ns1 = cluster.getNameservices().get(1);
+
+    nnContextNs0 = cluster.getNamenode(ns0, null);
+    nnFsNs0 = nnContextNs0.getFileSystem();
+    nnContextNs1 = cluster.getNamenode(ns1, null);
+    nnFsNs1 = nnContextNs1.getFileSystem();
+
+    // The resolver cast is safe: the cluster was constructed above with
+    // MultipleDestinationMountTableResolver.class.
+    routerContext = cluster.getRandomRouter();
+    resolver =
+        (MultipleDestinationMountTableResolver) 
routerContext.getRouter().getSubclusterResolver();
+  }
+
+  /** Stop the router used by the tests and shut the whole cluster down. */
+  @AfterClass
+  public static void tearDown() {
+    if (cluster != null) {
+      cluster.stopRouter(routerContext);
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  /**
+   * Add a mount table entry through the router admin interface and
+   * reload the router's resolver cache so the new entry is visible.
+   *
+   * @param entry the mount table entry to add.
+   * @return true if the entry was added successfully.
+   * @throws IOException if the admin RPC fails.
+   */
+  private boolean addMountTable(final MountTable entry) throws IOException {
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTableManager = client.getMountTableManager();
+    AddMountTableEntryRequest addRequest =
+        AddMountTableEntryRequest.newInstance(entry);
+    AddMountTableEntryResponse addResponse =
+        mountTableManager.addMountTableEntry(addRequest);
+    // Reload the Router cache
+    resolver.loadCache(true);
+    return addResponse.getStatus();
+  }
+
+  @Test
+  public void testMoveToTrashWithMultipleDestinationMountTableResolver() 
throws IOException,
+      URISyntaxException, InterruptedException {
+
+    // add MountPoint  /home/data  ns0 -> /home/data, ns1 -> /home/data
+    Map<String, String> destMap = new HashMap<>();
+    destMap.put(ns0, MOUNT_POINT);
+    destMap.put(ns1, MOUNT_POINT);
+    MountTable addEntry = MountTable.newInstance(MOUNT_POINT, destMap);
+    addEntry.setDestOrder(DestinationOrder.HASH_ALL);
+    assertTrue(addMountTable(addEntry));
+
+    // Clients of the current (super) user set up permissions for the test user.
+    DFSClient clientNs0 = nnContextNs0.getClient();
+    DFSClient clientNs1 = nnContextNs1.getClient();
+
+    clientNs0.setOwner("/", TEST_USER, TEST_USER);
+    clientNs1.setOwner("/", TEST_USER, TEST_USER);
+
+    UserGroupInformation ugi = UserGroupInformation.

Review Comment:
   OK, I will submit it together after we confirm the changes above.





Issue Time Tracking
-------------------

    Worklog Id:     (was: 770645)
    Time Spent: 1h 50m  (was: 1h 40m)

> RBF: The router using MultipleDestinationMountTableResolver remove Multiple 
> subcluster data under the mount point failed
> ------------------------------------------------------------------------------------------------------------------------
>
>                 Key: HDFS-16570
>                 URL: https://issues.apache.org/jira/browse/HDFS-16570
>             Project: Hadoop HDFS
>          Issue Type: Bug
>          Components: rbf
>            Reporter: Xiping Zhang
>            Priority: Major
>              Labels: pull-request-available
>          Time Spent: 1h 50m
>  Remaining Estimate: 0h
>
> Please look at the following example :
> hadoop>{color:#FF0000}hdfs dfsrouteradmin -add /home/data ns0,ns1 /home/data 
> -order RANDOM{color}
> Successfully removed mount point /home/data
> hadoop>{color:#FF0000}hdfs dfsrouteradmin -ls{color}
> Mount Table Entries:
> Source                    Destinations              Owner                     
> Group                     Mode       Quota/Usage
> /home/data                ns0->/home/data,ns1->/home/data  zhangxiping        
>        Administrators            rwxr-xr-x  [NsQuota: -/-, SsQuota: -/-]
> hadoop>{color:#FF0000}hdfs dfs -touch 
> hdfs://ns0/home/data/test/fileNs0.txt{color}
> hadoop>{color:#FF0000}hdfs dfs -touch 
> hdfs://ns1/home/data/test/fileNs1.txt{color}
> hadoop>{color:#FF0000}hdfs dfs -ls 
> hdfs://ns0/home/data/test/fileNs0.txt{color}
> {-}rw-r{-}{-}r{-}-   3 zhangxiping supergroup          0 2022-05-06 18:01 
> hdfs://ns0/home/data/test/fileNs0.txt
> hadoop>{color:#FF0000}hdfs dfs -ls 
> hdfs://ns1/home/data/test/fileNs1.txt{color}
> {-}rw-r{-}{-}r{-}-   3 zhangxiping supergroup          0 2022-05-06 18:01 
> hdfs://ns1/home/data/test/fileNs1.txt
> hadoop>{color:#FF0000}hdfs dfs -ls 
> hdfs://127.0.0.1:40250/home/data/test{color}
> Found 2 items
> {-}rw-r{-}{-}r{-}-   3 zhangxiping supergroup          0 2022-05-06 18:01 
> hdfs://127.0.0.1:40250/home/data/test/fileNs0.txt
> {-}rw-r{-}{-}r{-}-   3 zhangxiping supergroup          0 2022-05-06 18:01 
> hdfs://127.0.0.1:40250/home/data/test/fileNs1.txt
> hadoop>{color:#FF0000}hdfs dfs -rm -r 
> hdfs://127.0.0.1:40250/home/data/test{color}
> rm: Failed to move to trash: hdfs://127.0.0.1:40250/home/data/test: rename 
> destination parent /user/zhangxiping/.Trash/Current/home/data/test not found.



--
This message was sent by Atlassian Jira
(v8.20.7#820007)

---------------------------------------------------------------------
To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org
For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org

Reply via email to