HDFS-13380. RBF: mv/rm fail after the directory exceeded the quota limit. 
Contributed by Yiqun Lin.

(cherry picked from commit e9b9f48dad5ebb58ee529f918723089e8356c480)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78ec0015
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78ec0015
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78ec0015

Branch: refs/heads/branch-2
Commit: 78ec00155ecf05b213864081df12d0ed659e40ef
Parents: 7e69242
Author: Inigo Goiri <inigo...@apache.org>
Authored: Mon Apr 9 10:09:25 2018 -0700
Committer: Inigo Goiri <inigo...@apache.org>
Committed: Mon Apr 9 10:11:38 2018 -0700

----------------------------------------------------------------------
 .../federation/router/RouterRpcServer.java      | 30 ++++++++++++++++----
 .../federation/router/TestRouterQuota.java      |  4 +++
 2 files changed, 28 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78ec0015/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index bbae3ba..d7328fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -882,7 +882,8 @@ public class RouterRpcServer extends AbstractService
       throws IOException {
     checkOperation(OperationCategory.WRITE);
 
-    final List<RemoteLocation> srcLocations = getLocationsForPath(src, true);
+    final List<RemoteLocation> srcLocations =
+        getLocationsForPath(src, true, false);
     // srcLocations may be trimmed by getRenameDestinations()
     final List<RemoteLocation> locs = new LinkedList<>(srcLocations);
     RemoteParam dstParam = getRenameDestinations(locs, dst);
@@ -903,7 +904,8 @@ public class RouterRpcServer extends AbstractService
       final Options.Rename... options) throws IOException {
     checkOperation(OperationCategory.WRITE);
 
-    final List<RemoteLocation> srcLocations = getLocationsForPath(src, true);
+    final List<RemoteLocation> srcLocations =
+        getLocationsForPath(src, true, false);
     // srcLocations may be trimmed by getRenameDestinations()
     final List<RemoteLocation> locs = new LinkedList<>(srcLocations);
     RemoteParam dstParam = getRenameDestinations(locs, dst);
@@ -980,7 +982,8 @@ public class RouterRpcServer extends AbstractService
   public boolean delete(String src, boolean recursive) throws IOException {
     checkOperation(OperationCategory.WRITE);
 
-    final List<RemoteLocation> locations = getLocationsForPath(src, true);
+    final List<RemoteLocation> locations =
+        getLocationsForPath(src, true, false);
     RemoteMethod method = new RemoteMethod("delete",
         new Class<?>[] {String.class, boolean.class}, new RemoteParam(),
         recursive);
@@ -2081,14 +2084,29 @@ public class RouterRpcServer extends AbstractService
 
   /**
    * Get the possible locations of a path in the federated cluster.
+   * During the get operation, it will do the quota verification.
+   *
+   * @param path Path to check.
+   * @param failIfLocked Fail the request if locked (top mount point).
+   * @return Prioritized list of locations in the federated cluster.
+   * @throws IOException If the location for this path cannot be determined.
+   */
+  protected List<RemoteLocation> getLocationsForPath(String path,
+      boolean failIfLocked) throws IOException {
+    return getLocationsForPath(path, failIfLocked, true);
+  }
+
+  /**
+   * Get the possible locations of a path in the federated cluster.
    *
    * @param path Path to check.
    * @param failIfLocked Fail the request if locked (top mount point).
+   * @param needQuotaVerify If need to do the quota verification.
    * @return Prioritized list of locations in the federated cluster.
    * @throws IOException If the location for this path cannot be determined.
    */
-  protected List<RemoteLocation> getLocationsForPath(
-      String path, boolean failIfLocked) throws IOException {
+  protected List<RemoteLocation> getLocationsForPath(String path,
+      boolean failIfLocked, boolean needQuotaVerify) throws IOException {
     try {
       // Check the location for this path
       final PathLocation location =
@@ -2109,7 +2127,7 @@ public class RouterRpcServer extends AbstractService
         }
 
         // Check quota
-        if (this.router.isQuotaEnabled()) {
+        if (this.router.isQuotaEnabled() && needQuotaVerify) {
           RouterQuotaUsage quotaUsage = this.router.getQuotaManager()
               .getQuotaUsage(path);
           if (quotaUsage != null) {

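For readers skimming the hunk above, here is a standalone sketch of the overload pattern this patch introduces (the class and helper names below are hypothetical, not actual Hadoop APIs): the original two-argument lookup keeps quota verification on by default, while rename/rename2/delete opt out through a new needQuotaVerify flag so a directory that already exceeded its quota can still be cleaned up.

    // Hypothetical, simplified illustration of the pattern in this patch;
    // QuotaCheckingResolver, lookup and verifyQuota are not real Hadoop APIs.
    import java.io.IOException;
    import java.util.List;

    abstract class QuotaCheckingResolver {
      private final boolean quotaEnabled;

      QuotaCheckingResolver(boolean quotaEnabled) {
        this.quotaEnabled = quotaEnabled;
      }

      // Existing entry point: verification stays enabled for regular callers.
      List<String> getLocationsForPath(String path, boolean failIfLocked)
          throws IOException {
        return getLocationsForPath(path, failIfLocked, true);
      }

      // New entry point: delete/rename pass needQuotaVerify=false so that
      // cleanup of an over-quota directory still succeeds.
      List<String> getLocationsForPath(String path, boolean failIfLocked,
          boolean needQuotaVerify) throws IOException {
        List<String> locations = lookup(path, failIfLocked);
        if (quotaEnabled && needQuotaVerify) {
          verifyQuota(path);  // throws IOException when the quota is violated
        }
        return locations;
      }

      abstract List<String> lookup(String path, boolean failIfLocked)
          throws IOException;

      abstract void verifyQuota(String path) throws IOException;
    }
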
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78ec0015/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
index f54a56e..3183df4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
@@ -151,6 +151,10 @@ public class TestRouterQuota {
     // mkdir in real FileSystem should be okay
     nnFs1.mkdirs(new Path("/testdir1/" + UUID.randomUUID()));
     nnFs2.mkdirs(new Path("/testdir2/" + UUID.randomUUID()));
+
+    // delete/rename call should be still okay
+    routerFs.delete(new Path("/nsquota"), true);
+    routerFs.rename(new Path("/nsquota/subdir"), new Path("/nsquota/subdir"));
   }
 
   @Test
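
Finally, a minimal client-side sketch of the behavior the test above verifies, assuming (as in the test) that routerFs is a FileSystem backed by the RBF Router and /nsquota is a mount point whose namespace quota is already exceeded; the child paths used below are illustrative only:

    // Sketch only: routerFs and /nsquota follow the test setup; the child
    // paths here are illustrative, not part of the actual test.
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;

    public class OverQuotaCleanupExample {
      static void cleanup(FileSystem routerFs) throws Exception {
        try {
          // Create-type operations are still rejected by the Router quota check.
          routerFs.mkdirs(new Path("/nsquota/newdir"));
        } catch (NSQuotaExceededException e) {
          // expected while the namespace quota is exceeded
        }
        // With this patch, rename and delete skip the Router-side quota
        // verification, so cleaning up the over-quota directory succeeds.
        routerFs.rename(new Path("/nsquota/subdir"), new Path("/nsquota/renamed"));
        routerFs.delete(new Path("/nsquota"), true);
      }
    }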

