This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch fgl
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 892fa48c5a71eadc61b0f7a3cf22327b0515f0d4
Author: prasad-acit <prasad_a...@yahoo.co.in>
AuthorDate: Fri Aug 13 11:51:18 2021 -0700

    HDFS-16141. [FGL] Address permission related issues with File / Directory. Contributed by Renukaprasad C. (#3205)
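
    An illustrative sketch of the two call sites this patch changes (names are
    taken from the diff below): the mkdir path passes inheritPerms=false, so the
    caller-supplied permission applies, while the file-create path passes true,
    so missing parent directories inherit the permission of the deepest existing
    ancestor on the path.

        // mkdirs (FSDirMkdirOp): the user-given permission is used; the last
        // directory on the path keeps it verbatim.
        iip = createMissingDirs(fsd, iip, permissions, false);

        // create (FSDirWriteFileOp): intermediate directories inherit the
        // permission of the last existing INode on the path.
        INodesInPath parent = FSDirMkdirOp.createMissingDirs(fsd,
            iip.getParentINodesInPath(), permissions, true);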
---
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  | 21 ++++++---
 .../hdfs/server/namenode/FSDirWriteFileOp.java     |  2 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../hdfs/server/namenode/FSNamesystemLock.java     |  2 +-
 .../hadoop/hdfs/server/namenode/INodeMap.java      |  3 +-
 .../java/org/apache/hadoop/hdfs/TestDFSMkdirs.java | 53 ++++++++++++++++++++++
 .../org/apache/hadoop/hdfs/TestFileCreation.java   |  4 ++
 7 files changed, 76 insertions(+), 11 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index f6febe2..f1b2ee2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -70,7 +70,7 @@ class FSDirMkdirOp {
         // create multiple inodes.
         fsn.checkFsObjectLimit();
 
-        iip = createMissingDirs(fsd, iip, permissions);
+        iip = createMissingDirs(fsd, iip, permissions, false);
       }
       return fsd.getAuditFileInfo(iip);
     } finally {
@@ -78,11 +78,14 @@ class FSDirMkdirOp {
     }
   }
 
-  static INodesInPath createMissingDirs(FSDirectory fsd,
-      INodesInPath iip, PermissionStatus permissions) throws IOException {
+  static INodesInPath createMissingDirs(FSDirectory fsd, INodesInPath iip,
+      PermissionStatus permissions, boolean inheritPerms) throws IOException {
+    PermissionStatus basePerm = inheritPerms ?
+        iip.getExistingINodes().getLastINode().getPermissionStatus() :
+        permissions;
     // create all missing directories along the path,
     // but don't add them to the INodeMap yet
-    permissions = addImplicitUwx(permissions, permissions); // SHV !!!
+    permissions = addImplicitUwx(basePerm, permissions);
     INode[] missing = createPathDirectories(fsd, iip, permissions);
     iip = iip.getExistingINodes();
     if (missing.length == 0) {
@@ -90,8 +93,15 @@ class FSDirMkdirOp {
     }
     // switch the locks
     fsd.getINodeMap().latchWriteLock(iip, missing);
+    int counter = 0;
     // Add missing inodes to the INodeMap
     for (INode dir : missing) {
+      if (counter++ == missing.length - 1) {
+        //Last folder in the path, use the user given permission
+        //For MKDIR - refers to the permission given by the user
+        //For create - refers to the parent directory permission.
+        permissions = basePerm;
+      }
       iip = addSingleDirectory(fsd, iip, dir, permissions);
       assert iip != null : "iip should not be null";
     }
@@ -279,13 +289,10 @@ class FSDirMkdirOp {
     // create the missing directories along the path
     INode[] missing = new INode[numMissing];
     final int last = iip.length();
-    INode parent = existing.getLastINode();
     for (int i = existing.length();  i < last; i++) {
       byte[] component = iip.getPathComponent(i);
       missing[i - existing.length()] =
           createDirectoryINode(fsd, existing, component, perm);
-      missing[i - existing.length()].setParent(parent.asDirectory());
-      parent = missing[i - existing.length()];
     }
     return missing;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index f2cca7b3..96f9907 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -400,7 +400,7 @@ class FSDirWriteFileOp {
     fsn.checkFsObjectLimit();
     INodeFile newNode = null;
     INodesInPath parent = FSDirMkdirOp.createMissingDirs(fsd,
-        iip.getParentINodesInPath(), permissions);
+        iip.getParentINodesInPath(), permissions, true);
     if (parent != null) {
       iip = addFile(fsd, parent, iip.getLastLocalName(), permissions,
           replication, blockSize, holder, clientMachine, shouldReplicate,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a8fb490..96324e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1787,7 +1787,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   @Override
   public boolean hasWriteLock() {
     return this.fsLock.isWriteLockedByCurrentThread() ||
-        fsLock.haswWriteChildLock();
+        fsLock.hasWriteChildLock();
   }
   @Override
   public boolean hasReadLock() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index f53f10d..aa54008 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -149,7 +149,7 @@ class FSNamesystemLock {
     return partitionLocks.get().remove(lock);
   }
 
-  boolean haswWriteChildLock() {
+  boolean hasWriteChildLock() {
     Iterator<INodeMapLock> iter = partitionLocks.get().iterator();
     // FSNamesystem.LOG.debug("partitionLocks.size = {}", partitionLocks.get().size());
     while(iter.hasNext()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
index 2814b9f..0fdda96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
@@ -153,7 +153,8 @@ public class INodeMap {
 
     @Override
     protected boolean hasWriteChildLock() {
-      return this.childLock.isWriteLockedByCurrentThread();
+      return this.childLock.isWriteLockedByCurrentThread() || namesystem
+          .getFSLock().hasWriteChildLock();
     }
 
     @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
index e19f3281..bae98cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
@@ -25,6 +25,8 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.InvalidPathException;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -152,4 +154,55 @@ public class TestDFSMkdirs {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testMkDirsWithRestart() throws IOException {
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    try {
+      Path dir1 = new Path("/mkdir-1");
+      Path file1 = new Path(dir1, "file1");
+      Path deleteDir = new Path("/deleteDir");
+      Path deleteFile = new Path(dir1, "deleteFile");
+      // Create a dir in root dir, should succeed
+      assertTrue(dfs.mkdir(dir1, FsPermission.getDefault()));
+      dfs.mkdir(deleteDir, FsPermission.getDefault());
+      assertTrue(dfs.exists(deleteDir));
+      dfs.delete(deleteDir, true);
+      assertTrue(!dfs.exists(deleteDir));
+
+      DFSTestUtil.writeFile(dfs, file1, "hello world");
+      DFSTestUtil.writeFile(dfs, deleteFile, "hello world");
+      int totalFiles = getFileCount(dfs);
+      //Before deletion there are 2 files
+      assertTrue("Incorrect file count", 2 == totalFiles);
+      dfs.delete(deleteFile, false);
+      totalFiles = getFileCount(dfs);
+      //After deletion, left with 1 file
+      assertTrue("Incorrect file count", 1 == totalFiles);
+
+      cluster.restartNameNodes();
+      dfs = cluster.getFileSystem();
+      assertTrue(dfs.exists(dir1));
+      assertTrue(!dfs.exists(deleteDir));
+      assertTrue(dfs.exists(file1));
+      totalFiles = getFileCount(dfs);
+      assertTrue("Incorrect file count", 1 == totalFiles);
+    } finally {
+      dfs.close();
+      cluster.shutdown();
+    }
+  }
+
+  private int getFileCount(DistributedFileSystem dfs) throws IOException {
+    RemoteIterator<LocatedFileStatus> fileItr =
+        dfs.listFiles(new Path("/"), true);
+    int totalFiles = 0;
+    while (fileItr.hasNext()) {
+      fileItr.next();
+      totalFiles++;
+    }
+    return totalFiles;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index a7cf68b..2b5f049 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -1313,6 +1313,10 @@ public class TestFileCreation {
         fail();
       } catch(FileNotFoundException e) {
         FileSystem.LOG.info("Caught Expected FileNotFoundException: ", e);
+      } catch (AssertionError ae) {
+        // FSDirWriteFileOp#completeFile throws an AssertionError if the given
+        // id/node is not an instance of INodeFile.
+        FileSystem.LOG.info("Caught Expected AssertionError: ", ae);
       }
     } finally {
       IOUtils.closeStream(dfs);

