[ https://issues.apache.org/jira/browse/HDFS-16132?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Shashikant Banerjee updated HDFS-16132:
---------------------------------------
    Description: 
The issue can be reproduced with the unit test below:
{code:java}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index 512d1029835..27b80882766 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Lists;
@@ -89,7 +90,7 @@ public void checkPermissionWithContext(
           AuthorizationContext authzContext) throws AccessControlException {
         if (authzContext.getAncestorIndex() > 1
             && authzContext.getInodes()[1].getLocalName().equals("user")
-            && authzContext.getInodes()[2].getLocalName().equals("acl")) {
+            && authzContext.getInodes()[2].getLocalName().equals("acl") || runPermissionCheck) {
           this.ace.checkPermissionWithContext(authzContext);
         }
         CALLED.add("checkPermission|" + authzContext.getAncestorAccess()
@@ -598,6 +599,58 @@ public Void run() throws Exception {
         return null;
       }
     });
+  }
 
+  @Test
+  public void testAttrProviderSeesResolvedSnapshotPaths1() throws Exception {
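+    // Make the custom enforcer delegate every check to the real FSPermissionChecker.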
+    runPermissionCheck = true;
+    FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+    DistributedFileSystem hdfs = miniDFS.getFileSystem();
+    final Path parent = new Path("/user");
+    hdfs.mkdirs(parent);
+    fs.setPermission(parent, new FsPermission(HDFS_PERMISSION));
+    final Path sub1 = new Path(parent, "sub1");
+    final Path sub1foo = new Path(sub1, "foo");
+    hdfs.mkdirs(sub1);
+    hdfs.mkdirs(sub1foo);
+    Path f = new Path(sub1foo, "file0");
+    DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0);
+    hdfs.allowSnapshot(parent);
+    hdfs.createSnapshot(parent, "s0");
+
+    f = new Path(sub1foo, "file1");
+    DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0);
+    f = new Path(sub1foo, "file2");
+    DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0);
+
+    final Path sub2 = new Path(parent, "sub2");
+    hdfs.mkdirs(sub2);
+    final Path sub2foo = new Path(sub2, "foo");
+    // mv /user/sub1/foo to /user/sub2/foo
+    hdfs.rename(sub1foo, sub2foo);
+
+    hdfs.createSnapshot(parent, "s1");
+    hdfs.createSnapshot(parent, "s2");
+
+    final Path sub3 = new Path(parent, "sub3");
+    hdfs.mkdirs(sub3);
+    // mv /user/sub2/foo to /user/sub3/foo
+    hdfs.rename(sub2foo, sub3);
+
+    hdfs.delete(sub3, true);
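+    // "foo" now survives only in the snapshots: renamed twice, then deleted.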
+    UserGroupInformation ugi =
+        UserGroupInformation.createUserForTesting("u1", new String[] { "g1" });
+    ugi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
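+        // The s1..s2 snapshot diff triggers the failing subtree read-permission check.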
+        ((DistributedFileSystem)fs).getSnapshotDiffReport(parent, "s1", "s2");
+        CALLED.clear();
+        return null;
+      }
+    });
   }
 }
{code}
When executed, it fails with the following error:
{code:java}
org.apache.hadoop.ipc.RemoteException(java.lang.AssertionError): Absolute path required, but got 'foo'
    at org.apache.hadoop.hdfs.server.namenode.INode.checkAbsolutePath(INode.java:838)
    at org.apache.hadoop.hdfs.server.namenode.INode.getPathComponents(INode.java:813)
    at org.apache.hadoop.hdfs.server.namenode.INodesInPath.resolveFromRoot(INodesInPath.java:154)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.getINodeAttrs(FSPermissionChecker.java:447)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkSubAccess(FSPermissionChecker.java:507)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:403)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermissionWithContext(FSPermissionChecker.java:417)
    at org.apache.hadoop.hdfs.server.namenode.TestINodeAttributeProvider$MyAuthorizationProvider$MyAccessControlEnforcer.checkPermissionWithContext(TestINodeAttributeProvider.java:94)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:297)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1951)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1932)
    at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSubtreeReadPermission(FSDirSnapshotOp.java:317)
    at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.getSnapshotDiffReportListing(FSDirSnapshotOp.java:208)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getSnapshotDiffReportListing(FSNamesystem.java:7238)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.getSnapshotDiffReportListing(NameNodeRpcServer.java:2045)
{code}
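Reading the trace: while FSDirSnapshotOp.checkSubtreeReadPermission walks the snapshot subtree, FSPermissionChecker.getINodeAttrs resolves each inode's path from the root, and INode.getPathComponents is handed the bare local name 'foo' rather than an absolute path. The check that fires has roughly the following shape (a self-contained paraphrase for illustration only; the actual INode source differs in detail):
{code:java}
// Paraphrase of the INode.checkAbsolutePath / INode.getPathComponents pair
// implied by the stack trace above; an illustration, not the Hadoop source.
public class AbsolutePathCheckSketch {

  static void checkAbsolutePath(final String path) {
    if (path == null || path.isEmpty() || path.charAt(0) != '/') {
      throw new AssertionError("Absolute path required, but got '" + path + "'");
    }
  }

  static String[] getPathComponents(final String path) {
    checkAbsolutePath(path);
    return path.substring(1).split("/");
  }

  public static void main(String[] args) {
    getPathComponents("/user/sub1/foo"); // fine: absolute path
    getPathComponents("foo");            // throws the AssertionError seen above
  }
}
{code}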
This is a regression from HDFS-15372 (https://issues.apache.org/jira/browse/HDFS-15372), which made FSPermissionChecker resolve inode paths from the root so that external attribute providers see fully resolved snapshot paths.


> SnapshotDiff report fails with invalid path assertion with external Attribute 
> provider
> --------------------------------------------------------------------------------------
>
>                 Key: HDFS-16132
>                 URL: https://issues.apache.org/jira/browse/HDFS-16132
>             Project: Hadoop HDFS
>          Issue Type: Bug
>            Reporter: Shashikant Banerjee
>            Assignee: Shashikant Banerjee
>            Priority: Major
>


