Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 56c779942 -> 9c318f70b


HDFS-12907. Allow read-only access to reserved raw for non-superusers. 
Contributed by Rushabh S Shah.

(cherry picked from commit 08a50da95fd8509d87c6a3e5b46862b8eb6f7318)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c318f70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c318f70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c318f70

Branch: refs/heads/branch-2.8
Commit: 9c318f70bb8b955342a9aaa399f9682cfd296db2
Parents: 56c7799
Author: Kihwal Lee <kih...@apache.org>
Authored: Thu Dec 14 15:13:47 2017 -0600
Committer: Kihwal Lee <kih...@apache.org>
Committed: Thu Dec 14 15:13:47 2017 -0600

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirectory.java       |  9 ++-
 .../server/namenode/XAttrPermissionFilter.java  | 10 +--
 .../hadoop/hdfs/TestReservedRawPaths.java       | 22 ++----
 .../hdfs/server/namenode/FSXAttrBaseTest.java   | 80 ++++++++++++++++----
 4 files changed, 86 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c318f70/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 3ce2254..6e815c0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -533,7 +533,14 @@ public class FSDirectory implements Closeable {
     byte[][] components = INode.getPathComponents(src);
     boolean isRaw = isReservedRawName(components);
     if (isPermissionEnabled && pc != null && isRaw) {
-      pc.checkSuperuserPrivilege();
+      switch(dirOp) {
+        case READ_LINK:
+        case READ:
+          break;
+        default:
+          pc.checkSuperuserPrivilege();
+          break;
+      }
     }
     components = resolveComponents(components, this);
     INodesInPath iip = INodesInPath.resolve(rootDir, components, isRaw);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c318f70/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
index de448f6..e1bf027 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
@@ -54,7 +54,8 @@ import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_
  *   attributes that sometimes need to be exposed. Like SYSTEM namespace
  *   attributes they are not visible to the user except when getXAttr/getXAttrs
  *   is called on a file or directory in the /.reserved/raw HDFS directory
- *   hierarchy. These attributes can only be accessed by the superuser.
+ *   hierarchy. These attributes can only be accessed by users who have
+ *   read access.
  * </br>
  */
 @InterfaceAudience.Private
@@ -68,8 +69,7 @@ public class XAttrPermissionFilter {
         (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && isSuperUser)) {
       return;
     }
-    if (xAttr.getNameSpace() == XAttr.NameSpace.RAW &&
-        isRawPath && isSuperUser) {
+    if (xAttr.getNameSpace() == XAttr.NameSpace.RAW && isRawPath) {
       return;
     }
     if (XAttrHelper.getPrefixedName(xAttr).
@@ -112,15 +112,13 @@ public class XAttrPermissionFilter {
       } else if (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && 
           isSuperUser) {
         filteredXAttrs.add(xAttr);
-      } else if (xAttr.getNameSpace() == XAttr.NameSpace.RAW &&
-          isSuperUser && isRawPath) {
+      } else if (xAttr.getNameSpace() == XAttr.NameSpace.RAW && isRawPath) {
         filteredXAttrs.add(xAttr);
       } else if (XAttrHelper.getPrefixedName(xAttr).
           equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
         filteredXAttrs.add(xAttr);
       }
     }
-    
     return filteredXAttrs;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c318f70/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
index 3f57dcf..12b86cb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
@@ -247,7 +247,7 @@ public class TestReservedRawPaths {
   }
 
   @Test(timeout = 120000)
-  public void testAdminAccessOnly() throws Exception {
+  public void testUserReadAccessOnly() throws Exception {
     final Path zone = new Path("zone");
     final Path slashZone = new Path("/", zone);
     fs.mkdirs(slashZone);
@@ -275,34 +275,26 @@ public class TestReservedRawPaths {
       }
     });
 
-    /* Test failure of getFileStatus in reserved/raw as non admin */
+    /* Test success of getFileStatus in reserved/raw as non admin since
+     * read is allowed. */
     final Path ezRawEncFile = new Path(new Path(reservedRaw, zone), base);
     DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED);
     user.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws Exception {
         final DistributedFileSystem fs = cluster.getFileSystem();
-        try {
-          fs.getFileStatus(ezRawEncFile);
-          fail("access to /.reserved/raw is superuser-only operation");
-        } catch (AccessControlException e) {
-          assertExceptionContains("Superuser privilege is required", e);
-        }
+        fs.getFileStatus(ezRawEncFile);
         return null;
       }
     });
 
-    /* Test failure of listStatus in reserved/raw as non admin */
+    /* Test success of listStatus in reserved/raw as non admin since read is
+     * allowed. */
     user.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws Exception {
         final DistributedFileSystem fs = cluster.getFileSystem();
-        try {
-          fs.listStatus(ezRawEncFile);
-          fail("access to /.reserved/raw is superuser-only operation");
-        } catch (AccessControlException e) {
-          assertExceptionContains("Superuser privilege is required", e);
-        }
+        fs.listStatus(ezRawEncFile);
         return null;
       }
     });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c318f70/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index c526484..87a975d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -32,6 +33,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IOUtils;
@@ -1081,7 +1083,8 @@ public class FSXAttrBaseTest {
 
     {
       /*
-       * Test that non-root can not do getXAttr in the "raw.*" namespace
+       * Test that users who don't have read access
+       * cannot do getXAttr in the "raw.*" namespace
        */
       fs.setXAttr(rawPath, raw1, value1);
       user.doAs(new PrivilegedExceptionAction<Object>() {
@@ -1089,7 +1092,7 @@ public class FSXAttrBaseTest {
           public Object run() throws Exception {
             final FileSystem userFs = dfsCluster.getFileSystem();
             try {
-              // non-raw path
+              // raw path
               userFs.getXAttr(rawPath, raw1);
               fail("getXAttr should have thrown");
             } catch (AccessControlException e) {
@@ -1097,7 +1100,7 @@ public class FSXAttrBaseTest {
             }
 
             try {
-              // raw path
+              // non-raw path
               userFs.getXAttr(path, raw1);
               fail("getXAttr should have thrown");
             } catch (AccessControlException e) {
@@ -1105,25 +1108,76 @@ public class FSXAttrBaseTest {
             }
 
             /*
-             * Test that only root can see raw.* xattrs returned from listXAttr
-             * and non-root can't do listXAttrs on /.reserved/raw.
-             */
+            * Test that only users who have execute access on the parent
+            * directory can see raw.* xattrs returned from listXAttr
+            */
             // non-raw path
             final List<String> xattrNames = userFs.listXAttrs(path);
             assertTrue(xattrNames.size() == 0);
-            try {
-              // raw path
-              userFs.listXAttrs(rawPath);
-              fail("listXAttrs on raw path should have thrown");
-            } catch (AccessControlException e) {
-              // ignore
-            }
 
+            // raw path
+            List<String> rawXattrs = userFs.listXAttrs(rawPath);
+            assertTrue(rawXattrs.size() == 1);
+            assertTrue(rawXattrs.get(0).equals(raw1));
             return null;
           }
         });
       fs.removeXAttr(rawPath, raw1);
     }
+
+    {
+      /*
+       * Tests that users who have read access are able to do getXAttr.
+       */
+      final Path parentPath = new Path("/foo");
+      fs.mkdirs(parentPath);
+      fs.setOwner(parentPath, "user", "mygroup");
+      // Set only execute permission for others on parent directory so that
+      // any user can traverse down the directory.
+      fs.setPermission(parentPath, new FsPermission("701"));
+      final Path childPath = new Path("/foo/bar");
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final DistributedFileSystem dfs = dfsCluster.getFileSystem();
+          DFSTestUtil.createFile(dfs, childPath, 1024, (short) 1, 0xFEED);
+          dfs.setPermission(childPath, new FsPermission("740"));
+          return null;
+        }
+      });
+      final Path rawChildPath =
+          new Path("/.reserved/raw" + childPath.toString());
+      fs.setXAttr(new Path("/.reserved/raw/foo/bar"), raw1, value1);
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final DistributedFileSystem dfs = dfsCluster.getFileSystem();
+          // Make sure user have access to raw xattr.
+          byte[] xattr = dfs.getXAttr(rawChildPath, raw1);
+          assertEquals(Arrays.toString(value1), Arrays.toString(xattr));
+          return null;
+        }
+      });
+
+      final UserGroupInformation fakeUser = UserGroupInformation
+          .createUserForTesting("fakeUser", new String[] {"fakeGroup"});
+      fakeUser.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final DistributedFileSystem dfs = dfsCluster.getFileSystem();
+          try {
+            // Make sure users who don't have read access to the file can't
+            // access the raw xattr.
+            dfs.getXAttr(path, raw1);
+            fail("should have thrown AccessControlException");
+          } catch (AccessControlException ace) {
+            // expected
+          }
+          return null;
+        }
+      });
+      // fs.removeXAttr(rawPath, raw1);
+    }
   }
 
   /**


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to