Repository: ranger
Updated Branches:
  refs/heads/master 760c4a261 -> 6ef3d5415


RANGER-2297: getContentSummary validation failure


Project: http://git-wip-us.apache.org/repos/asf/ranger/repo
Commit: http://git-wip-us.apache.org/repos/asf/ranger/commit/6ef3d541
Tree: http://git-wip-us.apache.org/repos/asf/ranger/tree/6ef3d541
Diff: http://git-wip-us.apache.org/repos/asf/ranger/diff/6ef3d541

Branch: refs/heads/master
Commit: 6ef3d5415aa1fd9810638738936753cff79f5390
Parents: 760c4a2
Author: Abhay Kulkarni <[email protected]>
Authored: Thu Nov 29 11:54:40 2018 -0800
Committer: Abhay Kulkarni <[email protected]>
Committed: Thu Nov 29 11:54:40 2018 -0800

----------------------------------------------------------------------
 .../hadoop/RangerHdfsAuthorizer.java            | 44 +++++++++++++++++---
 1 file changed, 39 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ranger/blob/6ef3d541/hdfs-agent/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java
----------------------------------------------------------------------
diff --git a/hdfs-agent/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java b/hdfs-agent/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java
index 65a397d..466f22a 100644
--- a/hdfs-agent/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java
+++ b/hdfs-agent/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;
 import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.security.AccessControlException;
@@ -241,10 +242,14 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
                                INode   parent              = null;
                                INode   inode               = null;
 
+                               boolean useDefaultAuthorizerOnly = false;
+                               boolean doNotGenerateAuditRecord = false;
+
                                if(plugin != null && 
!ArrayUtils.isEmpty(inodes)) {
                                        int sz = inodeAttrs.length;
                                        if (LOG.isDebugEnabled()) {
                                                LOG.debug("Size of INodeAttrs 
array:[" + sz + "]");
+                                               LOG.debug("Size of INodes 
array:[" + inodes.length + "]");
                                        }
                                        byte[][] components = new byte[sz][];
 
@@ -259,11 +264,40 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
                                        if (i != sz) {
                                                if (LOG.isDebugEnabled()) {
                                                        LOG.debug("Input 
INodeAttributes array contains null at position " + i);
-                                                       LOG.debug("Will use 
only first [" + i + "] components to build resourcePath");
+                                                       LOG.debug("Will use 
only first [" + i + "] components");
                                                }
                                        }
 
-                                       resourcePath = 
DFSUtil.byteArray2PathString(components, 0, i);
+                                       if (sz == 1 && inodes.length == 1 && 
inodes[0].getParent() != null) {
+
+                                               doNotGenerateAuditRecord = true;
+
+                                               if (LOG.isDebugEnabled()) {
+                                                       LOG.debug("Using the 
only inode in the array to figure out path to resource. No audit record will be 
generated for this authorization request");
+                                               }
+
+                                               resourcePath = 
inodes[0].getFullPathName();
+
+                                               if (snapshotId != 
Snapshot.CURRENT_STATE_ID) {
+
+                                                       
useDefaultAuthorizerOnly = true;
+
+                                                       if 
(LOG.isDebugEnabled()) {
+                                                               
LOG.debug("path:[" + resourcePath + "] is for a snapshot, id=[" + snapshotId 
+"], default Authorizer will be used to authorize this request");
+                                                       }
+                                               } else {
+                                                       if 
(LOG.isDebugEnabled()) {
+                                                               
LOG.debug("path:[" + resourcePath + "] is not for a snapshot, id=[" + 
snapshotId +"]. It will be used to authorize this request");
+                                                       }
+                                               }
+                                       } else {
+
+                                               resourcePath = 
DFSUtil.byteArray2PathString(components, 0, i);
+
+                                               if (LOG.isDebugEnabled()) {
+                                                       
LOG.debug("INodeAttributes array is used to figure out path to resource, 
resourcePath:[" + resourcePath +"]");
+                                               }
+                                       }
 
                                        if(ancestorIndex >= inodes.length) {
                                                ancestorIndex = inodes.length - 
1;
@@ -271,13 +305,13 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
 
                                        for(; ancestorIndex >= 0 && 
inodes[ancestorIndex] == null; ancestorIndex--);
 
-                                       authzStatus = AuthzStatus.ALLOW;
+                                       authzStatus = useDefaultAuthorizerOnly 
? AuthzStatus.NOT_DETERMINED : AuthzStatus.ALLOW;
 
                                        ancestor = inodes.length > 
ancestorIndex && ancestorIndex >= 0 ? inodes[ancestorIndex] : null;
                                        parent   = inodes.length > 1 ? 
inodes[inodes.length - 2] : null;
                                        inode    = inodes[inodes.length - 1]; 
// could be null while creating a new file
 
-                                       auditHandler = new 
RangerHdfsAuditHandler(resourcePath, isTraverseOnlyCheck);
+                                       auditHandler = doNotGenerateAuditRecord 
? null : new RangerHdfsAuditHandler(resourcePath, isTraverseOnlyCheck);
 
                                        /* Hadoop versions prior to 2.8.0 
didn't ask for authorization of parent/ancestor traversal for
                                         * reading or writing a file. However, 
Hadoop version 2.8.0 and later ask traversal authorization for
@@ -294,7 +328,7 @@ public class RangerHdfsAuthorizer extends INodeAttributeProvider {
                                         * This approach would ensure that 
Ranger authorization will continue to work with existing policies,
                                         * without requiring policy 
migration/update, for the changes in behaviour in Hadoop 2.8.0.
                                         */
-                                       if(isTraverseOnlyCheck) {
+                                       if(authzStatus == AuthzStatus.ALLOW && 
isTraverseOnlyCheck) {
                                                authzStatus = 
traverseOnlyCheck(inode, inodeAttrs, resourcePath, components, parent, 
ancestor, ancestorIndex, user, groups, plugin, auditHandler);
                                        }
 

Reply via email to