Repository: hadoop

Updated Branches:
  refs/heads/branch-2.8 9b595ba40 -> 95e8f4c80
HDFS-11641. Reduce cost of audit logging by using FileStatus instead of HdfsFileStatus. Contributed by Daryn Sharp.

(cherry picked from commit 82ea3f4545c88f2dc106e63afd6fcd616bb120be)

Conflicts:
    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95e8f4c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95e8f4c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95e8f4c8

Branch: refs/heads/branch-2.8
Commit: 95e8f4c80d65642e8dfe969c42e10b2349019052
Parents: 9b595ba
Author: Kihwal Lee <kih...@apache.org>
Authored: Tue May 16 15:55:53 2017 -0500
Committer: Kihwal Lee <kih...@apache.org>
Committed: Tue May 16 15:55:53 2017 -0500

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java | 12 ++---
 .../hdfs/server/namenode/FSDirAttrOp.java       | 14 ++---
 .../hdfs/server/namenode/FSDirConcatOp.java     |  4 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  | 10 ++--
 .../hdfs/server/namenode/FSDirMkdirOp.java      |  4 +-
 .../hdfs/server/namenode/FSDirRenameOp.java     |  8 +--
 .../hdfs/server/namenode/FSDirSymlinkOp.java    |  4 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java   |  8 +--
 .../hdfs/server/namenode/FSDirXAttrOp.java      |  6 +--
 .../hdfs/server/namenode/FSDirectory.java       | 43 +++++++++++++--
 .../hdfs/server/namenode/FSNamesystem.java      | 56 +++++++++++---------
 11 files changed, 107 insertions(+), 62 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95e8f4c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 25ca09b..a41ff3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
@@ -25,7 +26,6 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 
 import java.io.IOException;
@@ -33,7 +33,7 @@ import java.util.Collections;
 import java.util.List;
 
 class FSDirAclOp {
-  static HdfsFileStatus modifyAclEntries(
+  static FileStatus modifyAclEntries(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;
@@ -58,7 +58,7 @@ class FSDirAclOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeAclEntries(
+  static FileStatus removeAclEntries(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;
@@ -83,7 +83,7 @@ class FSDirAclOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
+  static FileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
       throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
@@ -107,7 +107,7 @@ class FSDirAclOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeAcl(FSDirectory fsd, final String srcArg)
+  static FileStatus removeAcl(FSDirectory fsd, final String srcArg)
       throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
@@ -126,7 +126,7 @@ class FSDirAclOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setAcl(
+  static FileStatus setAcl(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95e8f4c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index c9663dc..9e714af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.StorageType;
@@ -28,7 +29,6 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -50,7 +50,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENAB
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 
 public class FSDirAttrOp {
-  static HdfsFileStatus setPermission(
+  static FileStatus setPermission(
       FSDirectory fsd, final String src, FsPermission permission)
       throws IOException {
     if (FSDirectory.isExactReservedName(src)) {
@@ -70,7 +70,7 @@ public class FSDirAttrOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setOwner(
+  static FileStatus setOwner(
       FSDirectory fsd, String src, String username, String group)
       throws IOException {
     if (FSDirectory.isExactReservedName(src)) {
@@ -100,7 +100,7 @@ public class FSDirAttrOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setTimes(
+  static FileStatus setTimes(
       FSDirectory fsd, String src, long mtime, long atime)
       throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
@@ -153,13 +153,13 @@ public class FSDirAttrOp {
     return isFile;
   }
 
-  static HdfsFileStatus unsetStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus unsetStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src) throws IOException {
     return setStoragePolicy(fsd, bm, src,
         HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, "unset");
   }
 
-  static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src, final String policyName) throws IOException {
     // get the corresponding policy and make sure the policy name is valid
     BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
@@ -171,7 +171,7 @@ public class FSDirAttrOp {
     return setStoragePolicy(fsd, bm, src, policy.getId(), "set");
   }
 
-  static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src, final byte policyId, final String operation)
       throws IOException {
     if (!fsd.isStoragePolicyEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95e8f4c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 3749e84..c51b178 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -21,9 +21,9 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
@@ -48,7 +48,7 @@ import static org.apache.hadoop.util.Time.now;
  */
 class FSDirConcatOp {
 
-  static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
+  static FileStatus concat(FSDirectory fsd, String target, String[] srcs,
     boolean logRetryCache) throws IOException {
     validatePath(target, srcs);
     assert srcs != null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95e8f4c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
index d5f6be0..22039d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -41,7 +42,6 @@ import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -143,10 +143,10 @@ final class FSDirEncryptionZoneOp {
    *          KeyProvider
    * @param logRetryCache whether to record RPC ids in editlog for retry cache
    *          rebuilding
-   * @return HdfsFileStatus
+   * @return FileStatus
    * @throws IOException
    */
-  static HdfsFileStatus createEncryptionZone(final FSDirectory fsd,
+  static FileStatus createEncryptionZone(final FSDirectory fsd,
       final String srcArg, final FSPermissionChecker pc, final String cipher,
       final String keyName, final boolean logRetryCache) throws IOException {
     final CipherSuite suite = CipherSuite.convert(cipher);
@@ -177,7 +177,7 @@ final class FSDirEncryptionZoneOp {
    * @param pc permission checker to check fs permission
    * @return the EZ with file status.
    */
-  static Map.Entry<EncryptionZone, HdfsFileStatus> getEZForPath(
+  static Map.Entry<EncryptionZone, FileStatus> getEZForPath(
       final FSDirectory fsd, final String srcArg, final FSPermissionChecker pc)
       throws IOException {
     final INodesInPath iip;
@@ -192,7 +192,7 @@ final class FSDirEncryptionZoneOp {
     } finally {
       fsd.readUnlock();
     }
-    HdfsFileStatus auditStat = fsd.getAuditFileInfo(iip);
+    FileStatus auditStat = fsd.getAuditFileInfo(iip);
     return new AbstractMap.SimpleImmutableEntry<>(ret, auditStat);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95e8f4c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index a7aa293..02dd46e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -27,7 +28,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -39,7 +39,7 @@ import static org.apache.hadoop.util.Time.now;
 
 class FSDirMkdirOp {
 
-  static HdfsFileStatus mkdirs(FSNamesystem fsn, String src,
+  static FileStatus mkdirs(FSNamesystem fsn, String src,
       PermissionStatus permissions, boolean createParent) throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();
     if(NameNode.stateChangeLog.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95e8f4c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index a08e8b8..fa77d5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -19,13 +19,13 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@@ -762,18 +762,18 @@ class FSDirRenameOp {
       INodesInPath dst, boolean filesDeleted,
       BlocksMapUpdateInfo collectedBlocks) throws IOException {
     boolean success = (dst != null);
-    HdfsFileStatus auditStat = success ? fsd.getAuditFileInfo(dst) : null;
+    FileStatus auditStat = success ? fsd.getAuditFileInfo(dst) : null;
     return new RenameResult(
         success, auditStat, filesDeleted, collectedBlocks);
   }
 
   static class RenameResult {
     final boolean success;
-    final HdfsFileStatus auditStat;
+    final FileStatus auditStat;
     final boolean filesDeleted;
     final BlocksMapUpdateInfo collectedBlocks;
 
-    RenameResult(boolean success, HdfsFileStatus auditStat,
+    RenameResult(boolean success, FileStatus auditStat,
         boolean filesDeleted, BlocksMapUpdateInfo collectedBlocks) {
       this.success = success;
       this.auditStat = auditStat;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95e8f4c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
index c5a7382..8c02269 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 
@@ -33,7 +33,7 @@ import static org.apache.hadoop.util.Time.now;
 
 class FSDirSymlinkOp {
 
-  static HdfsFileStatus createSymlinkInt(
+  static FileStatus createSymlinkInt(
       FSNamesystem fsn, String target, final String linkArg,
       PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
       throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95e8f4c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index 4184200..d487228 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -20,11 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.IOException;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -335,9 +335,9 @@ final class FSDirTruncateOp {
    */
   static class TruncateResult {
     private final boolean result;
-    private final HdfsFileStatus stat;
+    private final FileStatus stat;
 
-    public TruncateResult(boolean result, HdfsFileStatus stat) {
+    public TruncateResult(boolean result, FileStatus stat) {
       this.result = result;
       this.stat = stat;
     }
@@ -353,7 +353,7 @@ final class FSDirTruncateOp {
     /**
      * @return file information.
      */
-    HdfsFileStatus getFileStatus() {
+    FileStatus getFileStatus() {
       return stat;
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95e8f4c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index f676f36..e5243ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -21,13 +21,13 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
@@ -59,7 +59,7 @@ class FSDirXAttrOp {
    *          - xAttrs flags
    * @throws IOException
    */
-  static HdfsFileStatus setXAttr(
+  static FileStatus setXAttr(
       FSDirectory fsd, String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
       boolean logRetryCache)
       throws IOException {
@@ -153,7 +153,7 @@ class FSDirXAttrOp {
    *          - xAttr to remove
    * @throws IOException
    */
-  static HdfsFileStatus removeXAttr(
+  static FileStatus removeXAttr(
       FSDirectory fsd, String src, XAttr xAttr, boolean logRetryCache)
       throws IOException {
     FSDirXAttrOp.checkXAttrsConfigFlag(fsd);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95e8f4c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 3b191c7c..b2063f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
@@ -35,6 +36,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
@@ -1671,10 +1673,45 @@ public class FSDirectory implements Closeable {
     }
   }
 
-  HdfsFileStatus getAuditFileInfo(INodesInPath iip)
+  FileStatus getAuditFileInfo(INodesInPath iip)
       throws IOException {
-    return (namesystem.isAuditEnabled() && namesystem.isExternalInvocation())
-        ? FSDirStatAndListingOp.getFileInfo(this, iip, false) : null;
+    if (!namesystem.isAuditEnabled() || !namesystem.isExternalInvocation()) {
+      return null;
+    }
+
+    final INode inode = iip.getLastINode();
+    if (inode == null) {
+      return null;
+    }
+    final int snapshot = iip.getPathSnapshotId();
+
+    Path symlink = null;
+    long size = 0;     // length is zero for directories
+    short replication = 0;
+    long blocksize = 0;
+
+    if (inode.isFile()) {
+      final INodeFile fileNode = inode.asFile();
+      size = fileNode.computeFileSize(snapshot);
+      replication = fileNode.getFileReplication(snapshot);
+      blocksize = fileNode.getPreferredBlockSize();
+    } else if (inode.isSymlink()) {
+      symlink = new Path(
+          DFSUtilClient.bytes2String(inode.asSymlink().getSymlink()));
+    }
+
+    return new FileStatus(
+        size,
+        inode.isDirectory(),
+        replication,
+        blocksize,
+        inode.getModificationTime(snapshot),
+        inode.getAccessTime(snapshot),
+        inode.getFsPermission(snapshot),
+        inode.getUserName(snapshot),
+        inode.getGroupName(snapshot),
+        symlink,
+        new Path(iip.getPath()));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95e8f4c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 1df5275..a9ff6e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -344,25 +344,33 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   private void logAuditEvent(boolean succeeded, String cmd, String src,
-      String dst, HdfsFileStatus stat) throws IOException {
+      String dst, FileStatus stat) throws IOException {
     if (isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(succeeded, Server.getRemoteUser(), Server.getRemoteIp(),
           cmd, src, dst, stat);
     }
   }
 
-  private void logAuditEvent(boolean succeeded,
-      UserGroupInformation ugi, InetAddress addr, String cmd, String src,
-      String dst, HdfsFileStatus stat) {
+  private void logAuditEvent(boolean succeeded, String cmd, String src,
+      HdfsFileStatus stat) throws IOException {
+    if (!isAuditEnabled() || !isExternalInvocation()) {
+      return;
+    }
     FileStatus status = null;
     if (stat != null) {
       Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null;
-      Path path = dst != null ? new Path(dst) : new Path(src);
+      Path path = new Path(src);
       status = new FileStatus(stat.getLen(), stat.isDir(),
           stat.getReplication(), stat.getBlockSize(), stat.getModificationTime(),
          stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
          stat.getGroup(), symlink, path);
     }
+    logAuditEvent(succeeded, cmd, src, null, status);
+  }
+
+  private void logAuditEvent(boolean succeeded,
+      UserGroupInformation ugi, InetAddress addr, String cmd, String src,
+      String dst, FileStatus status) {
     final String ugiStr = ugi.toString();
     for (AuditLogger logger : auditLoggers) {
       if (logger instanceof HdfsAuditLogger) {
@@ -1704,7 +1712,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void setPermission(String src, FsPermission permission) throws IOException {
     final String operationName = "setPermission";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1728,7 +1736,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void setOwner(String src, String username, String group)
       throws IOException {
     final String operationName = "setOwner";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1857,7 +1865,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throws IOException {
     waitForLoadingFSImage();
     final String operationName = "concat";
-    HdfsFileStatus stat = null;
+    FileStatus stat = null;
     boolean success = false;
     writeLock();
     try {
@@ -1882,7 +1890,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void setTimes(String src, long mtime, long atime) throws IOException {
     final String operationName = "setTimes";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
    checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1910,7 +1918,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     if (!FileSystem.areSymlinksEnabled()) {
       throw new UnsupportedOperationException("Symlinks not supported");
     }
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -2021,7 +2029,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void setStoragePolicy(String src, String policyName) throws IOException {
     final String operationName = "setStoragePolicy";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     waitForLoadingFSImage();
     checkOperation(OperationCategory.WRITE);
     writeLock();
@@ -2047,7 +2055,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void unsetStoragePolicy(String src) throws IOException {
     final String operationName = "unsetStoragePolicy";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -2169,7 +2177,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       logAuditEvent(false, "create", src);
       throw e;
     }
-    logAuditEvent(true, "create", src, null, status);
+    logAuditEvent(true, "create", src, status);
     return status;
   }
 
@@ -2972,7 +2980,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   boolean mkdirs(String src, PermissionStatus permissions,
       boolean createParent) throws IOException {
     final String operationName = "mkdirs";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -7195,7 +7203,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void modifyAclEntries(final String src, List<AclEntry> aclSpec)
       throws IOException {
     final String operationName = "modifyAclEntries";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -7216,7 +7224,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throws IOException {
     final String operationName = "removeAclEntries";
     checkOperation(OperationCategory.WRITE);
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -7234,7 +7242,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   void removeDefaultAcl(final String src) throws IOException {
     final String operationName = "removeDefaultAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -7253,7 +7261,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   void removeAcl(final String src) throws IOException {
     final String operationName = "removeAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -7272,7 +7280,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   void setAcl(final String src, List<AclEntry> aclSpec) throws IOException {
     final String operationName = "setAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -7326,7 +7334,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     checkSuperuserPrivilege();
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
-    final HdfsFileStatus resultingStat;
+    final FileStatus resultingStat;
     writeLock();
     try {
       checkSuperuserPrivilege();
@@ -7357,14 +7365,14 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   EncryptionZone getEZForPath(final String srcArg)
       throws AccessControlException, UnresolvedLinkException, IOException {
     final String operationName = "getEZForPath";
-    HdfsFileStatus resultingStat = null;
+    FileStatus resultingStat = null;
     boolean success = false;
     final FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      Entry<EncryptionZone, HdfsFileStatus> ezForPath = FSDirEncryptionZoneOp
+      Entry<EncryptionZone, FileStatus> ezForPath = FSDirEncryptionZoneOp
           .getEZForPath(dir, srcArg, pc);
       success = true;
       resultingStat = ezForPath.getValue();
@@ -7399,7 +7407,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       boolean logRetryCache)
       throws IOException {
     final String operationName = "setXAttr";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -7449,7 +7457,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void removeXAttr(String src, XAttr xAttr, boolean logRetryCache)
       throws IOException {
     final String operationName = "removeXAttr";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
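
Editor's note: for readers adapting audit-log consumers to this change, below is a minimal, hypothetical sketch of a custom audit logger; the package and class name are illustrative and not part of this commit. The NameNode's pluggable AuditLogger interface already hands implementations a plain org.apache.hadoop.fs.FileStatus, which is why the lightweight FileStatus built by FSDirectory#getAuditFileInfo above is sufficient and the heavier HdfsFileStatus no longer needs to be constructed just for auditing.

// Hypothetical example, not part of this commit.
package org.example.audit;

import java.net.InetAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

/** Toy audit logger that writes events to stdout. */
public class StdoutAuditLogger implements AuditLogger {

  @Override
  public void initialize(Configuration conf) {
    // Nothing to configure in this sketch.
  }

  @Override
  public void logAuditEvent(boolean succeeded, String userName,
      InetAddress addr, String cmd, String src, String dst,
      FileStatus stat) {
    // Only cheap, already-computed attributes are read here; the audit path
    // needs no block locations or encryption info, so a plain FileStatus
    // is enough.
    System.out.printf("allowed=%b ugi=%s ip=%s cmd=%s src=%s dst=%s perm=%s%n",
        succeeded, userName, addr, cmd, src, dst,
        stat == null ? null : stat.getPermission());
  }
}

Such a logger would typically be wired in through the dfs.namenode.audit.loggers configuration key; whether a custom logger is appropriate for a given deployment is outside the scope of this commit.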