Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 b4c2ab278 -> b114f2488


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e44e7b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 4c92249..3b3368d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
@@ -47,6 +46,7 @@ import org.apache.hadoop.security.AccessControlException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.EnumSet;
 
 import static org.apache.hadoop.util.Time.now;
 
@@ -384,7 +384,6 @@ class FSDirStatAndListingOp {
    * @param child for a directory listing of the iip, else null
    * @param storagePolicy for the path or closest ancestor
    * @param needLocation if block locations need to be included or not
-   * @param includeStoragePolicy if storage policy should be returned
    * @return a file status
    * @throws java.io.IOException if any error occurs
    */
@@ -439,7 +438,19 @@ class FSDirStatAndListingOp {
     int childrenNum = node.isDirectory() ?
         node.asDirectory().getChildrenNum(snapshot) : 0;
 
+    EnumSet<HdfsFileStatus.Flags> flags =
+        EnumSet.noneOf(HdfsFileStatus.Flags.class);
     INodeAttributes nodeAttrs = fsd.getAttributes(iip);
+    boolean hasAcl = nodeAttrs.getAclFeature() != null;
+    if (hasAcl) {
+      flags.add(HdfsFileStatus.Flags.HAS_ACL);
+    }
+    if (isEncrypted) {
+      flags.add(HdfsFileStatus.Flags.HAS_CRYPT);
+    }
+    if (isErasureCoded) {
+      flags.add(HdfsFileStatus.Flags.HAS_EC);
+    }
     return createFileStatus(
         size,
         node.isDirectory(),
@@ -447,7 +458,8 @@ class FSDirStatAndListingOp {
         blocksize,
         node.getModificationTime(snapshot),
         node.getAccessTime(snapshot),
-        getPermissionForFileStatus(nodeAttrs, isEncrypted, isErasureCoded),
+        nodeAttrs.getFsPermission(),
+        flags,
         nodeAttrs.getUserName(),
         nodeAttrs.getGroupName(),
         node.isSymlink() ? node.asSymlink().getSymlink() : null,
@@ -460,42 +472,22 @@ class FSDirStatAndListingOp {
         loc);
   }
 
-  private static HdfsFileStatus createFileStatus(long length, boolean isdir,
-      int replication, long blocksize, long mtime,
-      long atime, FsPermission permission, String owner, String group,
-      byte[] symlink, byte[] path, long fileId, int childrenNum,
-      FileEncryptionInfo feInfo, byte storagePolicy,
+  private static HdfsFileStatus createFileStatus(
+      long length, boolean isdir,
+      int replication, long blocksize, long mtime, long atime,
+      FsPermission permission, EnumSet<HdfsFileStatus.Flags> flags,
+      String owner, String group, byte[] symlink, byte[] path, long fileId,
+      int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
       ErasureCodingPolicy ecPolicy, LocatedBlocks locations) {
     if (locations == null) {
       return new HdfsFileStatus(length, isdir, replication, blocksize,
-          mtime, atime, permission, owner, group, symlink, path, fileId,
-          childrenNum, feInfo, storagePolicy, ecPolicy);
+          mtime, atime, permission, flags, owner, group, symlink, path,
+          fileId, childrenNum, feInfo, storagePolicy, ecPolicy);
     } else {
       return new HdfsLocatedFileStatus(length, isdir, replication, blocksize,
-          mtime, atime, permission, owner, group, symlink, path, fileId,
-          locations, childrenNum, feInfo, storagePolicy, ecPolicy);
-    }
-  }
-
-  /**
-   * Returns an inode's FsPermission for use in an outbound FileStatus.  If the
-   * inode has an ACL or is for an encrypted file/dir, then this method will
-   * return an FsPermissionExtension.
-   *
-   * @param node INode to check
-   * @param isEncrypted boolean true if the file/dir is encrypted
-   * @return FsPermission from inode, with ACL bit on if the inode has an ACL
-   * and encrypted bit on if it represents an encrypted file/dir.
-   */
-  private static FsPermission getPermissionForFileStatus(
-      INodeAttributes node, boolean isEncrypted, boolean isErasureCoded) {
-    FsPermission perm = node.getFsPermission();
-    boolean hasAcl = node.getAclFeature() != null;
-    if (hasAcl || isEncrypted || isErasureCoded) {
-      perm = new FsPermissionExtension(perm, hasAcl,
-          isEncrypted, isErasureCoded);
+          mtime, atime, permission, flags, owner, group, symlink, path,
+          fileId, locations, childrenNum, feInfo, storagePolicy, ecPolicy);
     }
-    return perm;
   }
 
   private static ContentSummary getContentSummaryInt(FSDirectory fsd,
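
The hunk above replaces the FsPermissionExtension side channel with an
explicit flag set carried on HdfsFileStatus. A minimal sketch of the new
pattern, using only names that appear in the diff (HdfsFileStatus.Flags with
HAS_ACL, HAS_CRYPT, HAS_EC); the buildFlags helper itself is hypothetical,
not part of the patch:

    // Hypothetical helper mirroring the flag-population logic added above.
    // Requires java.util.EnumSet.
    static EnumSet<HdfsFileStatus.Flags> buildFlags(
        boolean hasAcl, boolean isEncrypted, boolean isErasureCoded) {
      EnumSet<HdfsFileStatus.Flags> flags =
          EnumSet.noneOf(HdfsFileStatus.Flags.class);
      if (hasAcl) {
        flags.add(HdfsFileStatus.Flags.HAS_ACL);
      }
      if (isEncrypted) {
        flags.add(HdfsFileStatus.Flags.HAS_CRYPT);
      }
      if (isErasureCoded) {
        flags.add(HdfsFileStatus.Flags.HAS_EC);
      }
      return flags;
    }

Keeping the attribute bits in a dedicated EnumSet leaves FsPermission as a
plain permission value, which is why the getPermissionForFileStatus helper
can be deleted above.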

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e44e7b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 07dc5c1..87b1156 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -72,12 +72,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.concurrent.ForkJoinPool;
-import java.util.concurrent.RecursiveAction;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.SortedSet;
 import java.util.TreeSet;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.RecursiveAction;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES;
@@ -135,11 +136,13 @@ public class FSDirectory implements Closeable {
 
   public final static HdfsFileStatus DOT_RESERVED_STATUS =
       new HdfsFileStatus(0, true, 0, 0, 0, 0, new FsPermission((short) 01770),
-          null, null, null, HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
+          EnumSet.noneOf(HdfsFileStatus.Flags.class), null, null, null,
+          HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
           HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
 
   public final static HdfsFileStatus DOT_SNAPSHOT_DIR_STATUS =
-      new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
+      new HdfsFileStatus(0, true, 0, 0, 0, 0, null,
+          EnumSet.noneOf(HdfsFileStatus.Flags.class), null, null, null,
           HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
           HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
 
@@ -383,12 +386,15 @@ public class FSDirectory implements Closeable {
    */
   void createReservedStatuses(long cTime) {
     HdfsFileStatus inodes = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
-        new FsPermission((short) 0770), null, supergroup, null,
+        new FsPermission((short) 0770),
+        EnumSet.noneOf(HdfsFileStatus.Flags.class), null, supergroup, null,
         DOT_INODES, -1L, 0, null,
         HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
     HdfsFileStatus raw = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
-        new FsPermission((short) 0770), null, supergroup, null, RAW, -1L,
-        0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
+        new FsPermission((short) 0770),
+        EnumSet.noneOf(HdfsFileStatus.Flags.class), null, supergroup, null,
+        RAW, -1L, 0, null,
+        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
     reservedStatuses = new HdfsFileStatus[] {inodes, raw};
   }
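
A pattern repeated throughout this patch: every HdfsFileStatus constructor
call gains an EnumSet<HdfsFileStatus.Flags> argument immediately after the
permission. A hedged sketch of the new call shape (the argument values are
illustrative placeholders; the parameter order matches the diff):

    // Illustrative call only; values are placeholders.
    HdfsFileStatus status = new HdfsFileStatus(
        0, true, 0, 0, cTime, cTime,
        new FsPermission((short) 0770),
        EnumSet.noneOf(HdfsFileStatus.Flags.class),  // the new flags argument
        null, supergroup, null, HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);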
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e44e7b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f0ebcbb..5849712 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -177,6 +177,7 @@ import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException;
@@ -371,9 +372,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
     FileStatus status = null;
     if (stat != null) {
-      Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null;
+      Path symlink = stat.isSymlink()
+          ? new Path(DFSUtilClient.bytes2String(stat.getSymlinkInBytes()))
+          : null;
       Path path = new Path(src);
-      status = new FileStatus(stat.getLen(), stat.isDir(),
+      status = new FileStatus(stat.getLen(), stat.isDirectory(),
           stat.getReplication(), stat.getBlockSize(),
           stat.getModificationTime(),
           stat.getAccessTime(), stat.getPermission(), stat.getOwner(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e44e7b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 9cd58cb..52b422c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -80,6 +80,7 @@ import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB;
 import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.inotify.EventBatch;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
@@ -1430,7 +1431,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     } else if (!stat.isSymlink()) {
       throw new IOException("Path " + path + " is not a symbolic link");
     }
-    return stat.getSymlink();
+    return DFSUtilClient.bytes2String(stat.getSymlinkInBytes());
   }
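
Both this call site and the FSNamesystem change above move off the
deprecated String-returning symlink accessor: the target is now carried as
bytes and decoded explicitly. A minimal sketch of the replacement pattern
(stat is an HdfsFileStatus; both methods appear in the diff):

    // Decode the raw symlink bytes only when the status is a symlink.
    String target = stat.isSymlink()
        ? DFSUtilClient.bytes2String(stat.getSymlinkInBytes())
        : null;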
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e44e7b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index ffc203f..4b479e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -25,6 +25,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -35,6 +36,7 @@ import javax.management.ObjectName;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
@@ -345,8 +347,9 @@ public class SnapshotManager implements SnapshotStatsMXBean {
       if (userName == null || userName.equals(dir.getUserName())) {
         SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
             dir.getModificationTime(), dir.getAccessTime(),
-            dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
-            dir.getLocalNameBytes(), dir.getId(), 
+            dir.getFsPermission(), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+            dir.getUserName(), dir.getGroupName(),
+            dir.getLocalNameBytes(), dir.getId(),
             dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
             dir.getDirectorySnapshottableFeature().getNumSnapshots(),
             dir.getDirectorySnapshottableFeature().getSnapshotQuota(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e44e7b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index a8861a8..94752f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -17,10 +17,18 @@
  */
 package org.apache.hadoop.hdfs.web;
 
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.ipc.RemoteException;
@@ -110,21 +118,20 @@ public class JsonUtil {
     m.put("pathSuffix", status.getLocalName());
     m.put("type", WebHdfsConstants.PathType.valueOf(status));
     if (status.isSymlink()) {
-      m.put("symlink", status.getSymlink());
+      m.put("symlink", DFSUtilClient.bytes2String(status.getSymlinkInBytes()));
     }
-
     m.put("length", status.getLen());
     m.put("owner", status.getOwner());
     m.put("group", status.getGroup());
     FsPermission perm = status.getPermission();
     m.put("permission", toString(perm));
-    if (perm.getAclBit()) {
+    if (status.hasAcl()) {
       m.put("aclBit", true);
     }
-    if (perm.getEncryptedBit()) {
+    if (status.isEncrypted()) {
       m.put("encBit", true);
     }
-    if (perm.getErasureCodedBit()) {
+    if (status.isErasureCoded()) {
       m.put("ecBit", true);
     }
     m.put("accessTime", status.getAccessTime());
@@ -373,15 +380,6 @@ public class JsonUtil {
     FsPermission perm = status.getPermission();
     if (perm != null) {
       m.put("permission", toString(perm));
-      if (perm.getAclBit()) {
-        m.put("aclBit", true);
-      }
-      if (perm.getEncryptedBit()) {
-        m.put("encBit", true);
-      }
-      if (perm.getErasureCodedBit()) {
-        m.put("ecBit", true);
-      }
     }
     final Map<String, Map<String, Object>> finalMap =
         new TreeMap<String, Map<String, Object>>();
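
Net effect of the two JsonUtil hunks: the FileStatus JSON derives aclBit,
encBit and ecBit from the status object's own attribute methods, and the
second map stops emitting them from FsPermission entirely. A hypothetical
consumer-side sketch, assuming a Map<String, Object> m parsed from the
FileStatus JSON written above:

    // Hypothetical consumer; the keys are the ones written by toJsonMap.
    // Absent keys read as false, matching the writer's omit-when-false logic.
    boolean hasAcl = Boolean.TRUE.equals(m.get("aclBit"));
    boolean encrypted = Boolean.TRUE.equals(m.get("encBit"));
    boolean erasureCoded = Boolean.TRUE.equals(m.get("ecBit"));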

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e44e7b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 7a71df8..5b16f4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -259,12 +259,14 @@ public class TestDFSClientRetries {
     
     Mockito.doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-                (short) 777), "owner", "group", new byte[0], new byte[0],
+                (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+                "owner", "group", new byte[0], new byte[0],
                1010, 0, null, (byte) 0, null)).when(mockNN).getFileInfo(anyString());
     
     Mockito.doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-                (short) 777), "owner", "group", new byte[0], new byte[0],
+                (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+                "owner", "group", new byte[0], new byte[0],
                 1010, 0, null, (byte) 0, null))
         .when(mockNN)
         .create(anyString(), (FsPermission) anyObject(), anyString(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e44e7b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 8eb3b7b..bf02db3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -891,7 +891,8 @@ public class TestEncryptionZones {
       CipherSuite suite, CryptoProtocolVersion version) throws Exception {
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-            (short) 777), "owner", "group", new byte[0], new byte[0],
+            (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+            "owner", "group", new byte[0], new byte[0],
             1010, 0, new FileEncryptionInfo(suite,
             version, new byte[suite.getAlgorithmBlockSize()],
             new byte[suite.getAlgorithmBlockSize()],

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e44e7b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java
new file mode 100644
index 0000000..e5d0566
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.net.URI;
+
+import org.apache.hadoop.fs.FSProtos.FileStatusProto;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+
+import com.google.protobuf.ByteString;
+
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Verify compatible FileStatus/HdfsFileStatus serialization.
+ */
+public class TestFileStatusSerialization {
+
+  private static void checkFields(FileStatus expected, FileStatus actual) {
+    assertEquals(expected.getPath(), actual.getPath());
+    assertEquals(expected.isDirectory(), actual.isDirectory());
+    assertEquals(expected.getLen(), actual.getLen());
+    assertEquals(expected.getPermission(), actual.getPermission());
+    assertEquals(expected.getOwner(), actual.getOwner());
+    assertEquals(expected.getGroup(), actual.getGroup());
+    assertEquals(expected.getModificationTime(), actual.getModificationTime());
+    assertEquals(expected.getAccessTime(), actual.getAccessTime());
+    assertEquals(expected.getReplication(), actual.getReplication());
+    assertEquals(expected.getBlockSize(), actual.getBlockSize());
+  }
+
+  /**
+   * Test API backwards-compatibility with 2.x applications w.r.t. FsPermission.
+   */
+  @Test
+  @SuppressWarnings("deprecation")
+  public void testFsPermissionCompatibility() throws Exception {
+    final int flagmask = 0x8;
+    // flags compatible with 2.x; fixed as constant in this test to ensure
+    // compatibility is maintained. New flags are not part of the contract this
+    // test verifies.
+    for (int i = 0; i < flagmask; ++i) {
+      FsPermission perm = FsPermission.createImmutable((short) 0013);
+      HdfsFileStatusProto.Builder hspb = HdfsFileStatusProto.newBuilder()
+          .setFileType(FileType.IS_FILE)
+          .setPath(ByteString.copyFromUtf8("hdfs://foobar/dingos/zot"))
+          .setLength(4344)
+          .setPermission(PBHelperClient.convert(perm))
+          .setOwner("hadoop")
+          .setGroup("unqbbc")
+          .setModificationTime(12345678L)
+          .setAccessTime(87654321L)
+          .setBlockReplication(10)
+          .setBlocksize(1L << 33)
+          .setFlags(i);
+      HdfsFileStatus stat = PBHelperClient.convert(hspb.build());
+      stat.makeQualified(new URI("hdfs://foobar"), new Path("/dingos"));
+      assertEquals(new Path("hdfs://foobar/dingos/zot"), stat.getPath());
+
+      // verify deprecated FsPermissionExtension methods
+      FsPermission sp = stat.getPermission();
+      assertEquals(sp.getAclBit(), stat.hasAcl());
+      assertEquals(sp.getEncryptedBit(), stat.isEncrypted());
+      assertEquals(sp.getErasureCodedBit(), stat.isErasureCoded());
+
+      // verify Writable contract
+      DataOutputBuffer dob = new DataOutputBuffer();
+      stat.write(dob);
+      DataInputBuffer dib = new DataInputBuffer();
+      dib.reset(dob.getData(), 0, dob.getLength());
+      FileStatus fstat = new FileStatus();
+      fstat.readFields(dib);
+      checkFields(stat, fstat);
+
+      // FsPermissionExtension is used for HdfsFileStatus, not FileStatus;
+      // the attribute flags should still be preserved
+      assertEquals(sp.getAclBit(), fstat.hasAcl());
+      assertEquals(sp.getEncryptedBit(), fstat.isEncrypted());
+      assertEquals(sp.getErasureCodedBit(), fstat.isErasureCoded());
+    }
+  }
+  // TODO: parameterize this test for LocatedFileStatus, HttpFileStatus
+
+  @Test
+  public void testCrossSerializationProto() throws Exception {
+    FsPermission perm = FsPermission.getFileDefault();
+    for (FileType t : FileType.values()) {
+      HdfsFileStatusProto.Builder hspb = HdfsFileStatusProto.newBuilder()
+          .setFileType(t)
+          .setPath(ByteString.copyFromUtf8("hdfs://foobar/dingos"))
+          .setLength(4344)
+          .setPermission(PBHelperClient.convert(perm))
+          .setOwner("hadoop")
+          .setGroup("unqbbc")
+          .setModificationTime(12345678L)
+          .setAccessTime(87654321L)
+          .setBlockReplication(10)
+          .setBlocksize(1L << 33);
+      if (FileType.IS_SYMLINK.equals(t)) {
+        hspb.setSymlink(ByteString.copyFromUtf8("hdfs://yaks/dingos"));
+      }
+      if (FileType.IS_FILE.equals(t)) {
+        hspb.setFileId(4544);
+      }
+      HdfsFileStatusProto hsp = hspb.build();
+      byte[] src = hsp.toByteArray();
+      FileStatusProto fsp = FileStatusProto.parseFrom(src);
+      assertEquals(hsp.getPath().toStringUtf8(), fsp.getPath());
+      assertEquals(hsp.getLength(), fsp.getLength());
+      assertEquals(hsp.getPermission().getPerm(),
+                   fsp.getPermission().getPerm());
+      assertEquals(hsp.getOwner(), fsp.getOwner());
+      assertEquals(hsp.getGroup(), fsp.getGroup());
+      assertEquals(hsp.getModificationTime(), fsp.getModificationTime());
+      assertEquals(hsp.getAccessTime(), fsp.getAccessTime());
+      assertEquals(hsp.getSymlink().toStringUtf8(), fsp.getSymlink());
+      assertEquals(hsp.getBlockReplication(), fsp.getBlockReplication());
+      assertEquals(hsp.getBlocksize(), fsp.getBlockSize());
+      assertEquals(hsp.getFileType().ordinal(), fsp.getFileType().ordinal());
+
+      // verify unknown fields preserved
+      byte[] dst = fsp.toByteArray();
+      HdfsFileStatusProto hsp2 = HdfsFileStatusProto.parseFrom(dst);
+      assertEquals(hsp, hsp2);
+      checkFields(PBHelperClient.convert(hsp), PBHelperClient.convert(hsp2));
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e44e7b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
index 16cdf9b..161e227 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
@@ -30,6 +30,7 @@ import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.EnumSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -353,12 +354,14 @@ public class TestLease {
 
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-            (short) 777), "owner", "group", new byte[0], new byte[0],
+            (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+            "owner", "group", new byte[0], new byte[0],
             1010, 0, null, (byte) 0, null)).when(mcp).getFileInfo(anyString());
     Mockito
         .doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
-                (short) 777), "owner", "group", new byte[0], new byte[0],
+                (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+                "owner", "group", new byte[0], new byte[0],
                 1010, 0, null, (byte) 0, null))
         .when(mcp)
         .create(anyString(), (FsPermission) anyObject(), anyString(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e44e7b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java
index 52a6858..646e800 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.*;
 
 import java.io.IOException;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -141,6 +142,11 @@ public final class AclTestHelpers {
     }
   }
 
+  public static void assertPermission(FileSystem fs, Path pathToCheck,
+      short perm) throws IOException {
+    assertPermission(fs, pathToCheck, perm, (perm & (1 << 12)) != 0);
+  }
+
   /**
   * Asserts the value of the FsPermission bits on the inode of a specific path.
    *
@@ -150,10 +156,11 @@ public final class AclTestHelpers {
    * @throws IOException thrown if there is an I/O error
    */
   public static void assertPermission(FileSystem fs, Path pathToCheck,
-      short perm) throws IOException {
+      short perm, boolean hasAcl) throws IOException {
     short filteredPerm = (short)(perm & 01777);
-    FsPermission fsPermission = fs.getFileStatus(pathToCheck).getPermission();
+    FileStatus stat = fs.getFileStatus(pathToCheck);
+    FsPermission fsPermission = stat.getPermission();
     assertEquals(filteredPerm, fsPermission.toShort());
-    assertEquals(((perm & (1 << 12)) != 0), fsPermission.getAclBit());
+    assertEquals(hasAcl, stat.hasAcl());
   }
 }
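
The new two-argument overload above infers the expected ACL flag from bit 12
of the permission short, so existing callers keep working while new callers
can assert it explicitly. A small worked example (fs and path are assumed
test fixtures):

    // (1 << 12) is the ACL marker bit historically carried in the permission
    // short; the overload masks it off with 01777 before comparing bits.
    short permWithAclBit = (short) (0750 | (1 << 12));
    assertPermission(fs, path, permWithAclBit);       // hasAcl inferred: true
    assertPermission(fs, path, (short) 0750, false);  // explicit form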

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e44e7b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 6c755e7..38c17b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -48,6 +48,7 @@ import java.nio.channels.FileChannel;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -1355,7 +1356,8 @@ public class TestFsck {
     byte storagePolicy = 0;
 
     HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
-        blockSize, modTime, accessTime, perms, owner, group, symlink,
+        blockSize, modTime, accessTime, perms,
+        EnumSet.noneOf(HdfsFileStatus.Flags.class), owner, group, symlink,
         path, fileId, numChildren, null, storagePolicy, null);
     Result replRes = new ReplicationResult(conf);
     Result ecRes = new ErasureCodingResult(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e44e7b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index 5a3d451..edb79d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
 
 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -37,6 +38,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -51,10 +53,12 @@ import com.google.common.collect.Lists;
 
 public class TestJsonUtil {
   static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
-    return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
+    return new FileStatus(f.getLen(), f.isDirectory(), f.getReplication(),
         f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
         f.getPermission(), f.getOwner(), f.getGroup(),
-        f.isSymlink() ? new Path(f.getSymlink()) : null,
+        f.isSymlink()
+          ? new Path(DFSUtilClient.bytes2String(f.getSymlinkInBytes()))
+          : null,
         new Path(f.getFullName(parent)));
   }
 
@@ -63,7 +67,8 @@ public class TestJsonUtil {
     final long now = Time.now();
     final String parent = "/dir";
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
-        now, now + 10, new FsPermission((short) 0644), "user", "group",
+        now, now + 10, new FsPermission((short) 0644),
+        EnumSet.noneOf(HdfsFileStatus.Flags.class), "user", "group",
         DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
         HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0, null);
     final FileStatus fstatus = toFileStatus(status, parent);

