HDFS-10832. Propagate ACL bit and isEncrypted bit in HttpFS FileStatus permissions.

(cherry picked from commit cba973f036a2c8fcc21a5826b8306247ec154c7b)
(cherry picked from commit f448ce2a89aff506c7aa99d18c47a6db81411a04)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c24388a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c24388a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c24388a

Branch: refs/heads/branch-2.8
Commit: 8c24388af07a866d94d20a1f8de10244d68a9a6a
Parents: 911ae15
Author: Andrew Wang <w...@apache.org>
Authored: Fri Sep 9 12:12:29 2016 -0700
Committer: Xiao Chen <x...@apache.org>
Committed: Mon Sep 19 16:38:00 2016 -0700

----------------------------------------------------------------------
 .../hadoop/fs/http/client/HttpFSFileSystem.java |  21 +-
 .../hadoop/fs/http/server/FSOperations.java     | 186 ++++-----------
 .../fs/http/client/BaseTestHttpFSWith.java      | 227 ++++++++++++-------
 .../org/apache/hadoop/test/TestHdfsHelper.java  |  23 +-
 4 files changed, 228 insertions(+), 229 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c24388a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index df2b279..d820ec5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.lib.wsrs.EnumSetParam;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -182,6 +183,8 @@ public class HttpFSFileSystem extends FileSystem
   public static final String ACL_ENTRIES_JSON = "entries";
   public static final String ACL_BIT_JSON = "aclBit";
 
+  public static final String ENC_BIT_JSON = "encBit";
+
   public static final int HTTP_TEMPORARY_REDIRECT = 307;
 
   private static final String HTTP_GET = "GET";
@@ -957,6 +960,21 @@ public class HttpFSFileSystem extends FileSystem
     return createAclStatus(json);
   }
 
+  /** Convert a JSON file status' permission fields to a FsPermission. */
+  static FsPermission toFsPermission(JSONObject json) {
+    final String s = (String) json.get(PERMISSION_JSON);
+    final Boolean aclBit = (Boolean) json.get(ACL_BIT_JSON);
+    final Boolean encBit = (Boolean) json.get(ENC_BIT_JSON);
+    FsPermission perm = new FsPermission(Short.parseShort(s, 8));
+    final boolean aBit = (aclBit != null) ? aclBit : false;
+    final boolean eBit = (encBit != null) ? encBit : false;
+    if (aBit || eBit) {
+      return new FsPermissionExtension(perm, aBit, eBit);
+    } else {
+      return perm;
+    }
+  }
+
   private FileStatus createFileStatus(Path parent, JSONObject json) {
     String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
     Path path = (pathSuffix.equals("")) ? parent : new Path(parent, pathSuffix);
@@ -964,8 +982,7 @@ public class HttpFSFileSystem extends FileSystem
     long len = (Long) json.get(LENGTH_JSON);
     String owner = (String) json.get(OWNER_JSON);
     String group = (String) json.get(GROUP_JSON);
-    FsPermission permission =
-      new FsPermission(Short.parseShort((String) json.get(PERMISSION_JSON), 8));
+    final FsPermission permission = toFsPermission(json);
     long aTime = (Long) json.get(ACCESS_TIME_JSON);
     long mTime = (Long) json.get(MODIFICATION_TIME_JSON);
     long blockSize = (Long) json.get(BLOCK_SIZE_JSON);
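
Both new keys are optional on the wire: the server emits "aclBit"/"encBit"
only when the corresponding bit is set, and toFsPermission() treats an
absent key as false, so responses from servers without this patch still
parse. A minimal standalone sketch of that decode path (json-simple on the
classpath; the payload, class name, and literal key strings are
illustrative stand-ins for the constants above):

import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;

public class PermissionDecodeSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical GETFILESTATUS fragment; "aclBit" and "encBit" appear
    // only when true, mirroring the server-side change below.
    String payload =
        "{\"permission\":\"644\",\"aclBit\":true,\"encBit\":true}";
    JSONObject json = (JSONObject) new JSONParser().parse(payload);
    short perm = Short.parseShort((String) json.get("permission"), 8); // octal
    Boolean aclBit = (Boolean) json.get("aclBit"); // null when key is absent
    Boolean encBit = (Boolean) json.get("encBit");
    boolean acl = (aclBit != null) && aclBit;
    boolean enc = (encBit != null) && encBit;
    // A real client would wrap perm in FsPermissionExtension when either
    // bit is set, as toFsPermission() does above.
    System.out.printf("perm=%o acl=%b enc=%b%n", perm, acl, enc);
  }
}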

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c24388a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 57bf025..39597eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.apache.hadoop.util.StringUtils;
@@ -54,148 +53,59 @@ import java.util.Map.Entry;
 public class FSOperations {
 
   /**
-   * This class is used to group a FileStatus and an AclStatus together.
-   * It's needed for the GETFILESTATUS and LISTSTATUS calls, which take
-   * most info from the FileStatus and a wee bit from the AclStatus.
+   * @param fileStatus a FileStatus object
+   * @return JSON map suitable for wire transport
    */
-  private static class StatusPair {
-    private FileStatus fileStatus;
-    private AclStatus aclStatus;
-
-    /**
-     * Simple constructor
-     * @param fileStatus Existing FileStatus object
-     * @param aclStatus Existing AclStatus object
-     */
-    public StatusPair(FileStatus fileStatus, AclStatus aclStatus) {
-      this.fileStatus = fileStatus;
-      this.aclStatus = aclStatus;
-    }
-
-    /**
-     * Create one StatusPair by performing the underlying calls to
-     * fs.getFileStatus and fs.getAclStatus
-     * @param fs The FileSystem where 'path' lives
-     * @param path The file/directory to query
-     * @throws IOException
-     */
-    public StatusPair(FileSystem fs, Path path) throws IOException {
-      fileStatus = fs.getFileStatus(path);
-      aclStatus = null;
-      try {
-        aclStatus = fs.getAclStatus(path);
-      } catch (AclException e) {
-        /*
-         * The cause is almost certainly an "ACLS aren't enabled"
-         * exception, so leave aclStatus at null and carry on.
-         */
-      } catch (UnsupportedOperationException e) {
-        /* Ditto above - this is the case for a local file system */
-      }
-    }
-
-    /**
-     * Return a Map suitable for conversion into JSON format
-     * @return The JSONish Map
-     */
-    public Map<String,Object> toJson() {
-      Map<String,Object> json = new LinkedHashMap<String,Object>();
-      json.put(HttpFSFileSystem.FILE_STATUS_JSON, toJsonInner(true));
-      return json;
-    }
+  private static Map<String, Object> toJson(FileStatus fileStatus) {
+    Map<String, Object> json = new LinkedHashMap<>();
+    json.put(HttpFSFileSystem.FILE_STATUS_JSON, toJsonInner(fileStatus, true));
+    return json;
+  }
 
-    /**
-     * Return in inner part of the JSON for the status - used by both the
-     * GETFILESTATUS and LISTSTATUS calls.
-     * @param emptyPathSuffix Whether or not to include PATH_SUFFIX_JSON
-     * @return The JSONish Map
-     */
-    public Map<String,Object> toJsonInner(boolean emptyPathSuffix) {
-      Map<String,Object> json = new LinkedHashMap<String,Object>();
-      json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
-              (emptyPathSuffix) ? "" : fileStatus.getPath().getName());
-      json.put(HttpFSFileSystem.TYPE_JSON,
-              HttpFSFileSystem.FILE_TYPE.getType(fileStatus).toString());
-      json.put(HttpFSFileSystem.LENGTH_JSON, fileStatus.getLen());
-      json.put(HttpFSFileSystem.OWNER_JSON, fileStatus.getOwner());
-      json.put(HttpFSFileSystem.GROUP_JSON, fileStatus.getGroup());
-      json.put(HttpFSFileSystem.PERMISSION_JSON,
-              HttpFSFileSystem.permissionToString(fileStatus.getPermission()));
-      json.put(HttpFSFileSystem.ACCESS_TIME_JSON, fileStatus.getAccessTime());
-      json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON,
-              fileStatus.getModificationTime());
-      json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
-      json.put(HttpFSFileSystem.REPLICATION_JSON, fileStatus.getReplication());
-      if ( (aclStatus != null) && !(aclStatus.getEntries().isEmpty()) ) {
-        json.put(HttpFSFileSystem.ACL_BIT_JSON,true);
-      }
-      return json;
+  /**
+   * @param fileStatuses list of FileStatus objects
+   * @return JSON map suitable for wire transport
+   */
+  @SuppressWarnings({"unchecked"})
+  private static Map<String, Object> toJson(FileStatus[] fileStatuses) {
+    Map<String, Object> json = new LinkedHashMap<>();
+    Map<String, Object> inner = new LinkedHashMap<>();
+    JSONArray statuses = new JSONArray();
+    for (FileStatus f : fileStatuses) {
+      statuses.add(toJsonInner(f, false));
     }
+    inner.put(HttpFSFileSystem.FILE_STATUS_JSON, statuses);
+    json.put(HttpFSFileSystem.FILE_STATUSES_JSON, inner);
+    return json;
   }
 
   /**
-   * Simple class used to contain and operate upon a list of StatusPair
-   * objects.  Used by LISTSTATUS.
+   * Not meant to be called directly except by the other toJson functions.
    */
-  private static class StatusPairs {
-    private StatusPair[] statusPairs;
-
-    /**
-     * Construct a list of StatusPair objects
-     * @param fs The FileSystem where 'path' lives
-     * @param path The directory to query
-     * @param filter A possible filter for entries in the directory
-     * @throws IOException
-     */
-    public StatusPairs(FileSystem fs, Path path, PathFilter filter)
-            throws IOException {
-      /* Grab all the file statuses at once in an array */
-      FileStatus[] fileStatuses = fs.listStatus(path, filter);
-
-      /* We'll have an array of StatusPairs of the same length */
-      AclStatus aclStatus = null;
-      statusPairs = new StatusPair[fileStatuses.length];
-
-      /*
-       * For each FileStatus, attempt to acquire an AclStatus.  If the
-       * getAclStatus throws an exception, we assume that ACLs are turned
-       * off entirely and abandon the attempt.
-       */
-      boolean useAcls = true;   // Assume ACLs work until proven otherwise
-      for (int i = 0; i < fileStatuses.length; i++) {
-        if (useAcls) {
-          try {
-            aclStatus = fs.getAclStatus(fileStatuses[i].getPath());
-          } catch (AclException e) {
-            /* Almost certainly due to an "ACLs not enabled" exception */
-            aclStatus = null;
-            useAcls = false;
-          } catch (UnsupportedOperationException e) {
-            /* Ditto above - this is the case for a local file system */
-            aclStatus = null;
-            useAcls = false;
-          }
-        }
-        statusPairs[i] = new StatusPair(fileStatuses[i], aclStatus);
-      }
+  private static Map<String, Object> toJsonInner(FileStatus fileStatus,
+      boolean emptyPathSuffix) {
+    Map<String, Object> json = new LinkedHashMap<String, Object>();
+    json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
+        (emptyPathSuffix) ? "" : fileStatus.getPath().getName());
+    json.put(HttpFSFileSystem.TYPE_JSON,
+        HttpFSFileSystem.FILE_TYPE.getType(fileStatus).toString());
+    json.put(HttpFSFileSystem.LENGTH_JSON, fileStatus.getLen());
+    json.put(HttpFSFileSystem.OWNER_JSON, fileStatus.getOwner());
+    json.put(HttpFSFileSystem.GROUP_JSON, fileStatus.getGroup());
+    json.put(HttpFSFileSystem.PERMISSION_JSON,
+        HttpFSFileSystem.permissionToString(fileStatus.getPermission()));
+    json.put(HttpFSFileSystem.ACCESS_TIME_JSON, fileStatus.getAccessTime());
+    json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON,
+        fileStatus.getModificationTime());
+    json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
+    json.put(HttpFSFileSystem.REPLICATION_JSON, fileStatus.getReplication());
+    if (fileStatus.getPermission().getAclBit()) {
+      json.put(HttpFSFileSystem.ACL_BIT_JSON, true);
     }
-
-    /**
-     * Return a Map suitable for conversion into JSON.
-     * @return A JSONish Map
-     */
-    @SuppressWarnings({"unchecked"})
-    public Map<String,Object> toJson() {
-      Map<String,Object> json = new LinkedHashMap<String,Object>();
-      Map<String,Object> inner = new LinkedHashMap<String,Object>();
-      JSONArray statuses = new JSONArray();
-      for (StatusPair s : statusPairs) {
-        statuses.add(s.toJsonInner(false));
-      }
-      inner.put(HttpFSFileSystem.FILE_STATUS_JSON, statuses);
-      json.put(HttpFSFileSystem.FILE_STATUSES_JSON, inner);
-      return json;
+    if (fileStatus.getPermission().getEncryptedBit()) {
+      json.put(HttpFSFileSystem.ENC_BIT_JSON, true);
     }
+    return json;
   }
 
   /** Converts an <code>AclStatus</code> object into a JSON object.
@@ -637,8 +547,8 @@ public class FSOperations {
      */
     @Override
     public Map execute(FileSystem fs) throws IOException {
-      StatusPair sp = new StatusPair(fs, path);
-      return sp.toJson();
+      FileStatus status = fs.getFileStatus(path);
+      return toJson(status);
     }
 
   }
@@ -703,8 +613,8 @@ public class FSOperations {
      */
     @Override
     public Map execute(FileSystem fs) throws IOException {
-      StatusPairs sp = new StatusPairs(fs, path, filter);
-      return sp.toJson();
+      FileStatus[] fileStatuses = fs.listStatus(path, filter);
+      return toJson(fileStatuses);
     }
 
     @Override
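
Besides carrying the new bit, this refactor drops the extra getAclStatus()
RPC that StatusPair/StatusPairs issued per entry on every GETFILESTATUS and
LISTSTATUS call; both bits are now read straight off the FsPermission that
getFileStatus()/listStatus() already return. For orientation, a sketch of
the LISTSTATUS envelope that toJson(FileStatus[]) assembles (key strings
assume the usual WebHDFS values of FILE_STATUSES_JSON/FILE_STATUS_JSON; the
entry itself is fabricated):

import java.util.LinkedHashMap;
import java.util.Map;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;

public class ListStatusEnvelopeSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    // One inner map per FileStatus, as toJsonInner() builds it.
    JSONObject entry = new JSONObject();
    entry.put("pathSuffix", "encfile");
    entry.put("type", "FILE");
    entry.put("permission", "644");
    entry.put("encBit", true); // present only when the bit is set
    JSONArray statuses = new JSONArray();
    statuses.add(entry);
    Map<String, Object> inner = new LinkedHashMap<>();
    inner.put("FileStatus", statuses);
    Map<String, Object> json = new LinkedHashMap<>();
    json.put("FileStatuses", inner);
    // {"FileStatuses":{"FileStatus":[{"pathSuffix":"encfile",...}]}}
    System.out.println(JSONObject.toJSONString(json));
  }
}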

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c24388a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 3f0218e..3dbc4ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.test.HadoopUsersConfTestHelper;
 import org.apache.hadoop.test.TestDir;
 import org.apache.hadoop.test.TestDirHelper;
 import org.apache.hadoop.test.TestHdfs;
+import org.apache.hadoop.test.TestHdfsHelper;
 import org.apache.hadoop.test.TestJetty;
 import org.apache.hadoop.test.TestJettyHelper;
 import org.junit.Assert;
@@ -66,6 +67,11 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 @RunWith(value = Parameterized.class)
 public abstract class BaseTestHttpFSWith extends HFSTestCase {
 
@@ -81,9 +87,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
 
   private void createHttpFSServer() throws Exception {
     File homeDir = TestDirHelper.getTestDir();
-    Assert.assertTrue(new File(homeDir, "conf").mkdir());
-    Assert.assertTrue(new File(homeDir, "log").mkdir());
-    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    assertTrue(new File(homeDir, "conf").mkdir());
+    assertTrue(new File(homeDir, "log").mkdir());
+    assertTrue(new File(homeDir, "temp").mkdir());
     HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
 
     File secretFile = new File(new File(homeDir, "conf"), "secret");
@@ -143,7 +149,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     Assert.assertNotNull(fs);
     URI uri = new URI(getScheme() + "://" +
                       TestJettyHelper.getJettyURL().toURI().getAuthority());
-    Assert.assertEquals(fs.getUri(), uri);
+    assertEquals(fs.getUri(), uri);
     fs.close();
   }
 
@@ -156,7 +162,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     fs.close();
     fs = getHttpFSFileSystem();
     InputStream is = fs.open(new Path(path.toUri().getPath()));
-    Assert.assertEquals(is.read(), 1);
+    assertEquals(is.read(), 1);
     is.close();
     fs.close();
   }
@@ -173,12 +179,12 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     fs = FileSystem.get(getProxiedFSConf());
     FileStatus status = fs.getFileStatus(path);
     if (!isLocalFS()) {
-      Assert.assertEquals(status.getReplication(), 2);
-      Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
+      assertEquals(status.getReplication(), 2);
+      assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
     }
-    Assert.assertEquals(status.getPermission(), permission);
+    assertEquals(status.getPermission(), permission);
     InputStream is = fs.open(path);
-    Assert.assertEquals(is.read(), 1);
+    assertEquals(is.read(), 1);
     is.close();
     fs.close();
   }
@@ -216,9 +222,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
       fs.close();
       fs = FileSystem.get(getProxiedFSConf());
       InputStream is = fs.open(path);
-      Assert.assertEquals(is.read(), 1);
-      Assert.assertEquals(is.read(), 2);
-      Assert.assertEquals(is.read(), -1);
+      assertEquals(is.read(), 1);
+      assertEquals(is.read(), 2);
+      assertEquals(is.read(), -1);
       is.close();
       fs.close();
     }
@@ -239,10 +245,10 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
       final int newLength = blockSize;
 
       boolean isReady = fs.truncate(file, newLength);
-      Assert.assertTrue("Recovery is not expected.", isReady);
+      assertTrue("Recovery is not expected.", isReady);
 
       FileStatus fileStatus = fs.getFileStatus(file);
-      Assert.assertEquals(fileStatus.getLen(), newLength);
+      assertEquals(fileStatus.getLen(), newLength);
       AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
 
       fs.close();
@@ -266,9 +272,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
       fs.concat(path1, new Path[]{path2, path3});
       fs.close();
       fs = FileSystem.get(config);
-      Assert.assertTrue(fs.exists(path1));
-      Assert.assertFalse(fs.exists(path2));
-      Assert.assertFalse(fs.exists(path3));
+      assertTrue(fs.exists(path1));
+      assertFalse(fs.exists(path2));
+      assertFalse(fs.exists(path3));
       fs.close();
     }
   }
@@ -284,8 +290,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     fs.rename(oldPath, newPath);
     fs.close();
     fs = FileSystem.get(getProxiedFSConf());
-    Assert.assertFalse(fs.exists(oldPath));
-    Assert.assertTrue(fs.exists(newPath));
+    assertFalse(fs.exists(oldPath));
+    assertTrue(fs.exists(newPath));
     fs.close();
   }
 
@@ -299,8 +305,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     fs.mkdirs(foe);
 
     FileSystem hoopFs = getHttpFSFileSystem();
-    Assert.assertTrue(hoopFs.delete(new Path(foo.toUri().getPath()), false));
-    Assert.assertFalse(fs.exists(foo));
+    assertTrue(hoopFs.delete(new Path(foo.toUri().getPath()), false));
+    assertFalse(fs.exists(foo));
     try {
       hoopFs.delete(new Path(bar.toUri().getPath()), false);
       Assert.fail();
@@ -308,13 +314,13 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     } catch (Exception ex) {
       Assert.fail();
     }
-    Assert.assertTrue(fs.exists(bar));
-    Assert.assertTrue(hoopFs.delete(new Path(bar.toUri().getPath()), true));
-    Assert.assertFalse(fs.exists(bar));
+    assertTrue(fs.exists(bar));
+    assertTrue(hoopFs.delete(new Path(bar.toUri().getPath()), true));
+    assertFalse(fs.exists(bar));
 
-    Assert.assertTrue(fs.exists(foe));
-    Assert.assertTrue(hoopFs.delete(foe, true));
-    Assert.assertFalse(fs.exists(foe));
+    assertTrue(fs.exists(foe));
+    assertTrue(hoopFs.delete(foe, true));
+    assertFalse(fs.exists(foe));
 
     hoopFs.close();
     fs.close();
@@ -333,19 +339,20 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     FileStatus status2 = fs.getFileStatus(new Path(path.toUri().getPath()));
     fs.close();
 
-    Assert.assertEquals(status2.getPermission(), status1.getPermission());
-    Assert.assertEquals(status2.getPath().toUri().getPath(), status1.getPath().toUri().getPath());
-    Assert.assertEquals(status2.getReplication(), status1.getReplication());
-    Assert.assertEquals(status2.getBlockSize(), status1.getBlockSize());
-    Assert.assertEquals(status2.getAccessTime(), status1.getAccessTime());
-    Assert.assertEquals(status2.getModificationTime(), status1.getModificationTime());
-    Assert.assertEquals(status2.getOwner(), status1.getOwner());
-    Assert.assertEquals(status2.getGroup(), status1.getGroup());
-    Assert.assertEquals(status2.getLen(), status1.getLen());
+    assertEquals(status2.getPermission(), status1.getPermission());
+    assertEquals(status2.getPath().toUri().getPath(),
+        status1.getPath().toUri().getPath());
+    assertEquals(status2.getReplication(), status1.getReplication());
+    assertEquals(status2.getBlockSize(), status1.getBlockSize());
+    assertEquals(status2.getAccessTime(), status1.getAccessTime());
+    assertEquals(status2.getModificationTime(), status1.getModificationTime());
+    assertEquals(status2.getOwner(), status1.getOwner());
+    assertEquals(status2.getGroup(), status1.getGroup());
+    assertEquals(status2.getLen(), status1.getLen());
 
     FileStatus[] stati = fs.listStatus(path.getParent());
-    Assert.assertEquals(stati.length, 1);
-    Assert.assertEquals(stati[0].getPath().getName(), path.getName());
+    assertEquals(stati.length, 1);
+    assertEquals(stati[0].getPath().getName(), path.getName());
   }
 
   private void testWorkingdirectory() throws Exception {
@@ -359,14 +366,15 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     }
     Path httpFSWorkingDir = fs.getWorkingDirectory();
     fs.close();
-    Assert.assertEquals(httpFSWorkingDir.toUri().getPath(),
+    assertEquals(httpFSWorkingDir.toUri().getPath(),
                         workingDir.toUri().getPath());
 
     fs = getHttpFSFileSystem();
     fs.setWorkingDirectory(new Path("/tmp"));
     workingDir = fs.getWorkingDirectory();
     fs.close();
-    Assert.assertEquals(workingDir.toUri().getPath(), new Path("/tmp").toUri().getPath());
+    assertEquals(workingDir.toUri().getPath(),
+        new Path("/tmp").toUri().getPath());
   }
 
   private void testMkdirs() throws Exception {
@@ -375,7 +383,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     fs.mkdirs(path);
     fs.close();
     fs = FileSystem.get(getProxiedFSConf());
-    Assert.assertTrue(fs.exists(path));
+    assertTrue(fs.exists(path));
     fs.close();
   }
 
@@ -400,8 +408,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
       fs.close();
       long atNew = status1.getAccessTime();
       long mtNew = status1.getModificationTime();
-      Assert.assertEquals(mtNew, mt - 10);
-      Assert.assertEquals(atNew, at - 20);
+      assertEquals(mtNew, mt - 10);
+      assertEquals(atNew, at - 20);
     }
   }
 
@@ -419,7 +427,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     FileStatus status1 = fs.getFileStatus(path);
     fs.close();
     FsPermission permission2 = status1.getPermission();
-    Assert.assertEquals(permission2, permission1);
+    assertEquals(permission2, permission1);
 
     //sticky bit
     fs = getHttpFSFileSystem();
@@ -431,8 +439,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     status1 = fs.getFileStatus(path);
     fs.close();
     permission2 = status1.getPermission();
-    Assert.assertTrue(permission2.getStickyBit());
-    Assert.assertEquals(permission2, permission1);
+    assertTrue(permission2.getStickyBit());
+    assertEquals(permission2, permission1);
   }
 
   private void testSetOwner() throws Exception {
@@ -454,8 +462,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
       fs = FileSystem.get(getProxiedFSConf());
       FileStatus status1 = fs.getFileStatus(path);
       fs.close();
-      Assert.assertEquals(status1.getOwner(), user);
-      Assert.assertEquals(status1.getGroup(), group);
+      assertEquals(status1.getOwner(), user);
+      assertEquals(status1.getGroup(), group);
     }
   }
 
@@ -475,7 +483,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     fs = FileSystem.get(getProxiedFSConf());
     FileStatus status1 = fs.getFileStatus(path);
     fs.close();
-    Assert.assertEquals(status1.getReplication(), (short) 1);
+    assertEquals(status1.getReplication(), (short) 1);
   }
 
   private void testChecksum() throws Exception {
@@ -491,9 +499,10 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
       fs = getHttpFSFileSystem();
       FileChecksum httpChecksum = fs.getFileChecksum(path);
       fs.close();
-      Assert.assertEquals(httpChecksum.getAlgorithmName(), hdfsChecksum.getAlgorithmName());
-      Assert.assertEquals(httpChecksum.getLength(), hdfsChecksum.getLength());
-      Assert.assertArrayEquals(httpChecksum.getBytes(), hdfsChecksum.getBytes());
+      assertEquals(httpChecksum.getAlgorithmName(),
+          hdfsChecksum.getAlgorithmName());
+      assertEquals(httpChecksum.getLength(), hdfsChecksum.getLength());
+      assertArrayEquals(httpChecksum.getBytes(), hdfsChecksum.getBytes());
     }
   }
 
@@ -508,12 +517,17 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     fs = getHttpFSFileSystem();
     ContentSummary httpContentSummary = fs.getContentSummary(path);
     fs.close();
-    Assert.assertEquals(httpContentSummary.getDirectoryCount(), hdfsContentSummary.getDirectoryCount());
-    Assert.assertEquals(httpContentSummary.getFileCount(), hdfsContentSummary.getFileCount());
-    Assert.assertEquals(httpContentSummary.getLength(), hdfsContentSummary.getLength());
-    Assert.assertEquals(httpContentSummary.getQuota(), hdfsContentSummary.getQuota());
-    Assert.assertEquals(httpContentSummary.getSpaceConsumed(), hdfsContentSummary.getSpaceConsumed());
-    Assert.assertEquals(httpContentSummary.getSpaceQuota(), hdfsContentSummary.getSpaceQuota());
+    assertEquals(httpContentSummary.getDirectoryCount(),
+        hdfsContentSummary.getDirectoryCount());
+    assertEquals(httpContentSummary.getFileCount(),
+        hdfsContentSummary.getFileCount());
+    assertEquals(httpContentSummary.getLength(),
+        hdfsContentSummary.getLength());
+    assertEquals(httpContentSummary.getQuota(), hdfsContentSummary.getQuota());
+    assertEquals(httpContentSummary.getSpaceConsumed(),
+        hdfsContentSummary.getSpaceConsumed());
+    assertEquals(httpContentSummary.getSpaceQuota(),
+        hdfsContentSummary.getSpaceQuota());
   }
   
   /** Set xattr */
@@ -552,11 +566,11 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
       fs = FileSystem.get(getProxiedFSConf());
       Map<String, byte[]> xAttrs = fs.getXAttrs(path);
       fs.close();
-      Assert.assertEquals(4, xAttrs.size());
-      Assert.assertArrayEquals(value1, xAttrs.get(name1));
-      Assert.assertArrayEquals(value2, xAttrs.get(name2));
-      Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
-      Assert.assertArrayEquals(value4, xAttrs.get(name4));
+      assertEquals(4, xAttrs.size());
+      assertArrayEquals(value1, xAttrs.get(name1));
+      assertArrayEquals(value2, xAttrs.get(name2));
+      assertArrayEquals(new byte[0], xAttrs.get(name3));
+      assertArrayEquals(value4, xAttrs.get(name4));
     }
   }
 
@@ -595,16 +609,16 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
       names.add(name4);
       Map<String, byte[]> xAttrs = fs.getXAttrs(path, names);
       fs.close();
-      Assert.assertEquals(4, xAttrs.size());
-      Assert.assertArrayEquals(value1, xAttrs.get(name1));
-      Assert.assertArrayEquals(value2, xAttrs.get(name2));
-      Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
-      Assert.assertArrayEquals(value4, xAttrs.get(name4));
+      assertEquals(4, xAttrs.size());
+      assertArrayEquals(value1, xAttrs.get(name1));
+      assertArrayEquals(value2, xAttrs.get(name2));
+      assertArrayEquals(new byte[0], xAttrs.get(name3));
+      assertArrayEquals(value4, xAttrs.get(name4));
 
       // Get specific xattr
       fs = getHttpFSFileSystem();
       byte[] value = fs.getXAttr(path, name1);
-      Assert.assertArrayEquals(value1, value);
+      assertArrayEquals(value1, value);
       final String name5 = "a1";
       try {
         value = fs.getXAttr(path, name5);
@@ -618,11 +632,11 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
       fs = getHttpFSFileSystem();
       xAttrs = fs.getXAttrs(path);
       fs.close();
-      Assert.assertEquals(4, xAttrs.size());
-      Assert.assertArrayEquals(value1, xAttrs.get(name1));
-      Assert.assertArrayEquals(value2, xAttrs.get(name2));
-      Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
-      Assert.assertArrayEquals(value4, xAttrs.get(name4));
+      assertEquals(4, xAttrs.size());
+      assertArrayEquals(value1, xAttrs.get(name1));
+      assertArrayEquals(value2, xAttrs.get(name2));
+      assertArrayEquals(new byte[0], xAttrs.get(name3));
+      assertArrayEquals(value4, xAttrs.get(name4));
     }
   }
 
@@ -667,8 +681,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
       fs = FileSystem.get(getProxiedFSConf());
       Map<String, byte[]> xAttrs = fs.getXAttrs(path);
       fs.close();
-      Assert.assertEquals(1, xAttrs.size());
-      Assert.assertArrayEquals(value2, xAttrs.get(name2));
+      assertEquals(1, xAttrs.size());
+      assertArrayEquals(value2, xAttrs.get(name2));
     }
   }
 
@@ -700,11 +714,11 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
 
       fs = getHttpFSFileSystem();
       List<String> names = fs.listXAttrs(path);
-      Assert.assertEquals(4, names.size());
-      Assert.assertTrue(names.contains(name1));
-      Assert.assertTrue(names.contains(name2));
-      Assert.assertTrue(names.contains(name3));
-      Assert.assertTrue(names.contains(name4));
+      assertEquals(4, names.size());
+      assertTrue(names.contains(name1));
+      assertTrue(names.contains(name2));
+      assertTrue(names.contains(name3));
+      assertTrue(names.contains(name4));
     }
   }
 
@@ -715,18 +729,26 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
    * @throws Exception
    */
   private void assertSameAcls(AclStatus a, AclStatus b) throws Exception {
-    Assert.assertTrue(a.getOwner().equals(b.getOwner()));
-    Assert.assertTrue(a.getGroup().equals(b.getGroup()));
-    Assert.assertTrue(a.isStickyBit() == b.isStickyBit());
-    Assert.assertTrue(a.getEntries().size() == b.getEntries().size());
+    assertTrue(a.getOwner().equals(b.getOwner()));
+    assertTrue(a.getGroup().equals(b.getGroup()));
+    assertTrue(a.isStickyBit() == b.isStickyBit());
+    assertTrue(a.getEntries().size() == b.getEntries().size());
     for (AclEntry e : a.getEntries()) {
-      Assert.assertTrue(b.getEntries().contains(e));
+      assertTrue(b.getEntries().contains(e));
     }
     for (AclEntry e : b.getEntries()) {
-      Assert.assertTrue(a.getEntries().contains(e));
+      assertTrue(a.getEntries().contains(e));
     }
   }
 
+  private static void assertSameAclBit(FileSystem expected, FileSystem actual,
+      Path path) throws IOException {
+    FileStatus expectedFileStatus = expected.getFileStatus(path);
+    FileStatus actualFileStatus = actual.getFileStatus(path);
+    assertEquals(actualFileStatus.getPermission().getAclBit(),
+        expectedFileStatus.getPermission().getAclBit());
+  }
+
   /**
    * Simple ACL tests on a file:  Set an acl, add an acl, remove one acl,
    * and remove all acls.
@@ -755,26 +777,31 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     AclStatus proxyAclStat = proxyFs.getAclStatus(path);
     AclStatus httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
+    assertSameAclBit(httpfs, proxyFs, path);
 
     httpfs.setAcl(path, AclEntry.parseAclSpec(aclSet,true));
     proxyAclStat = proxyFs.getAclStatus(path);
     httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
+    assertSameAclBit(httpfs, proxyFs, path);
 
     httpfs.modifyAclEntries(path, AclEntry.parseAclSpec(aclUser2, true));
     proxyAclStat = proxyFs.getAclStatus(path);
     httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
+    assertSameAclBit(httpfs, proxyFs, path);
 
     httpfs.removeAclEntries(path, AclEntry.parseAclSpec(rmAclUser1, false));
     proxyAclStat = proxyFs.getAclStatus(path);
     httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
+    assertSameAclBit(httpfs, proxyFs, path);
 
     httpfs.removeAcl(path);
     proxyAclStat = proxyFs.getAclStatus(path);
     httpfsAclStat = httpfs.getAclStatus(path);
     assertSameAcls(httpfsAclStat, proxyAclStat);
+    assertSameAclBit(httpfs, proxyFs, path);
   }
 
   /**
@@ -797,25 +824,46 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     AclStatus proxyAclStat = proxyFs.getAclStatus(dir);
     AclStatus httpfsAclStat = httpfs.getAclStatus(dir);
     assertSameAcls(httpfsAclStat, proxyAclStat);
+    assertSameAclBit(httpfs, proxyFs, dir);
 
     /* Set a default ACL on the directory */
     httpfs.setAcl(dir, (AclEntry.parseAclSpec(defUser1,true)));
     proxyAclStat = proxyFs.getAclStatus(dir);
     httpfsAclStat = httpfs.getAclStatus(dir);
     assertSameAcls(httpfsAclStat, proxyAclStat);
+    assertSameAclBit(httpfs, proxyFs, dir);
 
     /* Remove the default ACL */
     httpfs.removeDefaultAcl(dir);
     proxyAclStat = proxyFs.getAclStatus(dir);
     httpfsAclStat = httpfs.getAclStatus(dir);
     assertSameAcls(httpfsAclStat, proxyAclStat);
+    assertSameAclBit(httpfs, proxyFs, dir);
+  }
+
+  private void testEncryption() throws Exception {
+    if (isLocalFS()) {
+      return;
+    }
+    FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
+    FileSystem httpFs = getHttpFSFileSystem();
+    FileStatus proxyStatus = proxyFs.getFileStatus(TestHdfsHelper
+        .ENCRYPTED_FILE);
+    assertTrue(proxyStatus.isEncrypted());
+    FileStatus httpStatus = httpFs.getFileStatus(TestHdfsHelper
+        .ENCRYPTED_FILE);
+    assertTrue(httpStatus.isEncrypted());
+    proxyStatus = proxyFs.getFileStatus(new Path("/"));
+    httpStatus = httpFs.getFileStatus(new Path("/"));
+    assertFalse(proxyStatus.isEncrypted());
+    assertFalse(httpStatus.isEncrypted());
   }
 
   protected enum Operation {
     GET, OPEN, CREATE, APPEND, TRUNCATE, CONCAT, RENAME, DELETE, LIST_STATUS, 
     WORKING_DIRECTORY, MKDIRS, SET_TIMES, SET_PERMISSION, SET_OWNER, 
     SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, FILEACLS, DIRACLS, SET_XATTR,
-    GET_XATTRS, REMOVE_XATTR, LIST_XATTRS
+    GET_XATTRS, REMOVE_XATTR, LIST_XATTRS, ENCRYPTION
   }
 
   private void operation(Operation op) throws Exception {
@@ -889,6 +937,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
       case LIST_XATTRS:
         testListXAttrs();
         break;
+      case ENCRYPTION:
+        testEncryption();
+        break;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c24388a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java
index 85cf48d..5695285 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java
@@ -21,10 +21,14 @@ import java.io.File;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.Test;
 import org.junit.runners.model.FrameworkMethod;
@@ -129,6 +133,9 @@ public class TestHdfsHelper extends TestDirHelper {
     return new Configuration(conf);
   }
 
+  public static final Path ENCRYPTION_ZONE = new Path("/ez");
+  public static final Path ENCRYPTED_FILE = new Path("/ez/encfile");
+
   private static MiniDFSCluster MINI_DFS = null;
 
   private static synchronized MiniDFSCluster startMiniHdfs(Configuration conf) throws Exception {
@@ -148,14 +155,28 @@ public class TestHdfsHelper extends TestDirHelper {
       conf.set("hadoop.security.authentication", "simple");
       conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
       conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+      FileSystemTestHelper helper = new FileSystemTestHelper();
+      final String jceksPath = JavaKeyStoreProvider.SCHEME_NAME + "://file" +
+          new Path(helper.getTestRootDir(), "test.jks").toUri();
+      conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, jceksPath);
       MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
       builder.numDataNodes(2);
       MiniDFSCluster miniHdfs = builder.build();
-      FileSystem fileSystem = miniHdfs.getFileSystem();
+      final String testkey = "testkey";
+      DFSTestUtil.createKey(testkey, miniHdfs, conf);
+
+      DistributedFileSystem fileSystem = miniHdfs.getFileSystem();
+      fileSystem.getClient().setKeyProvider(miniHdfs.getNameNode()
+          .getNamesystem().getProvider());
+
       fileSystem.mkdirs(new Path("/tmp"));
       fileSystem.mkdirs(new Path("/user"));
       fileSystem.setPermission(new Path("/tmp"), 
FsPermission.valueOf("-rwxrwxrwx"));
       fileSystem.setPermission(new Path("/user"), 
FsPermission.valueOf("-rwxrwxrwx"));
+      fileSystem.mkdirs(ENCRYPTION_ZONE);
+      fileSystem.createEncryptionZone(ENCRYPTION_ZONE, testkey);
+      fileSystem.create(ENCRYPTED_FILE).close();
+
       MINI_DFS = miniHdfs;
     }
     return MINI_DFS;
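
A note on the fixture: the key provider must be configured before the
MiniDFSCluster is built, and the key must exist before
createEncryptionZone() is called, hence the jceks URI set on the
Configuration above and the DFSTestUtil.createKey() call. A small sketch of
the URI being assembled (test directory hypothetical; "jceks" is
JavaKeyStoreProvider.SCHEME_NAME):

import org.apache.hadoop.fs.Path;

public class JceksUriSketch {
  public static void main(String[] args) {
    // The provider URI is scheme + "://file" + an absolute keystore path.
    String testRootDir = "/tmp/httpfs-test"; // stand-in for getTestRootDir()
    String jceksPath = "jceks" + "://file" +
        new Path(testRootDir, "test.jks").toUri();
    System.out.println(jceksPath); // jceks://file/tmp/httpfs-test/test.jks
  }
}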

