Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a0c54aeb0 -> d59555785


HDFS-7032. Add WebHDFS support for reading and writing to encryption zones. 
Contributed by Charles Lamb.

(cherry picked from commit 43b03030084839db041d0337013806aaeef12aaa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5955578
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5955578
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5955578

Branch: refs/heads/branch-2
Commit: d59555785935a25526f0b294038294ac6293fdc6
Parents: a0c54ae
Author: Andrew Wang <w...@apache.org>
Authored: Mon Sep 15 10:23:57 2014 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Mon Sep 15 10:24:38 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../web/resources/DatanodeWebHdfsMethods.java   | 13 +++--
 .../apache/hadoop/hdfs/TestEncryptionZones.java | 52 ++++++++++++++++++++
 3 files changed, 63 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5955578/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7a42ea9..9afe524 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -389,6 +389,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7045. Fix NameNode deadlock when opening file under /.reserved path.
     (Yi Liu via wang)
 
+    HDFS-7032. Add WebHDFS support for reading and writing to encryption zones.
+    (clamb via wang)
+
     BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
       HDFS-6387. HDFS CLI admin tool for creating & deleting an

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5955578/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
index 51731c8..0f0f3be 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
@@ -231,11 +231,13 @@ public class DatanodeWebHdfsMethods {
       DFSClient dfsclient = newDfsClient(nnId, conf);
       FSDataOutputStream out = null;
       try {
-        out = new FSDataOutputStream(dfsclient.create(
+        out = dfsclient.createWrappedOutputStream(dfsclient.create(
             fullpath, permission.getFsPermission(), 
-            overwrite.getValue() ? EnumSet.of(CreateFlag.CREATE, 
CreateFlag.OVERWRITE)
-                : EnumSet.of(CreateFlag.CREATE),
-            replication.getValue(conf), blockSize.getValue(conf), null, b, 
null), null);
+            overwrite.getValue() ?
+                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) :
+                EnumSet.of(CreateFlag.CREATE),
+            replication.getValue(conf), blockSize.getValue(conf), null,
+            b, null), null);
         IOUtils.copyBytes(in, out, b);
         out.close();
         out = null;
@@ -418,7 +420,8 @@ public class DatanodeWebHdfsMethods {
       final DFSClient dfsclient = newDfsClient(nnId, conf);
       HdfsDataInputStream in = null;
       try {
-        in = new HdfsDataInputStream(dfsclient.open(fullpath, b, true));
+        in = dfsclient.createWrappedInputStream(
+            dfsclient.open(fullpath, b, true));
         in.seek(offset.getValue());
       } catch(IOException ioe) {
         IOUtils.cleanup(LOG, in);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5955578/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index b3bf5d9..68fc850 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSTestWrapper;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestWrapper;
@@ -62,6 +63,8 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -570,6 +573,55 @@ public class TestEncryptionZones {
     verifyFilesEqual(fs, encFile1, encFile2, len);
   }
 
+  @Test(timeout = 120000)
+  public void testReadWriteUsingWebHdfs() throws Exception {
+    final HdfsAdmin dfsAdmin =
+        new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    final FileSystem webHdfsFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+        WebHdfsFileSystem.SCHEME);
+
+    final Path zone = new Path("/zone");
+    fs.mkdirs(zone);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY);
+
+    /* Create an unencrypted file for comparison purposes. */
+    final Path unencFile = new Path("/unenc");
+    final int len = 8192;
+    DFSTestUtil.createFile(webHdfsFs, unencFile, len, (short) 1, 0xFEED);
+
+    /*
+     * Create the same file via webhdfs, but this time encrypted. Compare it
+     * using both webhdfs and DFS.
+     */
+    final Path encFile1 = new Path(zone, "myfile");
+    DFSTestUtil.createFile(webHdfsFs, encFile1, len, (short) 1, 0xFEED);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
+    verifyFilesEqual(fs, unencFile, encFile1, len);
+
+    /*
+     * Same thing except this time create the encrypted file using DFS.
+     */
+    final Path encFile2 = new Path(zone, "myfile2");
+    DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
+    verifyFilesEqual(fs, unencFile, encFile2, len);
+
+    /* Verify appending to files works correctly. */
+    appendOneByte(fs, unencFile);
+    appendOneByte(webHdfsFs, encFile1);
+    appendOneByte(fs, encFile2);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
+    verifyFilesEqual(fs, unencFile, encFile1, len);
+    verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
+    verifyFilesEqual(fs, unencFile, encFile2, len);
+  }
+
+  private void appendOneByte(FileSystem fs, Path p) throws IOException {
+    final FSDataOutputStream out = fs.append(p);
+    out.write((byte) 0x123);
+    out.close();
+  }
+
   @Test(timeout = 60000)
   public void testCipherSuiteNegotiation() throws Exception {
     final HdfsAdmin dfsAdmin =

Reply via email to the mailing list.