HDFS-10906. Add unit tests for Trash with HDFS encryption zones. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c62ae710
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c62ae710
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c62ae710

Branch: refs/heads/HADOOP-13070
Commit: c62ae7107f025091652e79db3edfca5c4dc84e4a
Parents: 6c348c5
Author: Xiaoyu Yao <x...@apache.org>
Authored: Mon Oct 17 15:25:24 2016 -0700
Committer: Xiaoyu Yao <x...@apache.org>
Committed: Tue Oct 18 14:05:43 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |  32 +-
 .../hdfs/TestTrashWithEncryptionZones.java      | 188 ++++++++
 .../TestTrashWithSecureEncryptionZones.java     | 443 +++++++++++++++++++
 3 files changed, 662 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
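
For context, the new DFSTestUtil.verifyDelete helper added below is driven
from the tests roughly as follows (a minimal sketch, assuming the cluster,
HdfsAdmin, and "test_key" setup from the test fixtures; paths are
illustrative):

    // Client config with trash enabled (1-minute interval), as in the tests.
    Configuration clientConf = new Configuration(conf);
    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
    FsShell shell = new FsShell(clientConf);

    // Create an encryption zone with an in-zone .Trash directory.
    Path zone = new Path("/zones/zone1");
    fs.mkdirs(zone);
    dfsAdmin.createEncryptionZone(zone, "test_key",
        EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));

    // Create a file in the zone, shell-delete it, and assert it was moved
    // to trash rather than removed outright.
    Path encFile = new Path(zone, "encFile1");
    DFSTestUtil.createFile(fs, encFile, 8192, (short) 1, 0xFEED);
    DFSTestUtil.verifyDelete(shell, fs, encFile, true);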


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c62ae710/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 963aaa6..7f26b03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -27,6 +27,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.BufferedOutputStream;
@@ -114,7 +115,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -167,6 +167,7 @@ import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Level;
 import org.junit.Assume;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.annotations.VisibleForTesting;
 import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
@@ -2054,4 +2055,33 @@ public class DFSTestUtil {
       }
     }
   }
+
+  public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
+      boolean shouldExistInTrash) throws Exception {
+    Path trashPath = Path.mergePaths(shell.getCurrentTrashDir(path), path);
+
+    verifyDelete(shell, fs, path, trashPath, shouldExistInTrash);
+  }
+
+  public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
+      Path trashPath, boolean shouldExistInTrash) throws Exception {
+    assertTrue(path + " file does not exist", fs.exists(path));
+
+    // Verify that trashPath has a path component named ".Trash"
+    Path checkTrash = trashPath;
+    while (!checkTrash.isRoot() && !checkTrash.getName().equals(".Trash")) {
+      checkTrash = checkTrash.getParent();
+    }
+    assertEquals("No .Trash component found in trash path " + trashPath,
+        ".Trash", checkTrash.getName());
+
+    String[] argv = new String[]{"-rm", "-r", path.toString()};
+    int res = ToolRunner.run(shell, argv);
+    assertEquals("rm failed", 0, res);
+    if (shouldExistInTrash) {
+      assertTrue("File not in trash : " + trashPath, fs.exists(trashPath));
+    } else {
+      assertFalse("File in trash : " + trashPath, fs.exists(trashPath));
+    }
+  }
 }
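
A note on the helper above: Path.mergePaths appends the path component of
its second argument onto the first, so the expected trash location of a
deleted path is computed as in this worked example (paths illustrative,
assuming trash was provisioned inside the encryption zone):

    Path path      = new Path("/zones/zone1/encFile1");
    Path trashRoot = shell.getCurrentTrashDir(path);
    // e.g. /zones/zone1/.Trash/<user>/Current for an EZ with in-zone trash
    Path trashPath = Path.mergePaths(trashRoot, path);
    // e.g. /zones/zone1/.Trash/<user>/Current/zones/zone1/encFile1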

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c62ae710/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
new file mode 100644
index 0000000..2a8d493
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.junit.Assert.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.security.PrivilegedExceptionAction;
+import java.util.EnumSet;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * This class tests Trash functionality in Encryption Zones.
+ */
+public class TestTrashWithEncryptionZones {
+  private Configuration conf;
+  private FileSystemTestHelper fsHelper;
+
+  private MiniDFSCluster cluster;
+  private HdfsAdmin dfsAdmin;
+  private DistributedFileSystem fs;
+  private File testRootDir;
+  private static final String TEST_KEY = "test_key";
+
+  private FileSystemTestWrapper fsWrapper;
+  private static Configuration clientConf;
+  private static FsShell shell;
+
+  private static AtomicInteger zoneCounter = new AtomicInteger(1);
+  private static AtomicInteger fileCounter = new AtomicInteger(1);
+  private static final int LEN = 8192;
+
+  private static final EnumSet<CreateEncryptionZoneFlag> NO_TRASH =
+      EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH);
+  private static final EnumSet<CreateEncryptionZoneFlag> PROVISION_TRASH =
+      EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH);
+
+  private String getKeyProviderURI() {
+    return JavaKeyStoreProvider.SCHEME_NAME + "://file" +
+        new Path(testRootDir.toString(), "test.jks").toUri();
+  }
+
+  @Before
+  public void setup() throws Exception {
+    conf = new HdfsConfiguration();
+    fsHelper = new FileSystemTestHelper();
+    // Set up java key store
+    String testRoot = fsHelper.getTestRootDir();
+    testRootDir = new File(testRoot).getAbsoluteFile();
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        getKeyProviderURI());
+    conf.setBoolean(DFSConfigKeys
+        .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+    // Lower the batch size for testing
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
+        2);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    org.apache.log4j.Logger
+        .getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
+    fs = cluster.getFileSystem();
+    fsWrapper = new FileSystemTestWrapper(fs);
+    dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
+    setProvider();
+    // Create a test key
+    DFSTestUtil.createKey(TEST_KEY, cluster, conf);
+
+    clientConf = new Configuration(conf);
+    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
+    shell = new FsShell(clientConf);
+  }
+
+  protected void setProvider() {
+    // Need to set the client's KeyProvider to the NN's for JKS,
+    // else the updates do not get flushed properly
+    fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
+        .getProvider());
+  }
+
+  @After
+  public void teardown() {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  @Test
+  public void testDeleteWithinEncryptionZone() throws Exception {
+    final Path zone = new Path("/zones");
+    fs.mkdirs(zone);
+    final Path zone1 = new Path("/zones/zone" + zoneCounter.getAndIncrement());
+    fs.mkdirs(zone1);
+    dfsAdmin.createEncryptionZone(zone1, TEST_KEY, PROVISION_TRASH);
+
+    final Path encFile1 = new Path(zone1, "encFile" + fileCounter
+        .getAndIncrement());
+    DFSTestUtil.createFile(fs, encFile1, LEN, (short) 1, 0xFEED);
+
+    //Verify file deletion
+    DFSTestUtil.verifyDelete(shell, fs, encFile1, true);
+
+    //Verify directory deletion
+    DFSTestUtil.verifyDelete(shell, fs, zone1, true);
+  }
+
+  @Test
+  public void testDeleteEZWithMultipleUsers() throws Exception {
+    final Path zone = new Path("/zones");
+    fs.mkdirs(zone);
+    final Path zone1 = new Path("/zones/zone" + zoneCounter.getAndIncrement());
+    fs.mkdirs(zone1);
+    dfsAdmin.createEncryptionZone(zone1, TEST_KEY, NO_TRASH);
+
+    fsWrapper.setPermission(zone1,
+        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
+
+    final Path encFile1 = new Path(zone1, "encFile" + fileCounter
+        .getAndIncrement());
+    DFSTestUtil.createFile(fs, encFile1, LEN, (short) 1, 0xFEED);
+
+    // create a non-privileged user
+    final UserGroupInformation user = UserGroupInformation
+        .createUserForTesting("user", new String[]{"mygroup"});
+
+    final Path encFile2 = new Path(zone1, "encFile" + fileCounter
+        .getAndIncrement());
+
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        // create a file /zones/zone1/encFile2 in EZ
+        // this file is owned by user:mygroup
+        FileSystem fs2 = FileSystem.get(cluster.getConfiguration(0));
+        DFSTestUtil.createFile(fs2, encFile2, LEN, (short) 1, 0xFEED);
+
+        // Delete /zones/zone1/encFile2, which moves the file to
+        // /zones/zone1/.Trash/user/Current/zones/zone1/encFile2
+        DFSTestUtil.verifyDelete(shell, fs, encFile2, true);
+
+        // Delete /zones/zone1 should not succeed as current user is not admin
+        String[] argv = new String[]{"-rm", "-r", zone1.toString()};
+        int res = ToolRunner.run(shell, argv);
+        assertEquals("Non-admin could delete an encryption zone with multiple" 
+
+            " users : " + zone1, 1, res);
+        return null;
+      }
+    });
+
+    shell = new FsShell(clientConf);
+    DFSTestUtil.verifyDelete(shell, fs, zone1, true);
+  }
+}
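
The NO_TRASH and PROVISION_TRASH flag sets used above differ in whether
HdfsAdmin pre-creates a .Trash directory inside the new zone at creation
time; a minimal sketch of the two calls (zone paths illustrative):

    // Zone created with an in-zone .Trash directory provisioned up front:
    dfsAdmin.createEncryptionZone(new Path("/ezWithTrash"), "test_key",
        EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));

    // Zone created without a pre-provisioned .Trash directory:
    dfsAdmin.createEncryptionZone(new Path("/ezNoTrash"), "test_key",
        EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH));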

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c62ae710/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
new file mode 100644
index 0000000..314adfb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
@@ -0,0 +1,443 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.apache.hadoop.fs.CommonConfigurationKeys
+    .IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
+    .FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
+    .KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.
+    KMS_CLIENT_ENC_KEY_CACHE_SIZE;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.
+    DFS_DATA_TRANSFER_PROTECTION_KEY;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
+import org.apache.hadoop.crypto.key.kms.server.KMSConfiguration;
+import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.Writer;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * This class tests Trash functionality in Encryption Zones with Kerberos
+ * enabled.
+ */
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public class TestTrashWithSecureEncryptionZones {
+  private static HdfsConfiguration baseConf;
+  private static File baseDir;
+  private static final EnumSet<CreateEncryptionZoneFlag> PROVISION_TRASH =
+      EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH);
+
+  private static final String HDFS_USER_NAME = "hdfs";
+  private static final String SPNEGO_USER_NAME = "HTTP";
+  private static final String OOZIE_USER_NAME = "oozie";
+  private static final String OOZIE_PROXIED_USER_NAME = "oozie_user";
+
+  private static String hdfsPrincipal;
+  private static String spnegoPrincipal;
+  private static String keytab;
+
+  // MiniKDC
+  private static MiniKdc kdc;
+
+  // MiniKMS
+  private static MiniKMS miniKMS;
+  private static final String TEST_KEY = "test_key";
+  private static final Path CURRENT = new Path("Current");
+
+  // MiniDFS
+  private static MiniDFSCluster cluster;
+  private static HdfsConfiguration conf;
+  private static FileSystem fs;
+  private static HdfsAdmin dfsAdmin;
+  private static Configuration clientConf;
+  private static FsShell shell;
+
+  private static AtomicInteger zoneCounter = new AtomicInteger(1);
+  private static AtomicInteger fileCounter = new AtomicInteger(1);
+  private static final int LEN = 8192;
+
+  public static File getTestDir() throws Exception {
+    File file = new File("dummy");
+    file = file.getAbsoluteFile();
+    file = file.getParentFile();
+    file = new File(file, "target");
+    file = new File(file, UUID.randomUUID().toString());
+    if (!file.mkdirs()) {
+      throw new RuntimeException("Could not create test directory: " + file);
+    }
+    return file;
+  }
+
+  @BeforeClass
+  public static void init() throws Exception {
+    baseDir = getTestDir();
+    FileUtil.fullyDelete(baseDir);
+    assertTrue(baseDir.mkdirs());
+
+    Properties kdcConf = MiniKdc.createConf();
+    kdc = new MiniKdc(kdcConf, baseDir);
+    kdc.start();
+
+    baseConf = new HdfsConfiguration();
+    SecurityUtil.setAuthenticationMethod(UserGroupInformation
+        .AuthenticationMethod.KERBEROS, baseConf);
+    UserGroupInformation.setConfiguration(baseConf);
+    assertTrue("Expected configuration to enable security",
+        UserGroupInformation.isSecurityEnabled());
+
+    File keytabFile = new File(baseDir, "test.keytab");
+    keytab = keytabFile.getAbsolutePath();
+    // Windows will not reverse name lookup "127.0.0.1" to "localhost".
+    String krbInstance = Path.WINDOWS ? "127.0.0.1" : "localhost";
+
+    kdc.createPrincipal(keytabFile,
+        HDFS_USER_NAME + "/" + krbInstance,
+        SPNEGO_USER_NAME + "/" + krbInstance,
+        OOZIE_USER_NAME + "/" + krbInstance,
+        OOZIE_PROXIED_USER_NAME + "/" + krbInstance);
+
+    hdfsPrincipal = HDFS_USER_NAME + "/" + krbInstance + "@" + kdc.getRealm();
+    spnegoPrincipal = SPNEGO_USER_NAME + "/" + krbInstance + "@" + kdc
+        .getRealm();
+
+    baseConf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
+    baseConf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
+    baseConf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
+    baseConf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
+    baseConf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+        spnegoPrincipal);
+    baseConf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+    baseConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
+    baseConf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+    baseConf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    baseConf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    baseConf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    baseConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
+
+    // Set a small (2=4*0.5) KMSClient EDEK cache size to trigger
+    // on demand refill upon the 3rd file creation
+    baseConf.set(KMS_CLIENT_ENC_KEY_CACHE_SIZE, "4");
+    baseConf.set(KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK, "0.5");
+
+    String keystoresDir = baseDir.getAbsolutePath();
+    String sslConfDir = KeyStoreTestUtil.getClasspathDir(
+        TestSecureEncryptionZoneWithKMS.class);
+    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, baseConf, false);
+    baseConf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getClientSSLConfigFileName());
+    baseConf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getServerSSLConfigFileName());
+
+    File kmsFile = new File(baseDir, "kms-site.xml");
+    if (kmsFile.exists()) {
+      FileUtil.fullyDelete(kmsFile);
+    }
+
+    Configuration kmsConf = new Configuration(true);
+    kmsConf.set(
+        KMSConfiguration.KEY_PROVIDER_URI,
+        "jceks://file@" + new Path(baseDir.toString(), "kms.keystore")
+            .toUri());
+    kmsConf.set("hadoop.kms.authentication.type", "kerberos");
+    kmsConf.set("hadoop.kms.authentication.kerberos.keytab", keytab);
+    kmsConf.set("hadoop.kms.authentication.kerberos.principal",
+        "HTTP/localhost");
+    kmsConf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
+    kmsConf.set("hadoop.kms.acl.GENERATE_EEK", "hdfs");
+
+    Writer writer = new FileWriter(kmsFile);
+    kmsConf.writeXml(writer);
+    writer.close();
+
+    // Start MiniKMS
+    MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder();
+    miniKMS = miniKMSBuilder.setKmsConfDir(baseDir).build();
+    miniKMS.start();
+
+    baseConf.set(CommonConfigurationKeysPublic
+        .HADOOP_SECURITY_KEY_PROVIDER_PATH, getKeyProviderURI());
+    baseConf.setBoolean(DFSConfigKeys
+        .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+
+    conf = new HdfsConfiguration(baseConf);
+    cluster = new MiniDFSCluster.Builder(conf)
+        .build();
+    cluster.waitActive();
+
+    fs = cluster.getFileSystem();
+    dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
+
+    // Wait for the cluster to be active
+    cluster.waitActive();
+
+    // Create a test key
+    DFSTestUtil.createKey(TEST_KEY, cluster, conf);
+    clientConf = new Configuration(conf);
+    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
+    shell = new FsShell(clientConf);
+    System.setProperty("user.name", HDFS_USER_NAME);
+  }
+
+  @AfterClass
+  public static void destroy() {
+    IOUtils.cleanup(null, fs);
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+
+    if (kdc != null) {
+      kdc.stop();
+    }
+    if (miniKMS != null) {
+      miniKMS.stop();
+    }
+    FileUtil.fullyDelete(baseDir);
+  }
+
+  private static String getKeyProviderURI() {
+    return KMSClientProvider.SCHEME_NAME + "://" +
+        miniKMS.getKMSUrl().toExternalForm().replace("://", "@");
+  }
+
+  @Test
+  public void testTrashCheckpoint() throws Exception {
+    final Path zone1 = new Path("/zone" + zoneCounter.getAndIncrement());
+    fs.mkdirs(zone1);
+    final Path zone2 = new Path(zone1 + "/zone" +
+        zoneCounter.getAndIncrement());
+    fs.mkdirs(zone2);
+    dfsAdmin.createEncryptionZone(zone2, TEST_KEY, PROVISION_TRASH);
+
+    final Path encFile1 = new Path(zone2, "encFile" + fileCounter
+        .getAndIncrement());
+    DFSTestUtil.createFile(fs, encFile1, LEN, (short) 1, 0xFEED);
+
+    //Verify Trash checkpoint within Encryption Zone
+    Path trashDir = new Path(zone2, fs.TRASH_PREFIX + "/" + HDFS_USER_NAME +
+        "/" + CURRENT);
+    String trashPath = trashDir.toString() + encFile1.toString();
+    Path deletedFile = verifyTrashLocationWithShellDelete(encFile1);
+    assertEquals("Deleted file not at the expected trash location: " +
+        trashPath, trashPath, deletedFile.toUri().getPath());
+
+    //Verify Trash checkpoint outside the encryption zone when the whole
+    // encryption zone is deleted and moved
+    trashPath = fs.getHomeDirectory().toUri().getPath() + "/" + fs
+        .TRASH_PREFIX + "/" + CURRENT + zone2;
+    Path deletedDir = verifyTrashLocationWithShellDelete(zone2);
+    assertEquals("Deleted zone not at the expected trash location: " +
+        trashPath, trashPath, deletedDir.toUri().getPath());
+  }
+
+  @Test
+  public void testTrashExpunge() throws Exception {
+    final Path zone1 = new Path("/zone" + zoneCounter.getAndIncrement());
+    fs.mkdirs(zone1);
+    final Path zone2 = new Path("/zone" + zoneCounter.getAndIncrement());
+    fs.mkdirs(zone2);
+    dfsAdmin.createEncryptionZone(zone1, TEST_KEY, PROVISION_TRASH);
+
+    final Path file1 = new Path(zone1, "encFile" + fileCounter
+        .getAndIncrement());
+    final Path file2 = new Path(zone2, "file" + fileCounter.getAndIncrement());
+    DFSTestUtil.createFile(fs, file1, LEN, (short) 1, 0xFEED);
+    DFSTestUtil.createFile(fs, file2, LEN, (short) 1, 0xFEED);
+
+    //Verify Trash expunge within the encryption zone
+    List<Path> trashPaths = Lists.newArrayList();
+    trashPaths.add(verifyTrashLocationWithShellDelete(file1));
+    trashPaths.add(verifyTrashLocationWithShellDelete(file2));
+    verifyTrashExpunge(trashPaths);
+
+    //Verify Trash expunge when the whole encryption zone has been deleted
+    final Path file3 = new Path(zone1, "encFile" + fileCounter
+        .getAndIncrement());
+    DFSTestUtil.createFile(fs, file3, LEN, (short) 1, 0xFEED);
+    Path trashPath = verifyTrashLocationWithShellDelete(file3);
+    //Delete encryption zone
+    DFSTestUtil.verifyDelete(shell, fs, zone1, true);
+    verifyTrashExpunge(Lists.newArrayList(trashPath));
+
+  }
+
+  @Test
+  public void testDeleteWithSkipTrash() throws Exception {
+    final Path zone1 = new Path("/zone" + zoneCounter.getAndIncrement());
+    fs.mkdirs(zone1);
+
+    final Path encFile1 = new Path(zone1, "encFile" + fileCounter
+        .getAndIncrement());
+    final Path encFile2 = new Path(zone1, "encFile" + fileCounter
+        .getAndIncrement());
+    DFSTestUtil.createFile(fs, encFile1, LEN, (short) 1, 0xFEED);
+    DFSTestUtil.createFile(fs, encFile2, LEN, (short) 1, 0xFEED);
+
+    //Verify file deletion with skipTrash
+    verifyDeleteWithSkipTrash(encFile1);
+
+    //Verify file deletion without skipTrash
+    DFSTestUtil.verifyDelete(shell, fs, encFile2, true);
+  }
+
+  @Test
+  public void testDeleteEmptyDirectory() throws Exception {
+    final Path zone1 = new Path("/zone" + zoneCounter.getAndIncrement());
+    final Path zone2 = new Path("/zone" + zoneCounter.getAndIncrement());
+    fs.mkdirs(zone1);
+    fs.mkdirs(zone2);
+
+    final Path trashDir1 = new Path(shell.getCurrentTrashDir(zone1) + "/" +
+        zone1);
+    final Path trashDir2 = new Path(shell.getCurrentTrashDir(zone1) + "/" +
+        zone2);
+
+    //Delete empty directory with -r option
+    String[] argv1 = new String[]{"-rm", "-r", zone1.toString()};
+    int res = ToolRunner.run(shell, argv1);
+    assertEquals("rm failed", 0, res);
+    assertTrue("Empty directory not deleted even with -r : " + trashDir1, fs
+        .exists(trashDir1));
+
+    //Delete empty directory without -r option
+    String[] argv2 = new String[]{"-rm", zone2.toString()};
+    res = ToolRunner.run(shell, argv2);
+    assertEquals("rm on empty directory did not fail", 1, res);
+    assertTrue("Empty directory deleted without -r : " + trashDir2, !fs.exists(
+        trashDir2));
+  }
+
+  @Test
+  public void testDeleteFromTrashWithinEZ() throws Exception {
+    final Path zone1 = new Path("/zone" + zoneCounter.getAndIncrement());
+    fs.mkdirs(zone1);
+    dfsAdmin.createEncryptionZone(zone1, TEST_KEY, PROVISION_TRASH);
+
+    final Path encFile1 = new Path(zone1, "encFile" + fileCounter
+        .getAndIncrement());
+    DFSTestUtil.createFile(fs, encFile1, LEN, (short) 1, 0xFEED);
+
+    final Path trashFile = new Path(shell.getCurrentTrashDir(encFile1) + "/" +
+        encFile1);
+
+    String[] argv = new String[]{"-rm", "-r", encFile1.toString()};
+    int res = ToolRunner.run(shell, argv);
+    assertEquals("rm failed", 0, res);
+
+    String[] argvDeleteTrash = new String[]{"-rm", "-r", trashFile.toString()};
+    int resDeleteTrash = ToolRunner.run(shell, argvDeleteTrash);
+    assertEquals("rm failed", 0, resDeleteTrash);
+    assertFalse("File deleted from Trash : " + trashFile, 
fs.exists(trashFile));
+  }
+
+  @Test
+  public void testTrashRetentionAfterNamenodeRestart() throws Exception {
+    final Path zone1 = new Path("/zone" + zoneCounter.getAndIncrement());
+    fs.mkdirs(zone1);
+    dfsAdmin.createEncryptionZone(zone1, TEST_KEY, PROVISION_TRASH);
+
+    final Path encFile1 = new Path(zone1, "encFile" + fileCounter
+        .getAndIncrement());
+    DFSTestUtil.createFile(fs, encFile1, LEN, (short) 1, 0xFEED);
+
+    final Path trashFile = new Path(shell.getCurrentTrashDir(encFile1) + "/" +
+        encFile1);
+    String[] argv = new String[]{"-rm", "-r", encFile1.toString()};
+    int res = ToolRunner.run(shell, argv);
+    assertEquals("rm failed", 0, res);
+
+    assertTrue("File not in trash : " + trashFile, fs.exists(trashFile));
+    cluster.restartNameNode(0);
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+
+    assertTrue("On Namenode restart, file deleted from trash : " +
+        trashFile, fs.exists(trashFile));
+  }
+
+  private Path verifyTrashLocationWithShellDelete(Path path)
+      throws Exception {
+
+    final Path trashFile = new Path(shell.getCurrentTrashDir(path) + "/" +
+        path);
+    // Check against HDFS, not the local filesystem, for the trash path.
+    assertFalse("File already present in Trash before delete",
+        fs.exists(trashFile));
+
+    DFSTestUtil.verifyDelete(shell, fs, path, trashFile, true);
+    return trashFile;
+  }
+
+  private void verifyTrashExpunge(List<Path> trashFiles) throws Exception {
+    String[] argv = new String[]{"-expunge"};
+    int res = ToolRunner.run(shell, argv);
+    assertEquals("expunge failed", 0, res);
+
+    for (Path trashFile : trashFiles) {
+      assertFalse("File exists in trash after expunge : " + trashFile, fs
+          .exists(trashFile));
+    }
+  }
+
+  private void verifyDeleteWithSkipTrash(Path path) throws Exception {
+    assertTrue(path + " file does not exist", fs.exists(path));
+
+    final Path trashFile = new Path(shell.getCurrentTrashDir(path) + "/" +
+        path);
+
+    String[] argv = new String[]{"-rm", "-r", "-skipTrash", path.toString()};
+    int res = ToolRunner.run(shell, argv);
+    assertEquals("rm failed", 0, res);
+    assertFalse("File in trash even with -skipTrash", fs.exists(trashFile));
+  }
+}
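
For readers tracing testTrashCheckpoint above, the two trash locations it
asserts look roughly like this (user and zone names illustrative):

    delete /zone1/zone2/encFile1 (file inside the EZ)
        -> /zone1/zone2/.Trash/hdfs/Current/zone1/zone2/encFile1
    delete /zone1/zone2 (the whole EZ)
        -> /user/hdfs/.Trash/Current/zone1/zone2

That is, deletes within an encryption zone stay inside the zone's own
.Trash directory, so the data never leaves the zone, while deleting the
zone itself falls back to the deleting user's home-directory trash.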

