[1/2] hadoop git commit: HDFS-10893. Refactor TestDFSShell by setting up MiniDFSCluser once for all commands test. Contributed by Mingliang Liu

2016-10-06 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 94a6f6598 -> 7aee005c0


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7aee005c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 6068978..88f0c95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -66,6 +66,10 @@ import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
+import org.junit.rules.Timeout;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
@@ -95,6 +99,37 @@ public class TestDFSShell {
   private static final byte[] RAW_A1_VALUE = new byte[]{0x32, 0x32, 0x32};
   private static final byte[] TRUSTED_A1_VALUE = new byte[]{0x31, 0x31, 0x31};
   private static final byte[] USER_A1_VALUE = new byte[]{0x31, 0x32, 0x33};
+  private static final int BLOCK_SIZE = 1024;
+
+  private static MiniDFSCluster miniCluster;
+  private static DistributedFileSystem dfs;
+
+  @BeforeClass
+  public static void setup() throws IOException {
+    final Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    // set up the shared miniCluster directory so individual tests can launch
+    // new clusters without conflict
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
+        GenericTestUtils.getTestDir("TestDFSShell").getAbsolutePath());
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+
+    miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    miniCluster.waitActive();
+    dfs = miniCluster.getFileSystem();
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    if (miniCluster != null) {
+      miniCluster.shutdown(true, true);
+    }
+  }
+
+  @Rule
+  public Timeout globalTimeout = new Timeout(30 * 1000); // 30s
 
   static Path writeFile(FileSystem fs, Path f) throws IOException {
 DataOutputStream out = fs.create(f);
@@ -146,102 +181,74 @@ public class TestDFSShell {
 
  @Test (timeout = 30000)
   public void testZeroSizeFile() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    FileSystem fs = cluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(),
-               fs instanceof DistributedFileSystem);
-    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
-
-    try {
-      //create a zero size file
-      final File f1 = new File(TEST_ROOT_DIR, "f1");
-      assertTrue(!f1.exists());
-      assertTrue(f1.createNewFile());
-      assertTrue(f1.exists());
-      assertTrue(f1.isFile());
-      assertEquals(0L, f1.length());
-
-      //copy to remote
-      final Path root = mkdir(dfs, new Path("/test/zeroSizeFile"));
-      final Path remotef = new Path(root, "dst");
-      show("copy local " + f1 + " to remote " + remotef);
-      dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), remotef);
-
-      //getBlockSize() should not throw exception
-      show("Block size = " + dfs.getFileStatus(remotef).getBlockSize());
-
-      //copy back
-      final File f2 = new File(TEST_ROOT_DIR, "f2");
-      assertTrue(!f2.exists());
-      dfs.copyToLocalFile(remotef, new Path(f2.getPath()));
-      assertTrue(f2.exists());
-      assertTrue(f2.isFile());
-      assertEquals(0L, f2.length());
-
-      f1.delete();
-      f2.delete();
-    } finally {
-      try {dfs.close();} catch (Exception e) {}
-      cluster.shutdown();
-    }
+    //create a zero size file
+    final File f1 = new File(TEST_ROOT_DIR, "f1");
+    assertTrue(!f1.exists());
+    assertTrue(f1.createNewFile());
+    assertTrue(f1.exists());
+    assertTrue(f1.isFile());
+    assertEquals(0L, f1.length());
+
+    //copy to remote
+    final Path root = mkdir(dfs, new Path("/test/zeroSizeFile"));
+    final Path remotef = new Path(root, "dst");
+    show("copy local " + f1 + " to remote " + remotef);
+    dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), remotef);
+
+    //getBlockSize() should not throw exception
+    show("Block size = " + dfs.getFileStatus(remotef).getBlockSize());
+
+    //copy back
+    final File f2 = new File(TEST_ROOT_DIR, "f2");
+    assertTrue(!f2.exists());
+    dfs.copyToLocalFile(remotef, new Path(f2.getPath()));
+    assertTrue(f2.exists());
+    assertTrue(f2.isFile());
+    assertEquals(0L, f2.length());
+
+    f1.delete();
+    f2.delete();
+  }
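
Taken together, the hunks above reduce to a single class-level pattern:
start one MiniDFSCluster in @BeforeClass, reuse it through a static
DistributedFileSystem handle in every test, and tear it down in @AfterClass.
The following condensed sketch is for illustration only and is not part of
the patch; the class name ExampleSharedClusterTest and the test method are
hypothetical.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;

public class ExampleSharedClusterTest {
  private static MiniDFSCluster miniCluster;
  private static DistributedFileSystem dfs;

  // Started once per class; every test method reuses the same cluster.
  @BeforeClass
  public static void setup() throws IOException {
    miniCluster = new MiniDFSCluster.Builder(new Configuration())
        .numDataNodes(2).build();
    miniCluster.waitActive();
    dfs = miniCluster.getFileSystem();
  }

  // Shut down once at the end: the first argument deletes the DFS
  // directories, the second closes the cached FileSystem instances.
  @AfterClass
  public static void tearDown() {
    if (miniCluster != null) {
      miniCluster.shutdown(true, true);
    }
  }

  // A class-wide Rule that applies to each test method individually,
  // standing in for scattered per-test @Test(timeout = ...) values.
  @Rule
  public Timeout globalTimeout = new Timeout(30 * 1000); // 30s

  @Test
  public void testSomething() throws IOException {
    // talk to the shared dfs handle directly; no per-test cluster setup
  }
}

The trade-off is that tests now share namespace state, which is why each
refactored test writes under its own HDFS directory (for example
/test/zeroSizeFile above) rather than a common scratch path.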

[1/2] hadoop git commit: HDFS-10893. Refactor TestDFSShell by setting up MiniDFSCluser once for all commands test. Contributed by Mingliang Liu

2016-10-05 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/trunk c5ca21691 -> 202325485


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20232548/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 558bcda..5e5b8b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -66,6 +66,10 @@ import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
+import org.junit.rules.Timeout;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
@@ -95,6 +99,37 @@ public class TestDFSShell {
   private static final byte[] RAW_A1_VALUE = new byte[]{0x32, 0x32, 0x32};
   private static final byte[] TRUSTED_A1_VALUE = new byte[]{0x31, 0x31, 0x31};
   private static final byte[] USER_A1_VALUE = new byte[]{0x31, 0x32, 0x33};
+  private static final int BLOCK_SIZE = 1024;
+
+  private static MiniDFSCluster miniCluster;
+  private static DistributedFileSystem dfs;
+
+  @BeforeClass
+  public static void setup() throws IOException {
+    final Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    // set up the shared miniCluster directory so individual tests can launch
+    // new clusters without conflict
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
+        GenericTestUtils.getTestDir("TestDFSShell").getAbsolutePath());
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+
+    miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    miniCluster.waitActive();
+    dfs = miniCluster.getFileSystem();
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    if (miniCluster != null) {
+      miniCluster.shutdown(true, true);
+    }
+  }
+
+  @Rule
+  public Timeout globalTimeout = new Timeout(30 * 1000); // 30s
 
   static Path writeFile(FileSystem fs, Path f) throws IOException {
 DataOutputStream out = fs.create(f);
@@ -154,106 +189,77 @@ public class TestDFSShell {
 
  @Test (timeout = 30000)
   public void testZeroSizeFile() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    FileSystem fs = cluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(),
-               fs instanceof DistributedFileSystem);
-    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
-
-    try {
-      //create a zero size file
-      final File f1 = new File(TEST_ROOT_DIR, "f1");
-      assertTrue(!f1.exists());
-      assertTrue(f1.createNewFile());
-      assertTrue(f1.exists());
-      assertTrue(f1.isFile());
-      assertEquals(0L, f1.length());
-
-      //copy to remote
-      final Path root = mkdir(dfs, new Path("/test/zeroSizeFile"));
-      final Path remotef = new Path(root, "dst");
-      show("copy local " + f1 + " to remote " + remotef);
-      dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), remotef);
-
-      //getBlockSize() should not throw exception
-      show("Block size = " + dfs.getFileStatus(remotef).getBlockSize());
-
-      //copy back
-      final File f2 = new File(TEST_ROOT_DIR, "f2");
-      assertTrue(!f2.exists());
-      dfs.copyToLocalFile(remotef, new Path(f2.getPath()));
-      assertTrue(f2.exists());
-      assertTrue(f2.isFile());
-      assertEquals(0L, f2.length());
-
-      f1.delete();
-      f2.delete();
-    } finally {
-      try {dfs.close();} catch (Exception e) {}
-      cluster.shutdown();
-    }
+    //create a zero size file
+    final File f1 = new File(TEST_ROOT_DIR, "f1");
+    assertTrue(!f1.exists());
+    assertTrue(f1.createNewFile());
+    assertTrue(f1.exists());
+    assertTrue(f1.isFile());
+    assertEquals(0L, f1.length());
+
+    //copy to remote
+    final Path root = mkdir(dfs, new Path("/testZeroSizeFile/zeroSizeFile"));
+    final Path remotef = new Path(root, "dst");
+    show("copy local " + f1 + " to remote " + remotef);
+    dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), remotef);
+
+    //getBlockSize() should not throw exception
+    show("Block size = " + dfs.getFileStatus(remotef).getBlockSize());
+
+    //copy back
+    final File f2 = new File(TEST_ROOT_DIR, "f2");
+    assertTrue(!f2.exists());
+    dfs.copyToLocalFile(remotef, new Path(f2.getPath()));
+    assertTrue(f2.exists());
+    assertTrue(f2.isFile());
+    assertEquals(0L, f2.length());
+
+    f1.delete();
+    f2.delete();
+  }
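
The HDFS_MINIDFS_BASEDIR setting in the setup hunk is what lets a test that
genuinely needs different cluster settings keep launching its own
MiniDFSCluster: the shared cluster's storage lives under its own base
directory, so a second cluster cannot collide with it. Below is a minimal
sketch of that escape hatch, assuming the shared setup from this patch; the
method name testWithDedicatedCluster and the "TestDFSShell-standalone"
directory are illustrative.

  // Hypothetical test that needs its own cluster configuration. It places
  // the second cluster under a distinct base directory so its storage never
  // collides with the shared cluster started in @BeforeClass.
  @Test
  public void testWithDedicatedCluster() throws IOException {
    final Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
        GenericTestUtils.getTestDir("TestDFSShell-standalone")
            .getAbsolutePath());
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      // ... exercise the shell command under test against fs ...
    } finally {
      // a per-test cluster still needs per-test cleanup
      cluster.shutdown(true, true);
    }
  }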

[1/2] hadoop git commit: HDFS-10893. Refactor TestDFSShell by setting up MiniDFSCluser once for all commands test. Contributed by Mingliang Liu

2016-10-05 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 434403a2a -> 14bacd2b9


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14bacd2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 6068978..88f0c95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -66,6 +66,10 @@ import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
+import org.junit.rules.Timeout;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
@@ -95,6 +99,37 @@ public class TestDFSShell {
   private static final byte[] RAW_A1_VALUE = new byte[]{0x32, 0x32, 0x32};
   private static final byte[] TRUSTED_A1_VALUE = new byte[]{0x31, 0x31, 0x31};
   private static final byte[] USER_A1_VALUE = new byte[]{0x31, 0x32, 0x33};
+  private static final int BLOCK_SIZE = 1024;
+
+  private static MiniDFSCluster miniCluster;
+  private static DistributedFileSystem dfs;
+
+  @BeforeClass
+  public static void setup() throws IOException {
+    final Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    // set up the shared miniCluster directory so individual tests can launch
+    // new clusters without conflict
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
+        GenericTestUtils.getTestDir("TestDFSShell").getAbsolutePath());
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+
+    miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    miniCluster.waitActive();
+    dfs = miniCluster.getFileSystem();
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    if (miniCluster != null) {
+      miniCluster.shutdown(true, true);
+    }
+  }
+
+  @Rule
+  public Timeout globalTimeout = new Timeout(30 * 1000); // 30s
 
   static Path writeFile(FileSystem fs, Path f) throws IOException {
 DataOutputStream out = fs.create(f);
@@ -146,102 +181,74 @@ public class TestDFSShell {
 
  @Test (timeout = 30000)
   public void testZeroSizeFile() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    FileSystem fs = cluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(),
-               fs instanceof DistributedFileSystem);
-    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
-
-    try {
-      //create a zero size file
-      final File f1 = new File(TEST_ROOT_DIR, "f1");
-      assertTrue(!f1.exists());
-      assertTrue(f1.createNewFile());
-      assertTrue(f1.exists());
-      assertTrue(f1.isFile());
-      assertEquals(0L, f1.length());
-
-      //copy to remote
-      final Path root = mkdir(dfs, new Path("/test/zeroSizeFile"));
-      final Path remotef = new Path(root, "dst");
-      show("copy local " + f1 + " to remote " + remotef);
-      dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), remotef);
-
-      //getBlockSize() should not throw exception
-      show("Block size = " + dfs.getFileStatus(remotef).getBlockSize());
-
-      //copy back
-      final File f2 = new File(TEST_ROOT_DIR, "f2");
-      assertTrue(!f2.exists());
-      dfs.copyToLocalFile(remotef, new Path(f2.getPath()));
-      assertTrue(f2.exists());
-      assertTrue(f2.isFile());
-      assertEquals(0L, f2.length());
-
-      f1.delete();
-      f2.delete();
-    } finally {
-      try {dfs.close();} catch (Exception e) {}
-      cluster.shutdown();
-    }
+    //create a zero size file
+    final File f1 = new File(TEST_ROOT_DIR, "f1");
+    assertTrue(!f1.exists());
+    assertTrue(f1.createNewFile());
+    assertTrue(f1.exists());
+    assertTrue(f1.isFile());
+    assertEquals(0L, f1.length());
+
+    //copy to remote
+    final Path root = mkdir(dfs, new Path("/test/zeroSizeFile"));
+    final Path remotef = new Path(root, "dst");
+    show("copy local " + f1 + " to remote " + remotef);
+    dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), remotef);
+
+    //getBlockSize() should not throw exception
+    show("Block size = " + dfs.getFileStatus(remotef).getBlockSize());
+
+    //copy back
+    final File f2 = new File(TEST_ROOT_DIR, "f2");
+    assertTrue(!f2.exists());
+    dfs.copyToLocalFile(remotef, new Path(f2.getPath()));
+    assertTrue(f2.exists());
+    assertTrue(f2.isFile());
+    assertEquals(0L, f2.length());
+
+    f1.delete();
+    f2.delete();
+  }