Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 c8f2880f9 -> da6228e32


MAPREDUCE-6292. Use org.junit package instead of junit.framework in 
TestCombineFileInputFormat. (aajisaka)

(cherry picked from commit c770df49b4c9d5ac5ddaa4ff2ec541e4bd1a8e56)
(cherry picked from commit 2e509908eed4cf2e365ca1fd94aa2ff3d59193c1)
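
For background: the patch moves the test from the JUnit 3 style (a class
extending junit.framework.TestCase, whose public "test*" methods are picked
up by name) to the JUnit 4 style (a plain class whose methods are picked up
via the @Test annotation, with assertions statically imported from
org.junit.Assert). This is why the diff below adds @Test to every test
method: once "extends TestCase" is gone, an unannotated method would
silently stop running. A minimal sketch of the pattern, using a hypothetical
FooTest class rather than code from this patch:

    // JUnit 3 style: methods are discovered by the "test" name prefix.
    import junit.framework.TestCase;

    public class FooTest extends TestCase {
      public void testAddition() {
        assertEquals(2, 1 + 1);  // inherited from TestCase
      }
    }

    // JUnit 4 style: methods are discovered via the @Test annotation.
    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class FooTest {
      @Test
      public void testAddition() {
        assertEquals(2, 1 + 1);  // static import from org.junit.Assert
      }
    }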


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da6228e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da6228e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da6228e3

Branch: refs/heads/branch-2.7
Commit: da6228e3202da3fb5d8e31ac935e5daecc7ea106
Parents: c8f2880
Author: Akira Ajisaka <aajis...@apache.org>
Authored: Wed Mar 25 19:00:35 2015 +0900
Committer: Akira Ajisaka <aajis...@apache.org>
Committed: Wed Mar 25 19:02:44 2015 +0900

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  3 ++
 .../lib/input/TestCombineFileInputFormat.java   | 36 ++++++++++++--------
 2 files changed, 25 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da6228e3/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index b005c59..da29b4e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -84,6 +84,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-6265. Make ContainerLauncherImpl.INITIAL_POOL_SIZE configurable 
     to better control to launch/kill containers. (Zhihai Xu via ozawa)
 
+    MAPREDUCE-6292. Use org.junit package instead of junit.framework in
+    TestCombineFileInputFormat. (aajisaka)
+
   OPTIMIZATIONS
 
     MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da6228e3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
index db51ec6..85c675c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
@@ -29,9 +29,6 @@ import java.util.TreeMap;
 import java.util.concurrent.TimeoutException;
 import java.util.zip.GZIPOutputStream;
 
-import org.junit.Assert;
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -60,7 +57,11 @@ import org.junit.Test;
 
 import com.google.common.collect.HashMultiset;
 
-public class TestCombineFileInputFormat extends TestCase {
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TestCombineFileInputFormat {
 
   private static final String rack1[] = new String[] {
     "/r1"
@@ -221,6 +222,7 @@ public class TestCombineFileInputFormat extends TestCase {
     }
   }
 
+  @Test
   public void testRecordReaderInit() throws InterruptedException, IOException {
     // Test that we properly initialize the child recordreader when
     // CombineFileInputFormat and CombineFileRecordReader are used.
@@ -258,6 +260,7 @@ public class TestCombineFileInputFormat extends TestCase {
       rr.getCurrentKey().toString());
   }
 
+  @Test
   public void testReinit() throws Exception {
     // Test that a split containing multiple files works correctly,
     // with the child RecordReader getting its initialize() method
@@ -296,6 +299,7 @@ public class TestCombineFileInputFormat extends TestCase {
     assertFalse(rr.nextKeyValue());
   }
 
+  @Test
   public void testSplitPlacement() throws Exception {
     MiniDFSCluster dfs = null;
     FileSystem fileSys = null;
@@ -725,6 +729,7 @@ public class TestCombineFileInputFormat extends TestCase {
     DFSTestUtil.waitReplication(fileSys, name, replication);
   }
 
+  @Test
   public void testNodeDistribution() throws IOException, InterruptedException {
     DummyInputFormat inFormat = new DummyInputFormat();
     int numBlocks = 60;
@@ -774,20 +779,21 @@ public class TestCombineFileInputFormat extends TestCase {
         maxSplitSize, minSizeNode, minSizeRack, splits);
 
     int expectedSplitCount = (int) (totLength / maxSplitSize);
-    Assert.assertEquals(expectedSplitCount, splits.size());
+    assertEquals(expectedSplitCount, splits.size());
 
     // Ensure 90+% of the splits have node local blocks.
     // 100% locality may not always be achieved.
     int numLocalSplits = 0;
     for (InputSplit inputSplit : splits) {
-      Assert.assertEquals(maxSplitSize, inputSplit.getLength());
+      assertEquals(maxSplitSize, inputSplit.getLength());
       if (inputSplit.getLocations().length == 1) {
         numLocalSplits++;
       }
     }
-    Assert.assertTrue(numLocalSplits >= 0.9 * splits.size());
+    assertTrue(numLocalSplits >= 0.9 * splits.size());
   }
-  
+
+  @Test
   public void testNodeInputSplit() throws IOException, InterruptedException {
    // Regression test for MAPREDUCE-4892. There are 2 nodes with all blocks on
     // both nodes. The grouping ensures that both nodes get splits instead of 
@@ -826,18 +832,19 @@ public class TestCombineFileInputFormat extends TestCase {
                           maxSize, minSizeNode, minSizeRack, splits);
     
     int expectedSplitCount = (int)(totLength/maxSize);
-    Assert.assertEquals(expectedSplitCount, splits.size());
+    assertEquals(expectedSplitCount, splits.size());
     HashMultiset<String> nodeSplits = HashMultiset.create();
     for(int i=0; i<expectedSplitCount; ++i) {
       InputSplit inSplit = splits.get(i);
-      Assert.assertEquals(maxSize, inSplit.getLength());
-      Assert.assertEquals(1, inSplit.getLocations().length);
+      assertEquals(maxSize, inSplit.getLength());
+      assertEquals(1, inSplit.getLocations().length);
       nodeSplits.add(inSplit.getLocations()[0]);
     }
-    Assert.assertEquals(3, nodeSplits.count(locations[0]));
-    Assert.assertEquals(3, nodeSplits.count(locations[1]));
+    assertEquals(3, nodeSplits.count(locations[0]));
+    assertEquals(3, nodeSplits.count(locations[1]));
   }
-  
+
+  @Test
   public void testSplitPlacementForCompressedFiles() throws Exception {
     MiniDFSCluster dfs = null;
     FileSystem fileSys = null;
@@ -1190,6 +1197,7 @@ public class TestCombineFileInputFormat extends TestCase {
   /**
    * Test that CFIF can handle missing blocks.
    */
+  @Test
   public void testMissingBlocks() throws Exception {
     String namenode = null;
     MiniDFSCluster dfs = null;
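
As a side note (not stated in the commit itself), the annotation-driven
style also enables per-test options that JUnit 3 lacked, such as timeouts
and expected exceptions. A small hypothetical example, not taken from this
patch:

    import java.io.IOException;

    import org.junit.Test;

    public class AnnotationOptionsTest {
      // Fails the test if the body takes longer than 60 seconds.
      @Test(timeout = 60000)
      public void testFinishesQuickly() {
        // trivially fast body
      }

      // Passes only if an IOException escapes the method.
      @Test(expected = IOException.class)
      public void testThrows() throws IOException {
        throw new IOException("expected");
      }
    }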
