Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java?view=diff&rev=566798&r1=566797&r2=566798
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java Thu Aug 16 11:45:49 2007
@@ -71,7 +71,8 @@
     }

     // try splitting the file in a variety of sizes
-    InputFormat format = new SequenceFileAsTextInputFormat();
+    InputFormat<Text, Text> format =
+      new SequenceFileAsTextInputFormat();

     for (int i = 0; i < 3; i++) {
       int numSplits =
@@ -83,12 +84,12 @@
       // check each split
       BitSet bits = new BitSet(length);
       for (int j = 0; j < splits.length; j++) {
-        RecordReader reader =
+        RecordReader<Text, Text> reader =
           format.getRecordReader(splits[j], job, reporter);
         Class readerClass = reader.getClass();
         assertEquals("reader class is SequenceFileAsTextRecordReader.", SequenceFileAsTextRecordReader.class, readerClass);

-        Text value = (Text)reader.createValue();
-        Text key = (Text)reader.createKey();
+        Text value = reader.createValue();
+        Text key = reader.createKey();

         try {
           int count = 0;
           while (reader.next(key, value)) {
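
This hunk shows the pattern the whole commit applies: InputFormat and RecordReader are now
parameterized, so createKey()/createValue() return the declared types and the (Text) downcasts
disappear. A minimal sketch of the generified read loop, assuming a configured JobConf and a
single split (variable names here are illustrative, not part of the commit):

    InputFormat<Text, Text> format = new SequenceFileAsTextInputFormat();
    RecordReader<Text, Text> reader =
        format.getRecordReader(split, job, Reporter.NULL);
    Text key = reader.createKey();      // typed: no (Text) cast required
    Text value = reader.createValue();
    try {
      while (reader.next(key, value)) {
        // key and value are compile-time Text; a mismatched type is now
        // a javac error instead of a runtime ClassCastException
      }
    } finally {
      reader.close();
    }
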
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java?view=diff&rev=566798&r1=566797&r2=566798
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java Thu Aug 16 11:45:49 2007
@@ -70,7 +70,8 @@

   private int countRecords(int numSplits) throws IOException {
-    InputFormat format = new SequenceFileInputFilter();
+    InputFormat<Text, BytesWritable> format =
+      new SequenceFileInputFilter<Text, BytesWritable>();
     Text key = new Text();
     BytesWritable value = new BytesWritable();
     if (numSplits==0) {
@@ -83,7 +84,7 @@
     int count = 0;
     LOG.info("Generated " + splits.length + " splits.");
     for (int j = 0; j < splits.length; j++) {
-      RecordReader reader =
+      RecordReader<Text, BytesWritable> reader =
         format.getRecordReader(splits[j], job, reporter);
       try {
         while (reader.next(key, value)) {

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java?view=diff&rev=566798&r1=566797&r2=566798
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java Thu Aug 16 11:45:49 2007
@@ -73,7 +73,8 @@
     }

     // try splitting the file in a variety of sizes
-    InputFormat format = new SequenceFileInputFormat();
+    InputFormat<IntWritable, BytesWritable> format =
+      new SequenceFileInputFormat<IntWritable, BytesWritable>();
     IntWritable key = new IntWritable();
     BytesWritable value = new BytesWritable();
     for (int i = 0; i < 3; i++) {
@@ -86,7 +87,7 @@
       // check each split
       BitSet bits = new BitSet(length);
       for (int j = 0; j < splits.length; j++) {
-        RecordReader reader =
+        RecordReader<IntWritable, BytesWritable> reader =
           format.getRecordReader(splits[j], job, reporter);
         try {
           int count = 0;

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestTextInputFormat.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestTextInputFormat.java?view=diff&rev=566798&r1=566797&r2=566798
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestTextInputFormat.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestTextInputFormat.java Thu Aug 16 11:45:49 2007
@@ -99,7 +99,7 @@
       BitSet bits = new BitSet(length);
       for (int j = 0; j < splits.length; j++) {
         LOG.debug("split["+j+"]= " + splits[j]);
-        RecordReader reader =
+        RecordReader<LongWritable, Text> reader =
           format.getRecordReader(splits[j], job, reporter);
         try {
           int count = 0;
@@ -184,14 +184,14 @@

   private static final Reporter voidReporter = Reporter.NULL;

-  private static List<Text> readSplit(InputFormat format,
+  private static List<Text> readSplit(TextInputFormat format,
                                       InputSplit split,
                                       JobConf job) throws IOException {
    List<Text> result = new ArrayList<Text>();
-    RecordReader reader = format.getRecordReader(split, job,
-                                                 voidReporter);
-    LongWritable key = (LongWritable) reader.createKey();
-    Text value = (Text) reader.createValue();
+    RecordReader<LongWritable, Text> reader =
+      format.getRecordReader(split, job, voidReporter);
+    LongWritable key = reader.createKey();
+    Text value = reader.createValue();
     while (reader.next(key, value)) {
       result.add(value);
       value = (Text) reader.createValue();
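
TestSequenceFileInputFilter above drives the same generified surface through a filtering input
format. For context, a hedged sketch of how a job would plug in one of its filters; setFilterClass
and the RegexFilter.setPattern hook are the filter API of the class this test exercises, but the
pattern string and conf wiring here are illustrative only:

    JobConf job = new JobConf();
    job.setInputFormat(SequenceFileInputFilter.class);
    // keep only records whose key's string form ends in "0"
    SequenceFileInputFilter.setFilterClass(job,
        SequenceFileInputFilter.RegexFilter.class);
    SequenceFileInputFilter.RegexFilter.setPattern(job, ".*0");
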
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestTextOutputFormat.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestTextOutputFormat.java?view=diff&rev=566798&r1=566797&r2=566798
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestTextOutputFormat.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestTextOutputFormat.java Thu Aug 16 11:45:49 2007
@@ -54,9 +54,10 @@
     // A reporter that does nothing
     Reporter reporter = Reporter.NULL;

-    TextOutputFormat theOutputFormat = new TextOutputFormat();
-    RecordWriter theRecodWriter = theOutputFormat.getRecordWriter(localFs, job,
-                                                                  file, reporter);
+    TextOutputFormat<Text, Text> theOutputFormat =
+      new TextOutputFormat<Text, Text>();
+    RecordWriter<Text, Text> theRecordWriter =
+      theOutputFormat.getRecordWriter(localFs, job, file, reporter);

     Text key1 = new Text("key1");
     Text key2 = new Text("key2");
@@ -64,14 +65,14 @@
     Text val2 = new Text("val2");

     try {
-      theRecodWriter.write(key1, val1);
-      theRecodWriter.write(null, val1);
-      theRecodWriter.write(key1, null);
-      theRecodWriter.write(null, null);
-      theRecodWriter.write(key2, val2);
+      theRecordWriter.write(key1, val1);
+      theRecordWriter.write(null, val1);
+      theRecordWriter.write(key1, null);
+      theRecordWriter.write(null, null);
+      theRecordWriter.write(key2, val2);
     } finally {
-      theRecodWriter.close(reporter);
+      theRecordWriter.close(reporter);
     }
     File expectedFile = new File(new Path(workDir, file).toString());
     StringBuffer expectedOutput = new StringBuffer();

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java?view=diff&rev=566798&r1=566797&r2=566798
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java Thu Aug 16 11:45:49 2007
@@ -82,15 +82,18 @@
     out.close();
   }

-  public static class DataCopy extends MapReduceBase implements Mapper,
-      Reducer {
-    public void map(WritableComparable key, Writable value,
-        OutputCollector output, Reporter reporter) throws IOException {
+  public static class DataCopy extends MapReduceBase
+    implements Mapper<WritableComparable, Text, Text, Text>,
+               Reducer<Text, Text, Text, Text> {
+    public void map(WritableComparable key, Text value,
+                    OutputCollector<Text, Text> output,
+                    Reporter reporter) throws IOException {
       output.collect(new Text(key.toString()), value);
     }

-    public void reduce(WritableComparable key, Iterator values,
-        OutputCollector output, Reporter reporter) throws IOException {
+    public void reduce(Text key, Iterator<Text> values,
+                       OutputCollector<Text, Text> output,
+                       Reporter reporter) throws IOException {
       Text dumbKey = new Text("");
       while (values.hasNext()) {
         Text data = (Text) values.next();
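
The TestTextOutputFormat hunk above exercises null keys and values: TextOutputFormat's line
writer drops whichever side is null (and the tab separator with it) and emits no line at all when
both are null. A sketch of how the test's expectedOutput buffer, visible in the trailing context,
lines up with the five write() calls (the exact append sequence is assumed, not shown in this
diff):

    expectedOutput.append(key1).append('\t').append(val1).append('\n');
    expectedOutput.append(val1).append('\n'); // null key: value only
    expectedOutput.append(key1).append('\n'); // null value: key only
    // (null, null) writes nothing, so no line is appended for it
    expectedOutput.append(key2).append('\t').append(val2).append('\n');
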
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/lib/aggregate/AggregatorTests.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/lib/aggregate/AggregatorTests.java?view=diff&rev=566798&r1=566797&r2=566798
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/lib/aggregate/AggregatorTests.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/lib/aggregate/AggregatorTests.java Thu Aug 16 11:45:49 2007
@@ -24,13 +24,13 @@

 public class AggregatorTests extends ValueAggregatorBaseDescriptor {

-  public ArrayList<Entry> generateKeyValPairs(Object key, Object val) {
-    ArrayList<Entry> retv = new ArrayList<Entry>();
+  public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
+    ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
     String [] words = val.toString().split(" ");
     String countType;
     String id;
-    Entry e;
+    Entry<Text, Text> e;
     for (String word: words) {
       long numVal = Long.parseLong(word);

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/pipes/WordCountInputFormat.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/pipes/WordCountInputFormat.java?view=diff&rev=566798&r1=566797&r2=566798
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/pipes/WordCountInputFormat.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/pipes/WordCountInputFormat.java Thu Aug 16 11:45:49 2007
@@ -30,7 +30,9 @@
  * It defines an InputFormat with InputSplits that are just strings. The
  * RecordReaders are not implemented in Java, naturally...
  */
-public class WordCountInputFormat implements InputFormat {
+public class WordCountInputFormat
+  implements InputFormat<IntWritable, Text> {
+
   static class WordCountInputSplit implements InputSplit {
     private String filename;
     WordCountInputSplit() { }
@@ -59,16 +61,17 @@
     return result.toArray(new InputSplit[result.size()]);
   }
   public void validateInput(JobConf conf) { }
-  public RecordReader getRecordReader(InputSplit split, JobConf conf,
-                                      Reporter reporter) {
-    return new RecordReader(){
-      public boolean next(Writable key, Writable value) throws IOException {
+  public RecordReader<IntWritable, Text> getRecordReader(InputSplit split,
+                                                         JobConf conf,
+                                                         Reporter reporter) {
+    return new RecordReader<IntWritable, Text>(){
+      public boolean next(IntWritable key, Text value) throws IOException {
         return false;
       }
-      public WritableComparable createKey() {
+      public IntWritable createKey() {
         return new IntWritable();
       }
-      public Writable createValue() {
+      public Text createValue() {
         return new Text();
       }
       public long getPos() {
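
AggregatorTests now emits typed Entry<Text, Text> pairs instead of raw Entry. A minimal
descriptor in the same style, assuming the aggregate library's usual generateEntry helper and
LONG_VALUE_SUM constant from ValueAggregatorBaseDescriptor (hedged: only the generified
signature appears in this hunk, and the key ids below are made up for illustration):

    public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
      ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
      for (String word : val.toString().split(" ")) {
        // one LongValueSum aggregator per distinct word, each fed a "1"
        retv.add(generateEntry(LONG_VALUE_SUM, "count_" + word, new Text("1")));
      }
      return retv;
    }
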
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordMR.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordMR.java?view=diff&rev=566798&r1=566797&r2=566798
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordMR.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordMR.java Thu Aug 16 11:45:49 2007
@@ -86,14 +86,18 @@
    * of numbers in random order, but where each number appears
    * as many times as we were instructed.
    */
-  static public class RandomGenMapper implements Mapper {
+  static public class RandomGenMapper implements Mapper<RecInt, RecInt,
+                                                        RecInt, RecString> {
     Random r = new Random();
     public void configure(JobConf job) {
     }

-    public void map(WritableComparable key, Writable val, OutputCollector out, Reporter reporter) throws IOException {
-      int randomVal = ((RecInt) key).getData();
-      int randomCount = ((RecInt) val).getData();
+    public void map(RecInt key,
+                    RecInt val,
+                    OutputCollector<RecInt, RecString> out,
+                    Reporter reporter) throws IOException {
+      int randomVal = key.getData();
+      int randomCount = val.getData();

       for (int i = 0; i < randomCount; i++) {
         out.collect(new RecInt(Math.abs(r.nextInt())),
@@ -105,18 +109,18 @@
   }

   /**
    */
-  static public class RandomGenReducer implements Reducer {
+  static public class RandomGenReducer implements Reducer<RecInt, RecString,
+                                                          RecInt, RecString> {
     public void configure(JobConf job) {
     }

-    public void reduce(WritableComparable key,
-                       Iterator it,
-                       OutputCollector out,
-                       Reporter reporter)
-      throws IOException {
-      int keyint = ((RecInt) key).getData();
+    public void reduce(RecInt key,
+                       Iterator<RecString> it,
+                       OutputCollector<RecInt, RecString> out,
+                       Reporter reporter) throws IOException {
+      int keyint = key.getData();
       while (it.hasNext()) {
-        String val = ((RecString) it.next()).getData();
+        String val = it.next().getData();
         out.collect(new RecInt(Integer.parseInt(val)),
                     new RecString(""));
       }
@@ -141,13 +145,17 @@
    * Each key here is a random number, and the count is the
    * number of times the number was emitted.
    */
-  static public class RandomCheckMapper implements Mapper {
+  static public class RandomCheckMapper implements Mapper<RecInt, RecString,
+                                                          RecInt, RecString> {
     public void configure(JobConf job) {
     }

-    public void map(WritableComparable key, Writable val, OutputCollector out, Reporter reporter) throws IOException {
-      int pos = ((RecInt) key).getData();
-      String str = ((RecString) val).getData();
+    public void map(RecInt key,
+                    RecString val,
+                    OutputCollector<RecInt, RecString> out,
+                    Reporter reporter) throws IOException {
+      int pos = key.getData();
+      String str = val.getData();
       out.collect(new RecInt(pos), new RecString("1"));
     }
     public void close() {
@@ -155,12 +163,16 @@
   }

   /**
    */
-  static public class RandomCheckReducer implements Reducer {
+  static public class RandomCheckReducer implements Reducer<RecInt, RecString,
+                                                            RecInt, RecString> {
     public void configure(JobConf job) {
     }

-    public void reduce(WritableComparable key, Iterator it, OutputCollector out, Reporter reporter) throws IOException {
-      int keyint = ((RecInt) key).getData();
+    public void reduce(RecInt key,
+                       Iterator<RecString> it,
+                       OutputCollector<RecInt, RecString> out,
+                       Reporter reporter) throws IOException {
+      int keyint = key.getData();
       int count = 0;
       while (it.hasNext()) {
         it.next();
@@ -180,27 +192,35 @@
    * Thus, the map() function is just the identity function
    * and reduce() just sums. Nothing to see here!
    */
-  static public class MergeMapper implements Mapper {
+  static public class MergeMapper implements Mapper<RecInt, RecString,
+                                                    RecInt, RecInt> {
     public void configure(JobConf job) {
     }

-    public void map(WritableComparable key, Writable val, OutputCollector out, Reporter reporter) throws IOException {
-      int keyint = ((RecInt) key).getData();
-      String valstr = ((RecString) val).getData();
+    public void map(RecInt key,
+                    RecString val,
+                    OutputCollector<RecInt, RecInt> out,
+                    Reporter reporter) throws IOException {
+      int keyint = key.getData();
+      String valstr = val.getData();
       out.collect(new RecInt(keyint), new RecInt(Integer.parseInt(valstr)));
     }
     public void close() {
     }
   }

-  static public class MergeReducer implements Reducer {
+  static public class MergeReducer implements Reducer<RecInt, RecInt,
+                                                      RecInt, RecInt> {
     public void configure(JobConf job) {
     }

-    public void reduce(WritableComparable key, Iterator it, OutputCollector out, Reporter reporter) throws IOException {
-      int keyint = ((RecInt) key).getData();
+    public void reduce(RecInt key,
+                       Iterator<RecInt> it,
+                       OutputCollector<RecInt, RecInt> out,
+                       Reporter reporter) throws IOException {
+      int keyint = key.getData();
       int total = 0;
       while (it.hasNext()) {
-        total += ((RecInt) it.next()).getData();
+        total += it.next().getData();
       }
       out.collect(new RecInt(keyint), new RecInt(total));
     }
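
With Mapper and Reducer parameterized, each TestRecordMR class above states its record types
once in the implements clause, and all the ((RecInt) ...) and ((RecString) ...) casts go away;
the driver-side wiring is untouched by this commit. A sketch of what that wiring looks like for
the merge pass, using the classes defined above (input/output paths and formats omitted; this is
an illustration, not code from the diff):

    JobConf mergeJob = new JobConf(TestRecordMR.class);
    mergeJob.setMapperClass(MergeMapper.class);
    mergeJob.setReducerClass(MergeReducer.class);
    mergeJob.setOutputKeyClass(RecInt.class);    // key type of MergeReducer's output
    mergeJob.setOutputValueClass(RecInt.class);  // value type of MergeReducer's output
    JobClient.runJob(mergeJob);
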
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordWritable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordWritable.java?view=diff&rev=566798&r1=566797&r2=566798
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordWritable.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/record/TestRecordWritable.java Thu Aug 16 11:45:49 2007
@@ -77,7 +77,8 @@
     }

     // try splitting the file in a variety of sizes
-    InputFormat format = new SequenceFileInputFormat();
+    InputFormat<RecInt, RecBuffer> format =
+      new SequenceFileInputFormat<RecInt, RecBuffer>();
     RecInt key = new RecInt();
     RecBuffer value = new RecBuffer();
     for (int i = 0; i < 3; i++) {
@@ -88,7 +89,7 @@
       // check each split
       BitSet bits = new BitSet(length);
       for (int j = 0; j < splits.length; j++) {
-        RecordReader reader =
+        RecordReader<RecInt, RecBuffer> reader =
           format.getRecordReader(splits[j], job, Reporter.NULL);
         try {
           int count = 0;

Modified: lucene/hadoop/trunk/src/test/testjar/ClassWordCount.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/testjar/ClassWordCount.java?view=diff&rev=566798&r1=566797&r2=566798
==============================================================================
--- lucene/hadoop/trunk/src/test/testjar/ClassWordCount.java (original)
+++ lucene/hadoop/trunk/src/test/testjar/ClassWordCount.java Thu Aug 16 11:45:49 2007
@@ -23,6 +23,7 @@

 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
@@ -46,12 +47,14 @@
    * For each line of input, break the line into words and emit them as
    * (<b>word</b>, <b>1</b>).
    */
-  public static class MapClass extends WordCount.MapClass implements Mapper {
+  public static class MapClass extends WordCount.MapClass
+    implements Mapper<LongWritable, Text, Text, IntWritable> {
   }

  /**
   * A reducer class that just emits the sum of the input values.
   */
-  public static class Reduce extends WordCount.Reduce implements Reducer {
+  public static class Reduce extends WordCount.Reduce
+    implements Reducer<Text, IntWritable, Text, IntWritable> {
   }

 }
Modified: lucene/hadoop/trunk/src/test/testjar/ExternalMapperReducer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/testjar/ExternalMapperReducer.java?view=diff&rev=566798&r1=566797&r2=566798
==============================================================================
--- lucene/hadoop/trunk/src/test/testjar/ExternalMapperReducer.java (original)
+++ lucene/hadoop/trunk/src/test/testjar/ExternalMapperReducer.java Thu Aug 16 11:45:49 2007
@@ -32,7 +32,10 @@
 import org.apache.hadoop.mapred.Reporter;

 public class ExternalMapperReducer
-  implements Mapper, Reducer {
+  implements Mapper<WritableComparable, Writable,
+                    ExternalWritable, IntWritable>,
+             Reducer<WritableComparable, Writable,
+                     WritableComparable, IntWritable> {

   public void configure(JobConf job) {
@@ -44,7 +47,8 @@
   }

   public void map(WritableComparable key, Writable value,
-                  OutputCollector output, Reporter reporter)
+                  OutputCollector<ExternalWritable, IntWritable> output,
+                  Reporter reporter)
     throws IOException {

     if (value instanceof Text) {
@@ -54,8 +58,9 @@
     }
   }

-  public void reduce(WritableComparable key, Iterator values,
-                     OutputCollector output, Reporter reporter)
+  public void reduce(WritableComparable key, Iterator<Writable> values,
+                     OutputCollector<WritableComparable, IntWritable> output,
+                     Reporter reporter)
     throws IOException {

     int count = 0;
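
ExternalMapperReducer is the one class in this commit that implements Mapper and Reducer with
different type arguments, which is legal because the two interfaces declare distinct methods.
The payoff is that each collect() call is now checked against its own OutputCollector
parameterization; roughly (a sketch only, and the ExternalWritable String constructor is an
assumption about the surrounding test code, not shown in this diff):

    // in map(): output is OutputCollector<ExternalWritable, IntWritable>
    output.collect(new ExternalWritable(value.toString()), new IntWritable(1));
    // in reduce(): output is OutputCollector<WritableComparable, IntWritable>
    output.collect(key, new IntWritable(count));
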