You are mixing the old mapred API with the new mapreduce API. Because your map() takes an OutputCollector and a Reporter, it does not override the new API's Mapper.map(), so Hadoop runs the default identity mapper instead, which passes the LongWritable file offset straight through as the map output key; that is exactly the type mismatch in your stack trace. Look for a sample with the following structure:
    @Override
    public void map(Text key, Writable value, Context context)
            throws IOException, InterruptedException {
        ...
        context.write(key, objWrite);
    }
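
For your Rating job, the two nested classes rewritten purely against the new org.apache.hadoop.mapreduce API would look roughly like this (a minimal sketch, reusing your class and field names, with the tokenizing left as a comment; not tested against your data):

    // inside the Rating class; needs java.io.IOException, org.apache.hadoop.io.Text,
    // org.apache.hadoop.mapreduce.Mapper and org.apache.hadoop.mapreduce.Reducer
    public static class RatingMapper extends Mapper<Object, Text, Text, Text> {
        private final Text rating = new Text();
        private final Text movie = new Text();

        @Override   // with @Override, a mismatched signature becomes a compile error
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            // ... tokenize value into rating and movie as in your version ...
            context.write(rating, movie);   // Context replaces OutputCollector/Reporter
        }
    }

    public static class RatingReducer extends Reducer<Text, Text, Text, Text> {
        @Override
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            StringBuilder joined = new StringBuilder();
            for (Text v : values) {         // the new API passes an Iterable, not an Iterator
                if (joined.length() > 0) joined.append(", ");
                joined.append(v.toString());
            }
            context.write(key, new Text(joined.toString()));
        }
    }

Once the signatures match exactly, the mismatch disappears because your map(), not the identity mapper, produces the output keys. The driver settings (setMapOutputKeyClass and friends) you already have are fine.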


On Sun, Apr 25, 2010 at 5:12 PM, iman453 <ish...@gmail.com> wrote:

>
> Hi,
>
> I just started playing around with Hadoop, and am running into an error
> while running my first basic program. The error is:
>
> java.io.IOException: Type mismatch in key from map: expected org.apache.hadoop.io.Text, recieved org.apache.hadoop.io.LongWritable
>        at org.apache.hadoop.mapred.MapTask$MapOutputBuffer.collect(MapTask.java:845)
>
> I've pasted the MapReduce program I'm trying to run below. I'd really
> appreciate any help I could get.
>
> import java.io.IOException;
> import java.util.Iterator;
> import java.util.StringTokenizer;
>
> import org.apache.hadoop.io.LongWritable;
> import org.apache.hadoop.io.Text;
> import org.apache.hadoop.mapred.OutputCollector;
> import org.apache.hadoop.mapred.Reporter;
> import org.apache.hadoop.mapred.TextInputFormat;
> import org.apache.hadoop.mapreduce.Mapper;
>
> import java.io.IOException;
> import java.util.StringTokenizer;
>
> import org.apache.hadoop.conf.Configuration;
> import org.apache.hadoop.conf.Configured;
> import org.apache.hadoop.fs.Path;
> import org.apache.hadoop.io.LongWritable;
> import org.apache.hadoop.io.IntWritable;
> import org.apache.hadoop.io.Text;
> import org.apache.hadoop.mapred.OutputCollector;
> import org.apache.hadoop.mapred.Reporter;
> //import org.apache.hadoop.mapreduce.InputFormat;
> import org.apache.hadoop.mapreduce.Job;
> import org.apache.hadoop.mapreduce.Mapper;
> import org.apache.hadoop.mapreduce.OutputFormat;
> import org.apache.hadoop.mapreduce.Reducer;
> import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
> import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
> import org.apache.hadoop.util.GenericOptionsParser;
> import org.apache.hadoop.util.Tool;
> import org.apache.hadoop.util.ToolRunner;
>
> public class Rating extends Configured implements Tool {
>        public static class RatingMapper extends Mapper<Object, Text, Text, Text> {
>
>                private final Text rating = new Text();
>                private final Text movie = new Text();
>
>                public void map(Object key, Text val,
>                                OutputCollector<Text, Text> output,
>                                Reporter reporter)
>                                throws IOException, InterruptedException {
>
>                        String line = val.toString();
>
>                        if (!line.contains("{")) {
>                                StringTokenizer itr = new StringTokenizer(line);
>                                String garbage = itr.nextToken();
>                                garbage = itr.nextToken();
>                                String ratingString = itr.nextToken();
>                                //Integer ratingInt = Integer.parseInt(ratingString);
>                                //String ratingString2 = ratingInt.toString();
>                                rating.set(ratingString);
>
>                                StringTokenizer itr2 = new StringTokenizer(line);
>                                garbage = itr.nextToken("\"");
>                                String movieString = itr.nextToken();
>                                movie.set(movieString);
>                                output.collect(rating, movie);
>                        }
>
>                }
>        }
>
>        public static class RatingReducer extends Reducer<Text, Text, Text, Text> {
>
>                public void reduce(Text key, Iterator<Text> values,
>                                OutputCollector<Text, Text> output,
>                                Reporter reporter)
>                                throws IOException {
>
>                        boolean first = true;
>                        StringBuilder toReturn = new StringBuilder();
>                        while (values.hasNext()) {
>                                if (!first)
>                                        toReturn.append(", ");
>                                first = false;
>                                toReturn.append(values.next().toString());
>                        }
>
>                        String keyString = "Movies above " + key.toString() + "-";
>                        key.set(keyString);
>                        output.collect(key, new Text(toReturn.toString()));
>                }
>
>        }
>
>        //      public class RatingDriver {
>
>        public int run(String[] args) throws Exception {
>                Configuration conf = new Configuration();
>
>                String[] otherArgs = new GenericOptionsParser(conf, args)
>                                .getRemainingArgs();
>                if (otherArgs.length != 2) {
>                        System.err.println("Usage: wordcount <in> <out>");
>                        System.exit(2);
>                }
>
>                Job job;
>                try {
>                        job = new Job(conf, "IMDB");
>
>                        job.setJarByClass(Rating.class);
>                        job.setMapperClass(RatingMapper.class);
>                        // job.setCombinerClass(IntSumReducer.class);
>                        job.setReducerClass(RatingReducer.class);
>                        job.setOutputKeyClass(Text.class);
>                        job.setOutputValueClass(Text.class);
>                        job.setMapOutputKeyClass(Text.class);
>                        job.setMapOutputValueClass(Text.class);
>        //          job.setInputFormatClass(TextInputFormat.class);
>        //              job.setOutputFormatClass((Class<? extends OutputFormat>) TextInputFormat.class);
>                        try {
>                                //      FileInputFormat.addInputPath(job, new Path("/tmp/hadoop-ishan/dfs/input"));
>                                FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
>                        } catch (IOException e) {
>                        }
>                        //      FileOutputFormat.setOutputPath(job, new Path("/tmp/hadoop-ishan/dfs/output"));
>                        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
>                        try {
>                                System.exit(job.waitForCompletion(true) ? 0 : 1);
>                        } catch (IOException e) {
>                        } catch (InterruptedException e) {
>                        } catch (ClassNotFoundException e) {
>                        }
>                } catch (IOException e1) {
>                }
>                return 1;
>        }
>
>        public static void main(String[] args) throws Exception {
>                int res = ToolRunner.run(new Configuration(), new Rating(), args);
>                System.exit(res);
>        }
>
>        //      }
> }
