It seems likely that you have conflicting versions of Hadoop on your classpath. The `IncompatibleClassChangeError` raised while Spark's `SparkHadoopMapRedUtil` is loading a Hadoop class usually means Hadoop 1.x and 2.x jars are mixed — in Hadoop 2, `JobContext` changed from a class to an interface, which triggers exactly this error.

On Fri, Dec 4, 2015 at 2:52 PM, Prem Sure <premsure...@gmail.com> wrote:

> Getting the below exception while executing the below program in Eclipse.
> Any clue on what's wrong here would be helpful.
>
> *public* *class* WordCount {
>
> *private* *static* *final* FlatMapFunction<String, String>
> *WORDS_EXTRACTOR* =
>
> *new* *FlatMapFunction<String, String>()* {
>
> @Override
>
> *public* Iterable<String> call(String s) *throws* Exception {
>
> *return* Arrays.*asList*(s.split(" "));
>
> }
>
> };
>
> *private* *static* *final* PairFunction<String, String, Integer>
> *WORDS_MAPPER* =
>
> *new* *PairFunction<String, String, Integer>()* {
>
> @Override
>
> *public* Tuple2<String, Integer> call(String s) *throws* Exception {
>
> *return* *new* Tuple2<String, Integer>(s, 1);
>
> }
>
> };
>
> *private* *static* *final* Function2<Integer, Integer, Integer>
> *WORDS_REDUCER* =
>
> *new* *Function2<Integer, Integer, Integer>()* {
>
> @Override
>
> *public* Integer call(Integer a, Integer b) *throws* Exception {
>
> *return* a + b;
>
> }
>
> };
>
> *public* *static* *void* main(String[] args) {
>
> SparkConf conf = *new* SparkConf().setAppName("spark.WordCount"
> ).setMaster("local");
>
> JavaSparkContext *context* = *new* JavaSparkContext(conf);
>
> JavaRDD<String> file = context.textFile("Input/SampleTextFile.txt");
>
> file.saveAsTextFile("file:///Output/WordCount.txt");
>
> JavaRDD<String> words = file.flatMap(*WORDS_EXTRACTOR*);
>
> JavaPairRDD<String, Integer> pairs = words.mapToPair(*WORDS_MAPPER*);
>
> JavaPairRDD<String, Integer> counter = pairs.reduceByKey(*WORDS_REDUCER*);
>
> counter.foreach(System.*out*::println);
>
> counter.saveAsTextFile("file:///Output/WordCount.txt");
>
> }
>
> }
>
> *Exception in thread "main" java.lang.IncompatibleClassChangeError:
> Implementing class*
>
> at java.lang.ClassLoader.defineClass1(*Native Method*)
>
> at java.lang.ClassLoader.defineClass(Unknown Source)
>
> at java.security.SecureClassLoader.defineClass(Unknown Source)
>
> at java.net.URLClassLoader.defineClass(Unknown Source)
>
> at java.net.URLClassLoader.access$100(Unknown Source)
>
> at java.net.URLClassLoader$1.run(Unknown Source)
>
> at java.net.URLClassLoader$1.run(Unknown Source)
>
> at java.security.AccessController.doPrivileged(*Native Method*)
>
> at java.net.URLClassLoader.findClass(Unknown Source)
>
> at java.lang.ClassLoader.loadClass(Unknown Source)
>
> at sun.misc.Launcher$AppClassLoader.loadClass(Unknown Source)
>
> at java.lang.ClassLoader.loadClass(Unknown Source)
>
> at java.lang.Class.forName0(*Native Method*)
>
> at java.lang.Class.forName(Unknown Source)
>
> at org.apache.spark.mapred.SparkHadoopMapRedUtil$class.firstAvailableClass(
> *SparkHadoopMapRedUtil.scala:61*)
>
> at org.apache.spark.mapred.SparkHadoopMapRedUtil$class.newJobContext(
> *SparkHadoopMapRedUtil.scala:27*)
>
> at org.apache.spark.SparkHadoopWriter.newJobContext(
> *SparkHadoopWriter.scala:39*)
>
> at org.apache.spark.SparkHadoopWriter.getJobContext(
> *SparkHadoopWriter.scala:149*)
>
> at org.apache.spark.SparkHadoopWriter.preSetup(
> *SparkHadoopWriter.scala:63*)
>
> at org.apache.spark.rdd.PairRDDFunctions.saveAsHadoopDataset(
> *PairRDDFunctions.scala:1045*)
>
> at org.apache.spark.rdd.PairRDDFunctions.saveAsHadoopFile(
> *PairRDDFunctions.scala:940*)
>
> at org.apache.spark.rdd.PairRDDFunctions.saveAsHadoopFile(
> *PairRDDFunctions.scala:849*)
>
> at org.apache.spark.rdd.RDD.saveAsTextFile(*RDD.scala:1164*)
>
> at org.apache.spark.api.java.JavaRDDLike$class.saveAsTextFile(
> *JavaRDDLike.scala:443*)
>
> at org.apache.spark.api.java.JavaRDD.saveAsTextFile(*JavaRDD.scala:32*)
>
> at spark.WordCount.main(*WordCount.java:44*)
>

Reply via email to