[ https://issues.apache.org/jira/browse/SPARK-4023?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14177934#comment-14177934 ]
Apache Spark commented on SPARK-4023: ------------------------------------- User 'davies' has created a pull request for this issue: https://github.com/apache/spark/pull/2870 > PySpark's stat.Statistics is broken > ----------------------------------- > > Key: SPARK-4023 > URL: https://issues.apache.org/jira/browse/SPARK-4023 > Project: Spark > Issue Type: Bug > Components: MLlib, PySpark > Affects Versions: 1.2.0 > Reporter: Xiangrui Meng > Assignee: Davies Liu > Priority: Critical > > {code} > from pyspark.mllib.stat import Statistics > from pyspark.mllib.random import RandomRDDs > data = RandomRDDs.uniformVectorRDD(sc, 100000, 10, 10) > Statistics.colStats(data) > {code} > throws > {code} > Py4JJavaError: An error occurred while calling o37.colStats. > : org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 > in stage 0.0 failed 1 times, most recent failure: Lost task 2.0 in stage 0.0 > (TID 2, localhost): net.razorvine.pickle.PickleException: expected zero > arguments for construction of ClassDict (for > numpy.core.multiarray._reconstruct) > > net.razorvine.pickle.objects.ClassDictConstructor.construct(ClassDictConstructor.java:23) > net.razorvine.pickle.Unpickler.load_reduce(Unpickler.java:617) > net.razorvine.pickle.Unpickler.dispatch(Unpickler.java:170) > net.razorvine.pickle.Unpickler.load(Unpickler.java:84) > net.razorvine.pickle.Unpickler.loads(Unpickler.java:97) > > org.apache.spark.mllib.api.python.SerDe$$anonfun$pythonToJava$1$$anonfun$apply$1.apply(PythonMLLibAPI.scala:695) > > org.apache.spark.mllib.api.python.SerDe$$anonfun$pythonToJava$1$$anonfun$apply$1.apply(PythonMLLibAPI.scala:694) > scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371) > scala.collection.Iterator$class.foreach(Iterator.scala:727) > scala.collection.AbstractIterator.foreach(Iterator.scala:1157) > > scala.collection.TraversableOnce$class.foldLeft(TraversableOnce.scala:144) > scala.collection.AbstractIterator.foldLeft(Iterator.scala:1157) > > 
scala.collection.TraversableOnce$class.aggregate(TraversableOnce.scala:201) > scala.collection.AbstractIterator.aggregate(Iterator.scala:1157) > > org.apache.spark.mllib.rdd.RDDFunctions$$anonfun$4.apply(RDDFunctions.scala:99) > > org.apache.spark.mllib.rdd.RDDFunctions$$anonfun$4.apply(RDDFunctions.scala:99) > > org.apache.spark.mllib.rdd.RDDFunctions$$anonfun$5.apply(RDDFunctions.scala:100) > > org.apache.spark.mllib.rdd.RDDFunctions$$anonfun$5.apply(RDDFunctions.scala:100) > org.apache.spark.rdd.RDD$$anonfun$13.apply(RDD.scala:599) > org.apache.spark.rdd.RDD$$anonfun$13.apply(RDD.scala:599) > > org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35) > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262) > org.apache.spark.rdd.RDD.iterator(RDD.scala:229) > > org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35) > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262) > org.apache.spark.rdd.RDD.iterator(RDD.scala:229) > > org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:68) > > org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41) > org.apache.spark.scheduler.Task.run(Task.scala:56) > org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:181) > > java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) > > java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) > java.lang.Thread.run(Thread.java:744) > {code} -- This message was sent by Atlassian JIRA (v6.3.4#6332) --------------------------------------------------------------------- To unsubscribe, e-mail: issues-unsubscribe@spark.apache.org For additional commands, e-mail: issues-help@spark.apache.org