[ https://issues.apache.org/jira/browse/SPARK-9832?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14693982#comment-14693982 ]

Davies Liu commented on SPARK-9832:
-----------------------------------

[~marmbrus] It also fails if unsafe and codegen are disabled; should we create a separate JIRA for it?
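
For anyone reproducing this, here is a minimal sketch of turning both features off before running the query. It assumes the Spark 1.5-era configuration keys and that {{q98}} holds the query text from this ticket; adjust for your build:

{code}
// Hypothetical repro sketch: assumes an existing SQLContext with the TPCDS
// tables registered, and Spark 1.5-era configuration keys.
sqlContext.setConf("spark.sql.unsafe.enabled", "false") // turn off unsafe rows
sqlContext.setConf("spark.sql.codegen", "false")        // turn off codegen
sqlContext.sql(q98).collect()                           // q98 = Q98 query text
{code}

With both flags off, broadcast deserialization still fails: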

{code}
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 43.0 failed 4 times, most recent failure: Lost task 0.3 in stage 43.0 (TID 519, 10.0.237.253): java.io.InvalidClassException: org.apache.spark.sql.execution.joins.UniqueKeyHashedRelation; no valid constructor
        at java.io.ObjectStreamClass$ExceptionInfo.newInvalidClassException(ObjectStreamClass.java:150)
        at java.io.ObjectStreamClass.checkDeserialize(ObjectStreamClass.java:768)
        at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1772)
        at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1350)
        at java.io.ObjectInputStream.readObject(ObjectInputStream.java:370)
        at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:72)
        at org.apache.spark.broadcast.TorrentBroadcast$.unBlockifyObject(TorrentBroadcast.scala:217)
        at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$readBroadcastBlock$1.apply(TorrentBroadcast.scala:178)
        at org.apache.spark.util.Utils$.tryOrIOException(Utils.scala:1276)
        at org.apache.spark.broadcast.TorrentBroadcast.readBroadcastBlock(TorrentBroadcast.scala:165)
        at org.apache.spark.broadcast.TorrentBroadcast._value$lzycompute(TorrentBroadcast.scala:64)
        at org.apache.spark.broadcast.TorrentBroadcast._value(TorrentBroadcast.scala:64)
        at org.apache.spark.broadcast.TorrentBroadcast.getValue(TorrentBroadcast.scala:88)
        at org.apache.spark.broadcast.Broadcast.value(Broadcast.scala:70)
        at org.apache.spark.sql.execution.joins.BroadcastHashJoin$$anonfun$2.apply(BroadcastHashJoin.scala:91)
        at org.apache.spark.sql.execution.joins.BroadcastHashJoin$$anonfun$2.apply(BroadcastHashJoin.scala:90)
        at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:706)
        at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:706)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
        at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
        at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
        at org.apache.spark.scheduler.Task.run(Task.scala:88)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)

Driver stacktrace:
        at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1256)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1247)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1246)
        at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
        at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1246)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:681)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:681)
        at scala.Option.foreach(Option.scala:236)
        at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:681)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1466)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1428)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1417)
        at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
        at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:554)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:1795)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:1915)
        at org.apache.spark.rdd.RDD$$anonfun$reduce$1.apply(RDD.scala:1003)
        at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:147)
        at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:108)
        at org.apache.spark.rdd.RDD.withScope(RDD.scala:306)
        at org.apache.spark.rdd.RDD.reduce(RDD.scala:985)
        at org.apache.spark.rdd.RDD$$anonfun$takeOrdered$1.apply(RDD.scala:1366)
        at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:147)
        at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:108)
        at org.apache.spark.rdd.RDD.withScope(RDD.scala:306)
        at org.apache.spark.rdd.RDD.takeOrdered(RDD.scala:1353)
        at org.apache.spark.sql.execution.TakeOrderedAndProject.collectData(basicOperators.scala:234)
        at org.apache.spark.sql.execution.TakeOrderedAndProject.executeCollect(basicOperators.scala:240)
        at org.apache.spark.sql.DataFrame$$anonfun$collect$1.apply(DataFrame.scala:1383)
        at org.apache.spark.sql.DataFrame$$anonfun$collect$1.apply(DataFrame.scala:1383)
        at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
        at org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:1899)
        at org.apache.spark.sql.DataFrame.collect(DataFrame.scala:1382)
        at org.apache.spark.sql.DataFrame.head(DataFrame.scala:1312)
        at org.apache.spark.sql.DataFrame.take(DataFrame.scala:1375)
        at com.databricks.backend.daemon.driver.OutputAggregator$.withOutputAggregation0(OutputAggregator.scala:70)
        at com.databricks.backend.daemon.driver.OutputAggregator$.withOutputAggregation(OutputAggregator.scala:42)
        at com.databricks.backend.daemon.driver.ScalaDriverLocal$$anonfun$repl$2.apply(ScalaDriverLocal.scala:171)
        at com.databricks.backend.daemon.driver.ScalaDriverLocal$$anonfun$repl$2.apply(ScalaDriverLocal.scala:164)
        at scala.Option.map(Option.scala:145)
        at com.databricks.backend.daemon.driver.ScalaDriverLocal.repl(ScalaDriverLocal.scala:164)
        at com.databricks.backend.daemon.driver.DriverLocal.execute(DriverLocal.scala:175)
        at com.databricks.backend.daemon.driver.DriverWrapper$$anonfun$3.apply(DriverWrapper.scala:484)
        at com.databricks.backend.daemon.driver.DriverWrapper$$anonfun$3.apply(DriverWrapper.scala:484)
        at scala.util.Try$.apply(Try.scala:161)
        at com.databricks.backend.daemon.driver.DriverWrapper.executeCommand(DriverWrapper.scala:481)
        at com.databricks.backend.daemon.driver.DriverWrapper.runInner(DriverWrapper.scala:383)
        at com.databricks.backend.daemon.driver.DriverWrapper.run(DriverWrapper.scala:194)
        at java.lang.Thread.run(Thread.java:745)
{code}
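
The "no valid constructor" message is standard {{java.io}} serialization behavior rather than anything Spark-specific: at read time the JVM must invoke a no-arg constructor on the closest non-serializable superclass, and {{ObjectStreamClass}} rejects the class up front if that constructor is missing or inaccessible. A minimal, self-contained sketch of the same failure (illustration only, no Spark involved):

{code}
import java.io._

class Base(val n: Int)                                   // not Serializable, no no-arg constructor
class Derived(n: Int) extends Base(n) with Serializable  // serializable subclass

object NoValidCtorDemo extends App {
  val buf = new ByteArrayOutputStream()
  new ObjectOutputStream(buf).writeObject(new Derived(1))  // writing succeeds
  // Reading throws "java.io.InvalidClassException: Derived; no valid
  // constructor", because Base cannot be re-instantiated during readObject.
  new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray)).readObject()
}
{code}

So presumably UniqueKeyHashedRelation, or one of its non-serializable parents, is missing the no-arg constructor that Java deserialization needs.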

> TPCDS Q98 Fails
> ---------------
>
>                 Key: SPARK-9832
>                 URL: https://issues.apache.org/jira/browse/SPARK-9832
>             Project: Spark
>          Issue Type: Sub-task
>          Components: SQL
>            Reporter: Michael Armbrust
>            Assignee: Davies Liu
>            Priority: Blocker
>
> {code}
> select
>   i_item_desc,
>   i_category,
>   i_class,
>   i_current_price,
>   sum(ss_ext_sales_price) as itemrevenue
>   -- sum(ss_ext_sales_price) * 100 / sum(sum(ss_ext_sales_price)) over (partition by i_class) as revenueratio
> from
>   store_sales
>   join item on (store_sales.ss_item_sk = item.i_item_sk)
>   join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
> where
>   i_category in('Jewelry', 'Sports', 'Books')
>   -- and d_date between cast('2001-01-12' as date) and (cast('2001-01-12' as date) + 30)
>   -- and d_date between '2001-01-12' and '2001-02-11'
>   -- and ss_date between '2001-01-12' and '2001-02-11'
>   -- and ss_sold_date_sk between 2451922 and 2451952  -- partition key filter
>   and ss_sold_date_sk between 2451911 and 2451941  -- partition key filter (1 calendar month)
>   and d_date between '2001-01-01' and '2001-01-31'
> group by
>   i_item_id,
>   i_item_desc,
>   i_category,
>   i_class,
>   i_current_price
> order by
>   i_category,
>   i_class,
>   i_item_id,
>   i_item_desc
>   -- revenueratio
> limit 1000
> {code}
> {code}
> Job aborted due to stage failure: Task 11 in stage 62.0 failed 4 times, most recent failure: Lost task 11.3 in stage 62.0 (TID 5289, 10.0.227.73): java.lang.IllegalArgumentException: Unscaled value too large for precision
>       at org.apache.spark.sql.types.Decimal.set(Decimal.scala:76)
>       at org.apache.spark.sql.types.Decimal$.apply(Decimal.scala:338)
>       at org.apache.spark.sql.types.Decimal.apply(Decimal.scala)
>       at org.apache.spark.sql.catalyst.expressions.UnsafeRow.getDecimal(UnsafeRow.java:386)
>       at org.apache.spark.sql.catalyst.expressions.JoinedRow.getDecimal(JoinedRow.scala:97)
>       at org.apache.spark.sql.catalyst.expressions.GeneratedClass$SpecificUnsafeProjection.apply(Unknown Source)
>       at org.apache.spark.sql.catalyst.expressions.GeneratedClass$SpecificUnsafeProjection.apply(Unknown Source)
>       at org.apache.spark.sql.execution.joins.HashJoin$$anon$1.next(HashJoin.scala:101)
>       at org.apache.spark.sql.execution.joins.HashJoin$$anon$1.next(HashJoin.scala:74)
>       at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
>       at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
>       at org.apache.spark.sql.execution.joins.HashJoin$$anon$1.fetchNext(HashJoin.scala:115)
>       at org.apache.spark.sql.execution.joins.HashJoin$$anon$1.hasNext(HashJoin.scala:93)
>       at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
>       at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
>       at org.apache.spark.sql.execution.aggregate.TungstenAggregationIterator.processInputs(TungstenAggregationIterator.scala:353)
>       at org.apache.spark.sql.execution.aggregate.TungstenAggregationIterator.<init>(TungstenAggregationIterator.scala:587)
>       at org.apache.spark.sql.execution.aggregate.TungstenAggregate$$anonfun$doExecute$1$$anonfun$1.apply(TungstenAggregate.scala:72)
>       at org.apache.spark.sql.execution.aggregate.TungstenAggregate$$anonfun$doExecute$1$$anonfun$1.apply(TungstenAggregate.scala:64)
>       at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:706)
>       at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:706)
>       at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
>       at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
>       at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
>       at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
>       at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
>       at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
>       at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
>       at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
>       at org.apache.spark.scheduler.Task.run(Task.scala:88)
>       at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
>       at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>       at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
>       at java.lang.Thread.run(Thread.java:745)
> {code}


