[ https://issues.apache.org/jira/browse/HIVE-18289?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16297977#comment-16297977 ]

liyunzhang commented on HIVE-18289:
-----------------------------------

If the same query runs against a Parquet table instead, the exception is:
{code}
Job failed with java.lang.NoSuchMethodException: org.apache.hadoop.io.ArrayWritable.<init>()
FAILED: Execution Error, return code 3 from org.apache.hadoop.hive.ql.exec.spark.SparkTask. java.util.concurrent.ExecutionException: Exception thrown by job
        at org.apache.spark.JavaFutureActionWrapper.getImpl(FutureAction.scala:272)
        at org.apache.spark.JavaFutureActionWrapper.get(FutureAction.scala:277)
        at org.apache.hive.spark.client.RemoteDriver$JobWrapper.call(RemoteDriver.java:362)
        at org.apache.hive.spark.client.RemoteDriver$JobWrapper.call(RemoteDriver.java:323)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 in stage 4.0 failed 4 times, most recent failure: Lost task 2.3 in stage 4.0 (TID 59, bdpe38): java.lang.RuntimeException: java.lang.NoSuchMethodException: org.apache.hadoop.io.ArrayWritable.<init>()
        at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:134)
        at org.apache.hadoop.io.WritableUtils.clone(WritableUtils.java:217)
        at org.apache.hadoop.hive.ql.exec.spark.MapInput$CopyFunction.call(MapInput.java:85)
        at org.apache.hadoop.hive.ql.exec.spark.MapInput$CopyFunction.call(MapInput.java:72)
        at org.apache.spark.api.java.JavaPairRDD$$anonfun$pairFunToScalaFun$1.apply(JavaPairRDD.scala:1031)
        at org.apache.spark.api.java.JavaPairRDD$$anonfun$pairFunToScalaFun$1.apply(JavaPairRDD.scala:1031)
        at scala.collection.Iterator$$anon$11.next(Iterator.scala:409)
        at org.apache.spark.storage.memory.MemoryStore.putIteratorAsValues(MemoryStore.scala:214)
        at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:919)
        at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:910)
        at org.apache.spark.storage.BlockManager.doPut(BlockManager.scala:866)
        at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:910)
        at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:668)
        at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:330)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:281)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
        at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:79)
        at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:47)
        at org.apache.spark.scheduler.Task.run(Task.scala:85)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.NoSuchMethodException: org.apache.hadoop.io.ArrayWritable.<init>()
        at java.lang.Class.getConstructor0(Class.java:3082)
        at java.lang.Class.getDeclaredConstructor(Class.java:2178)
        at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:128)
        ... 24 more

Driver stacktrace:
        at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1450)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1438)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1437)
        at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
        at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1437)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:811)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:811)
        at scala.Option.foreach(Option.scala:257)
        at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:811)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1659)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1618)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1607)
        at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
Caused by: java.lang.RuntimeException: java.lang.NoSuchMethodException: org.apache.hadoop.io.ArrayWritable.<init>()
        at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:134)
        at org.apache.hadoop.io.WritableUtils.clone(WritableUtils.java:217)
        at org.apache.hadoop.hive.ql.exec.spark.MapInput$CopyFunction.call(MapInput.java:85)
        at org.apache.hadoop.hive.ql.exec.spark.MapInput$CopyFunction.call(MapInput.java:72)
        at org.apache.spark.api.java.JavaPairRDD$$anonfun$pairFunToScalaFun$1.apply(JavaPairRDD.scala:1031)
        at org.apache.spark.api.java.JavaPairRDD$$anonfun$pairFunToScalaFun$1.apply(JavaPairRDD.scala:1031)
        at scala.collection.Iterator$$anon$11.next(Iterator.scala:409)
        at org.apache.spark.storage.memory.MemoryStore.putIteratorAsValues(MemoryStore.scala:214)
        at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:919)
        at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:910)
        at org.apache.spark.storage.BlockManager.doPut(BlockManager.scala:866)
        at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:910)
        at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:668)
        at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:330)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:281)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
        at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:79)
        at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:47)
        at org.apache.spark.scheduler.Task.run(Task.scala:85)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.NoSuchMethodException: org.apache.hadoop.io.ArrayWritable.<init>()
        at java.lang.Class.getConstructor0(Class.java:3082)
        at java.lang.Class.getDeclaredConstructor(Class.java:2178)
        at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:128)
        ... 24 more

{code}

The root cause is that org.apache.hadoop.io.ArrayWritable has no empty (no-arg) constructor, so when MapInput$CopyFunction copies each row for the RDD cache via WritableUtils.clone, the underlying ReflectionUtils.newInstance call cannot instantiate the copy.
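To see the failure outside of Hive, here is a minimal standalone sketch of the same clone path (the class name CloneRepro is made up for illustration; it only assumes hadoop-common on the classpath):
{code}
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;

// Hypothetical repro, not part of Hive; it mirrors what
// MapInput$CopyFunction does per row when the RDD cache is enabled.
public class CloneRepro {
    public static void main(String[] args) {
        ArrayWritable row =
            new ArrayWritable(Text.class, new Text[]{new Text("a")});
        // WritableUtils.clone builds the copy with ReflectionUtils.newInstance,
        // which needs a no-arg constructor. ArrayWritable only declares
        // ArrayWritable(Class), ArrayWritable(Class, Writable[]) and
        // ArrayWritable(String[]), so this throws
        // RuntimeException(NoSuchMethodException: ArrayWritable.<init>()).
        ArrayWritable copy = WritableUtils.clone(row, new Configuration());
    }
}
{code}
The same applies to org.apache.hadoop.hive.ql.io.orc.OrcStruct in the original report: any row type without a no-arg constructor breaks this clone-based copy.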

> Fix jar dependency when enable rdd cache in Hive on Spark
> ---------------------------------------------------------
>
>                 Key: HIVE-18289
>                 URL: https://issues.apache.org/jira/browse/HIVE-18289
>             Project: Hive
>          Issue Type: Bug
>            Reporter: liyunzhang
>            Assignee: liyunzhang
>
> Running TPC-DS query28 with HIVE-17486's 4th patch enabled, on tpcds_bin_partitioned_orc_10, fails in both Spark local and YARN mode.
> Command:
> {code}
> set spark.local=yarn-client;
> echo 'use tpcds_bin_partitioned_orc_10;source query28.sql;'|hive --hiveconf spark.app.name=query28.sql --hiveconf hive.spark.optimize.shared.work=true -i testbench.settings -i query28.sql.setting
> {code}
> The exception:
> {code}
> java.lang.RuntimeException: java.lang.NoSuchMethodException: org.apache.hadoop.hive.ql.io.orc.OrcStruct.<init>()
>         at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:134) ~[hadoop-common-2.7.3.jar:?]
>         at org.apache.hadoop.io.WritableUtils.clone(WritableUtils.java:217) ~[hadoop-common-2.7.3.jar:?]
>         at org.apache.hadoop.hive.ql.exec.spark.MapInput$CopyFunction.call(MapInput.java:85) ~[hive-exec-3.0.0-SNAPSHOT.jar:3.0.0-SNAPSHOT]
>         at org.apache.hadoop.hive.ql.exec.spark.MapInput$CopyFunction.call(MapInput.java:72) ~[hive-exec-3.0.0-SNAPSHOT.jar:3.0.0-SNAPSHOT]
>         at org.apache.spark.api.java.JavaPairRDD$$anonfun$pairFunToScalaFun$1.apply(JavaPairRDD.scala:1031) ~[spark-core_2.11-2.0.0.jar:2.0.0]
>         at org.apache.spark.api.java.JavaPairRDD$$anonfun$pairFunToScalaFun$1.apply(JavaPairRDD.scala:1031) ~[spark-core_2.11-2.0.0.jar:2.0.0]
>         at scala.collection.Iterator$$anon$11.next(Iterator.scala:409) ~[scala-library-2.11.8.jar:?]
>         at org.apache.spark.storage.memory.MemoryStore.putIteratorAsValues(MemoryStore.scala:214) ~[spark-core_2.11-2.0.0.jar:2.0.0]
>         at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:919) ~[spark-core_2.11-2.0.0.jar:2.0.0]
>         at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:910) ~[spark-core_2.11-2.0.0.jar:2.0.0]
>         at org.apache.spark.storage.BlockManager.doPut(BlockManager.scala:866) ~[spark-core_2.11-2.0.0.jar:2.0.0]
>         at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:910) ~[spark-core_2.11-2.0.0.jar:2.0.0]
>         at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:668) ~[spark-core_2.11-2.0.0.jar:2.0.0]
>         at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:330) ~[spark-core_2.11-2.0.0.jar:2.0.0]
>         at org.apache.spark.rdd.RDD.iterator(RDD.scala:281) ~[spark-core_2.11-2.0.0.jar:2.0.0]
>         at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) ~[spark-core_2.11-2.0.0.jar:2.0.0]
>         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319) ~[spark-core_2.11-2.0.0.jar:2.0.0]
>         at org.apache.spark.rdd.RDD.iterator(RDD.scala:283) ~[spark-core_2.11-2.0.0.jar:2.0.0]
>         at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:79) ~[spark-core_2
> {code}



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)
