[ https://issues.apache.org/jira/browse/SPARK-16168?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15347566#comment-15347566 ]
AnfengYuan commented on SPARK-16168:
------------------------------------
OK, to make it clear, I can reproduce it with the following steps:
1. Use Hive (version 1.2.1) to create an ORC table:
{code}create table testorc(id bigint, name string) stored as orc;{code}
2. Insert one row into this table:
{code}insert into testorc values(1, '1');{code}
3. Start the spark-sql shell and describe the table. I got:
{code}
spark-sql> desc testorc;
16/06/24 10:20:03 INFO execution.SparkSqlParser: Parsing command: desc testorc
16/06/24 10:20:03 INFO spark.SparkContext: Starting job: processCmd at CliDriver.java:376
16/06/24 10:20:03 INFO scheduler.DAGScheduler: Got job 1 (processCmd at CliDriver.java:376) with 1 output partitions
16/06/24 10:20:03 INFO scheduler.DAGScheduler: Final stage: ResultStage 1 (processCmd at CliDriver.java:376)
16/06/24 10:20:03 INFO scheduler.DAGScheduler: Parents of final stage: List()
16/06/24 10:20:03 INFO scheduler.DAGScheduler: Missing parents: List()
16/06/24 10:20:03 INFO scheduler.DAGScheduler: Submitting ResultStage 1 (MapPartitionsRDD[5] at processCmd at CliDriver.java:376), which has no missing parents
16/06/24 10:20:03 INFO memory.MemoryStore: Block broadcast_1 stored as values in memory (estimated size 4.2 KB, free 912.3 MB)
16/06/24 10:20:03 INFO memory.MemoryStore: Block broadcast_1_piece0 stored as bytes in memory (estimated size 2.5 KB, free 912.3 MB)
16/06/24 10:20:03 INFO storage.BlockManagerInfo: Added broadcast_1_piece0 in memory on 192.168.178.37:2874 (size: 2.5 KB, free: 912.3 MB)
16/06/24 10:20:03 INFO spark.SparkContext: Created broadcast 1 from broadcast at DAGScheduler.scala:996
16/06/24 10:20:03 INFO scheduler.DAGScheduler: Submitting 1 missing tasks from ResultStage 1 (MapPartitionsRDD[5] at processCmd at CliDriver.java:376)
16/06/24 10:20:03 INFO scheduler.TaskSchedulerImpl: Adding task set 1.0 with 1 tasks
16/06/24 10:20:03 INFO scheduler.TaskSetManager: Starting task 0.0 in stage 1.0 (TID 1, localhost, partition 0, PROCESS_LOCAL, 5571 bytes)
16/06/24 10:20:03 INFO executor.Executor: Running task 0.0 in stage 1.0 (TID 1)
16/06/24 10:20:03 INFO codegen.CodeGenerator: Code generated in 23.231239 ms
16/06/24 10:20:03 INFO executor.Executor: Finished task 0.0 in stage 1.0 (TID 1). 1046 bytes result sent to driver
16/06/24 10:20:03 INFO scheduler.TaskSetManager: Finished task 0.0 in stage 1.0 (TID 1) in 78 ms on localhost (1/1)
16/06/24 10:20:03 INFO scheduler.TaskSchedulerImpl: Removed TaskSet 1.0, whose tasks have all completed, from pool
16/06/24 10:20:03 INFO scheduler.DAGScheduler: ResultStage 1 (processCmd at CliDriver.java:376) finished in 0.079 s
16/06/24 10:20:03 INFO scheduler.DAGScheduler: Job 1 finished: processCmd at CliDriver.java:376, took 0.097624 s
id bigint NULL
name string NULL
Time taken: 0.28 seconds, Fetched 2 row(s)
16/06/24 10:20:03 INFO CliDriver: Time taken: 0.28 seconds, Fetched 2 row(s)
{code}
4. Query this table:
{code}select * from testorc;{code}
Then I got this error:
{code}
Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1429)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1417)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1416)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1416)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802)
    at scala.Option.foreach(Option.scala:257)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:802)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1638)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1597)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1586)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:628)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1872)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1885)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1898)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1912)
    at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:889)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:358)
    at org.apache.spark.rdd.RDD.collect(RDD.scala:888)
    at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:290)
    at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:310)
    at org.apache.spark.sql.execution.QueryExecution$$anonfun$hiveResultString$3.apply(QueryExecution.scala:131)
    at org.apache.spark.sql.execution.QueryExecution$$anonfun$hiveResultString$3.apply(QueryExecution.scala:130)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:57)
    at org.apache.spark.sql.execution.QueryExecution.hiveResultString(QueryExecution.scala:130)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLDriver.run(SparkSQLDriver.scala:63)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.processCmd(SparkSQLCLIDriver.scala:323)
    at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:376)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver$.main(SparkSQLCLIDriver.scala:239)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.main(SparkSQLCLIDriver.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:497)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:729)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:185)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:210)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:124)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.lang.IllegalArgumentException: Field "id" does not exist.
    at org.apache.spark.sql.types.StructType$$anonfun$fieldIndex$1.apply(StructType.scala:254)
    at org.apache.spark.sql.types.StructType$$anonfun$fieldIndex$1.apply(StructType.scala:254)
    at scala.collection.MapLike$class.getOrElse(MapLike.scala:128)
    at scala.collection.AbstractMap.getOrElse(Map.scala:59)
    at org.apache.spark.sql.types.StructType.fieldIndex(StructType.scala:253)
    at org.apache.spark.sql.hive.orc.OrcRelation$$anonfun$10.apply(OrcFileFormat.scala:379)
    at org.apache.spark.sql.hive.orc.OrcRelation$$anonfun$10.apply(OrcFileFormat.scala:379)
    at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
    at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
    at scala.collection.Iterator$class.foreach(Iterator.scala:893)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
    at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
    at org.apache.spark.sql.types.StructType.foreach(StructType.scala:95)
    at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
    at org.apache.spark.sql.types.StructType.map(StructType.scala:95)
    at org.apache.spark.sql.hive.orc.OrcRelation$.setRequiredColumns(OrcFileFormat.scala:379)
    at org.apache.spark.sql.hive.orc.OrcFileFormat$$anonfun$buildReader$2.apply(OrcFileFormat.scala:135)
    at org.apache.spark.sql.hive.orc.OrcFileFormat$$anonfun$buildReader$2.apply(OrcFileFormat.scala:124)
    at org.apache.spark.sql.execution.datasources.FileFormat$$anon$1.apply(fileSourceInterfaces.scala:293)
    at org.apache.spark.sql.execution.datasources.FileFormat$$anon$1.apply(fileSourceInterfaces.scala:277)
    at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:114)
    at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:91)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:370)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:246)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:240)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:780)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:780)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
    at org.apache.spark.scheduler.Task.run(Task.scala:85)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
{code}
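A note on why step 3 works while step 4 fails: {{desc}} only reads the table schema from the metastore, whereas {{select}} actually opens the ORC file through Spark's native reader. My guess (a hypothesis, not confirmed in this thread) is that Hive 1.2.1 writes the ORC file with placeholder column names ({{_col0}}, {{_col1}}, ...) rather than the metastore names, so {{OrcRelation.setRequiredColumns}} cannot find "id" in the file's physical schema. Here is a quick way to check that from spark-shell, plus a possible workaround that routes the read through the Hive SerDe instead of {{OrcFileFormat}} (the warehouse path below is an assumption for a default layout):
{code}
// Inspect the physical schema recorded inside the ORC file itself.
// If the columns print as _col0/_col1 rather than id/name, the native
// ORC reader cannot match the metastore schema by field name.
// NOTE: /user/hive/warehouse/testorc is an assumed default path.
val raw = spark.read.orc("/user/hive/warehouse/testorc")
raw.printSchema()

// Possible workaround (untested here): fall back to the Hive SerDe
// reader instead of Spark's native OrcFileFormat for metastore ORC tables.
spark.sql("SET spark.sql.hive.convertMetastoreOrc=false")
spark.sql("SELECT * FROM testorc").show()
{code}
If the physical schema really does come back as {{_col0}}/{{_col1}}, that would explain why {{StructType.fieldIndex}} fails for every real column name.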
> Spark sql can not read ORC table
> --------------------------------
>
> Key: SPARK-16168
> URL: https://issues.apache.org/jira/browse/SPARK-16168
> Project: Spark
> Issue Type: Bug
> Components: SQL
> Affects Versions: 2.0.0, 2.0.1, 2.1.0
> Reporter: AnfengYuan
>
> When using the spark-sql shell to query an ORC table (generated by the tool at https://github.com/hortonworks/hive-testbench), the following exceptions are thrown:
> {code}
> Driver stacktrace:
>     at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1429)
>     at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1417)
>     at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1416)
>     at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
>     at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
>     at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1416)
>     at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802)
>     at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802)
>     at scala.Option.foreach(Option.scala:257)
>     at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:802)
>     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1638)
>     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1597)
>     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1586)
>     at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
>     at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:628)
>     at org.apache.spark.SparkContext.runJob(SparkContext.scala:1872)
>     at org.apache.spark.SparkContext.runJob(SparkContext.scala:1885)
>     at org.apache.spark.SparkContext.runJob(SparkContext.scala:1898)
>     at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:347)
>     at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:39)
>     at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:310)
>     at org.apache.spark.sql.execution.QueryExecution$$anonfun$hiveResultString$3.apply(QueryExecution.scala:131)
>     at org.apache.spark.sql.execution.QueryExecution$$anonfun$hiveResultString$3.apply(QueryExecution.scala:130)
>     at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:57)
>     at org.apache.spark.sql.execution.QueryExecution.hiveResultString(QueryExecution.scala:130)
>     at org.apache.spark.sql.hive.thriftserver.SparkSQLDriver.run(SparkSQLDriver.scala:63)
>     at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.processCmd(SparkSQLCLIDriver.scala:323)
>     at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:376)
>     at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver$.main(SparkSQLCLIDriver.scala:239)
>     at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.main(SparkSQLCLIDriver.scala)
>     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>     at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
>     at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>     at java.lang.reflect.Method.invoke(Method.java:497)
>     at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:729)
>     at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:185)
>     at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:210)
>     at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:124)
>     at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
> Caused by: java.lang.IllegalArgumentException: Field "i_item_sk" does not exist.
>     at org.apache.spark.sql.types.StructType$$anonfun$fieldIndex$1.apply(StructType.scala:254)
>     at org.apache.spark.sql.types.StructType$$anonfun$fieldIndex$1.apply(StructType.scala:254)
>     at scala.collection.MapLike$class.getOrElse(MapLike.scala:128)
>     at scala.collection.AbstractMap.getOrElse(Map.scala:59)
>     at org.apache.spark.sql.types.StructType.fieldIndex(StructType.scala:253)
>     at org.apache.spark.sql.hive.orc.OrcRelation$$anonfun$10.apply(OrcFileFormat.scala:379)
>     at org.apache.spark.sql.hive.orc.OrcRelation$$anonfun$10.apply(OrcFileFormat.scala:379)
>     at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
>     at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
>     at scala.collection.Iterator$class.foreach(Iterator.scala:893)
>     at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
>     at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
>     at org.apache.spark.sql.types.StructType.foreach(StructType.scala:95)
>     at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
>     at org.apache.spark.sql.types.StructType.map(StructType.scala:95)
>     at org.apache.spark.sql.hive.orc.OrcRelation$.setRequiredColumns(OrcFileFormat.scala:379)
>     at org.apache.spark.sql.hive.orc.OrcFileFormat$$anonfun$buildReader$2.apply(OrcFileFormat.scala:135)
>     at org.apache.spark.sql.hive.orc.OrcFileFormat$$anonfun$buildReader$2.apply(OrcFileFormat.scala:124)
>     at org.apache.spark.sql.execution.datasources.FileFormat$$anon$1.apply(fileSourceInterfaces.scala:293)
>     at org.apache.spark.sql.execution.datasources.FileFormat$$anon$1.apply(fileSourceInterfaces.scala:277)
>     at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:114)
>     at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:91)
>     at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
>     at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
>     at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:370)
>     at org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:246)
>     at org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:240)
>     at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:780)
>     at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:780)
>     at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
>     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
>     at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
>     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
>     at org.apache.spark.scheduler.Task.run(Task.scala:85)
>     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>     at java.lang.Thread.run(Thread.java:745)
> {code}