Cheng Lian created SPARK-10005:
----------------------------------
Summary: Parquet reader doesn't handle schema merging properly for nested structs
Key: SPARK-10005
URL: https://issues.apache.org/jira/browse/SPARK-10005
Project: Spark
Issue Type: Bug
Components: SQL
Affects Versions: 1.5.0
Reporter: Cheng Lian
Assignee: Cheng Lian
Priority: Blocker
Spark shell snippet to reproduce this issue:
{code}
import sqlContext.implicits._

val path = "file:///tmp/foo"

// Write a single Parquet file whose rows contain a nested struct with two fields
(0 until 3).map(i => Tuple1((s"a_$i", s"b_$i"))).toDF().coalesce(1).write.mode("overwrite").parquet(path)

// Append a second file whose nested struct has a third field
(0 until 3).map(i => Tuple1((s"a_$i", s"b_$i", s"c_$i"))).toDF().coalesce(1).write.mode("append").parquet(path)

// Read the two files back with schema merging enabled
sqlContext.read.option("mergeSchema", "true").parquet(path).show()
{code}
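For reference, the struct written first has two fields and the struct appended afterwards has three, so the merged schema should be the three-field variant, with {{_3}} null for rows from the first file. A quick way to inspect this without scanning any rows (the expected output below is derived by hand from the snippet above, not captured from a shell session):

{code}
// Expected merged schema, derived by hand from the two writes above:
//
//   root
//    |-- _1: struct (nullable = true)
//    |    |-- _1: string (nullable = true)
//    |    |-- _2: string (nullable = true)
//    |    |-- _3: string (nullable = true)
//
sqlContext.read.option("mergeSchema", "true").parquet(path).printSchema()
{code}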
The {{show()}} call then fails with the following exception:
{noformat}
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 39.0 failed 1 times, most recent failure: Lost task 0.0 in stage 39.0 (TID 122, localhost): org.apache.parquet.io.ParquetDecodingException: Can not read value at 0 in block -1 in file file:/tmp/foo/part-r-00000-ba9dc7cf-3210-4006-9cf7-02c3d57483cd.gz.parquet
    at org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:228)
    at org.apache.parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:201)
    at org.apache.spark.rdd.SqlNewHadoopRDD$$anon$1.hasNext(SqlNewHadoopRDD.scala:168)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
    at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:308)
    at scala.collection.Iterator$class.foreach(Iterator.scala:727)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
    at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
    at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
    at scala.collection.AbstractIterator.to(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
    at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
    at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$5.apply(SparkPlan.scala:215)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$5.apply(SparkPlan.scala:215)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1826)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1826)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
    at org.apache.spark.scheduler.Task.run(Task.scala:88)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.ArrayIndexOutOfBoundsException: 2
    at org.apache.spark.sql.execution.datasources.parquet.CatalystRowConverter.getConverter(CatalystRowConverter.scala:136)
    at org.apache.parquet.io.RecordReaderImplementation.<init>(RecordReaderImplementation.java:269)
    at org.apache.parquet.io.MessageColumnIO$1.visit(MessageColumnIO.java:134)
    at org.apache.parquet.io.MessageColumnIO$1.visit(MessageColumnIO.java:99)
    at org.apache.parquet.filter2.compat.FilterCompat$NoOpFilter.accept(FilterCompat.java:154)
    at org.apache.parquet.io.MessageColumnIO.getRecordReader(MessageColumnIO.java:99)
    at org.apache.parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:137)
    at org.apache.parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:208)
    ... 25 more
{noformat}
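The {{Caused by}} frame points at {{CatalystRowConverter.getConverter}}. One plausible reading of the trace: the converter array for the nested struct is sized from one schema but indexed with ordinals from the other, so ordinal 2 (the third field of the merged struct) overflows while decoding the two-field file. A minimal, self-contained sketch of that failure mode in plain Scala (illustrative only, not Spark's actual converter code):

{code}
object NestedMergeSketch extends App {
  // Nested fields physically present in the first Parquet file
  val fileFields = Array("_1", "_2")
  // Nested fields in the merged schema covering both files
  val mergedFields = Array("_1", "_2", "_3")

  // One converter per *file* field...
  val converters = fileFields.map(name => s"converter for $name")

  // ...but looked up with *merged-schema* ordinals:
  for (i <- mergedFields.indices)
    println(converters(i)) // i == 2 throws ArrayIndexOutOfBoundsException: 2
}
{code}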