[ https://issues.apache.org/jira/browse/SPARK-27994?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16860442#comment-16860442 ]
Nicolas Pascal commented on SPARK-27994:
----------------------------------------

I'll push a PR with failing test cases to reproduce the issue; a rough sketch of such a test is appended at the end of this message.

> Spark Avro fails to read logical type decimal backed by bytes
> --------------------------------------------------------------
>
>                 Key: SPARK-27994
>                 URL: https://issues.apache.org/jira/browse/SPARK-27994
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>    Affects Versions: 2.4.0
>            Reporter: Nicolas Pascal
>            Priority: Major
>
> A field with the following schema causes Spark to fail when reading the Avro file:
> {noformat}
> {"name":"process_insert_id","type":["null",{"type":"bytes","logicalType":"decimal","precision":10,"scale":0}]}
> {noformat}
> The following record fails:
> {code:java}
> Array[Byte] [32 30 30 30 31 31 30 39 37 34]
> actual:   BigDecimal 237007240188420354029364
> expected: 2000110974
> {code}
> The conversion is performed by the following code in the Spark Avro library 2.4.0 (org.apache.spark.sql.avro.AvroDeserializer, line 149):
> {noformat}
> val bigDecimal = decimalConversions.fromFixed(value.asInstanceOf[GenericFixed], avroType,
>   LogicalTypes.decimal(d.precision, d.scale))
> {noformat}
> The Avro file is readable and produces the expected values when converted to JSON using the Apache Avro tools jar (https://search.maven.org/artifact/org.apache.avro/avro-tools/1.8.2/jar).
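> For reference, here is a minimal plain-Scala sketch (no Spark required) showing where both numbers come from, assuming the byte values listed above are hexadecimal, which is what makes the arithmetic line up. Avro's decimal encoding treats the bytes as a big-endian two's-complement unscaled value, which yields exactly the "actual" BigDecimal, while the same ten bytes decoded as ASCII text spell the "expected" number:
> {code:java}
> import java.math.{BigDecimal, BigInteger}
> import java.nio.charset.StandardCharsets
>
> // The failing field's bytes, as reported above (read as hex).
> val bytes: Array[Byte] =
>   Array(0x32, 0x30, 0x30, 0x30, 0x31, 0x31, 0x30, 0x39, 0x37, 0x34).map(_.toByte)
>
> // Avro decimal encoding: big-endian two's-complement unscaled value, scale 0.
> val asAvroDecimal = new BigDecimal(new BigInteger(bytes), 0)
> println(asAvroDecimal)                               // 237007240188420354029364 -- the "actual" value
>
> // The same ten bytes decoded as ASCII text.
> println(new String(bytes, StandardCharsets.US_ASCII)) // 2000110974 -- the "expected" value
> {code}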
> Full stack trace below:
> {noformat}
> 19/04/17 05:50:45 INFO Client:
>      client token: N/A
>      diagnostics: User class threw exception: org.apache.spark.SparkException: Job aborted.
> 	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:196)
> 	at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:159)
> 	at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:104)
> 	at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:102)
> 	at org.apache.spark.sql.execution.command.DataWritingCommandExec.doExecute(commands.scala:122)
> 	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
> 	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
> 	at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
> 	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> 	at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
> 	at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
> 	at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:80)
> 	at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:80)
> 	at org.apache.spark.sql.DataFrameWriter$$anonfun$runCommand$1.apply(DataFrameWriter.scala:668)
> 	at org.apache.spark.sql.DataFrameWriter$$anonfun$runCommand$1.apply(DataFrameWriter.scala:668)
> 	at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:78)
> 	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
> 	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
> 	at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:668)
> 	at org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:276)
> 	at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:270)
> 	at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:228)
> 	at org.apache.spark.sql.DataFrameWriter.parquet(DataFrameWriter.scala:557)
> 	at au.com.nbnco.io.Io$.writeParquet(Io.scala:38)
> 	at au.com.nbnco.fwk.Outputs$.write(Output.scala:27)
> 	at au.com.nbnco.fwk.Context.write(Context.scala:41)
> 	at au.com.nbnco.job.merge.MergeToActiveDatasetJob$.run(MergeToActiveDatasetJob.scala:10)
> 	at au.com.nbnco.fwk.SparkJobRunner$.au$com$nbnco$fwk$SparkJobRunner$$executeJobRunner(SparkJobRunner.scala:63)
> 	at au.com.nbnco.fwk.SparkJobRunner$$anonfun$2$$anonfun$apply$1.apply$mcV$sp(SparkJobRunner.scala:40)
> 	at au.com.nbnco.fwk.SparkJobRunner$$anonfun$2$$anonfun$apply$1.apply(SparkJobRunner.scala:37)
> 	at au.com.nbnco.fwk.SparkJobRunner$$anonfun$2$$anonfun$apply$1.apply(SparkJobRunner.scala:37)
> 	at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
> 	at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
> 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> 	at java.lang.Thread.run(Thread.java:748)
> Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 5 in stage 10.0 failed 4 times, most recent failure: Lost task 5.3 in stage 10.0 (TID 77, ip-10-11-100-120.aws.nbndc.local, executor 5): java.lang.IllegalArgumentException: Unscaled value too large for precision
> 	at org.apache.spark.sql.types.Decimal.set(Decimal.scala:79)
> 	at org.apache.spark.sql.types.Decimal$.apply(Decimal.scala:456)
> 	at org.apache.spark.sql.avro.AvroDeserializer.org$apache$spark$sql$avro$AvroDeserializer$$createDecimal(AvroDeserializer.scala:285)
> 	at org.apache.spark.sql.avro.AvroDeserializer$$anonfun$org$apache$spark$sql$avro$AvroDeserializer$$newWriter$17.apply(AvroDeserializer.scala:157)
> 	at org.apache.spark.sql.avro.AvroDeserializer$$anonfun$org$apache$spark$sql$avro$AvroDeserializer$$newWriter$17.apply(AvroDeserializer.scala:154)
> 	at org.apache.spark.sql.avro.AvroDeserializer$$anonfun$8.apply(AvroDeserializer.scala:313)
> 	at org.apache.spark.sql.avro.AvroDeserializer$$anonfun$8.apply(AvroDeserializer.scala:309)
> 	at org.apache.spark.sql.avro.AvroDeserializer$$anonfun$getRecordWriter$1.apply(AvroDeserializer.scala:331)
> 	at org.apache.spark.sql.avro.AvroDeserializer$$anonfun$getRecordWriter$1.apply(AvroDeserializer.scala:328)
> 	at org.apache.spark.sql.avro.AvroDeserializer$$anonfun$3.apply(AvroDeserializer.scala:56)
> 	at org.apache.spark.sql.avro.AvroDeserializer$$anonfun$3.apply(AvroDeserializer.scala:54)
> 	at org.apache.spark.sql.avro.AvroDeserializer.deserialize(AvroDeserializer.scala:70)
> 	at org.apache.spark.sql.avro.AvroFileFormat$$anonfun$buildReader$1$$anon$1.next(AvroFileFormat.scala:216)
> 	at org.apache.spark.sql.avro.AvroFileFormat$$anonfun$buildReader$1$$anon$1.next(AvroFileFormat.scala:195)
> 	at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
> 	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.next(FileScanRDD.scala:104)
> 	at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
> 	at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
> 	at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
> 	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:149)
> 	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
> 	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
> 	at org.apache.spark.scheduler.Task.run(Task.scala:121)
> 	at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
> 	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
> 	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
> 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> 	at java.lang.Thread.run(Thread.java:748)
> {noformat}
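A rough sketch of the kind of failing test I plan to include in the PR. This is hypothetical, not the actual patch: the record name "Rec", the /tmp output path, and the scaffolding are placeholders; it assumes spark-avro 2.4.0 and Avro 1.8.x on the classpath and an active SparkSession named spark (e.g. pasted into spark-shell):

{code:java}
import java.io.File

import org.apache.avro.{Conversions, LogicalTypes, Schema}
import org.apache.avro.file.DataFileWriter
import org.apache.avro.generic.{GenericData, GenericDatumWriter, GenericRecord}

// A record schema mirroring the failing field: a nullable bytes-backed decimal(10, 0).
val schemaJson =
  """{"type":"record","name":"Rec","fields":[
    |  {"name":"process_insert_id","type":["null",
    |    {"type":"bytes","logicalType":"decimal","precision":10,"scale":0}]}
    |]}""".stripMargin
val schema = new Schema.Parser().parse(schemaJson)
// The second branch of the union is the decimal-over-bytes schema.
val decimalSchema = schema.getField("process_insert_id").schema().getTypes.get(1)

// Encode 2000110974 the way Avro's own DecimalConversion does
// (big-endian two's-complement unscaled value in a ByteBuffer).
val conversion = new Conversions.DecimalConversion()
val record: GenericRecord = new GenericData.Record(schema)
record.put("process_insert_id",
  conversion.toBytes(new java.math.BigDecimal(2000110974L), decimalSchema,
    LogicalTypes.decimal(10, 0)))

// Write a one-record Avro file with the generic writer.
val file = new File("/tmp/decimal-bytes.avro") // placeholder path
val writer = new DataFileWriter(new GenericDatumWriter[GenericRecord](schema))
writer.create(schema, file)
writer.append(record)
writer.close()

// Expected: one row containing 2000110974; with 2.4.0 the read fails as above.
spark.read.format("avro").load(file.getPath).show()
{code}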