[ https://issues.apache.org/jira/browse/SPARK-40299?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17599634#comment-17599634 ]
Bhavya Jain commented on SPARK-40299:
-------------------------------------
[~code1v5] Could you please elaborate on the issue?
Please add some examples and scenarios showing how we can reproduce it.
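
For reference, here is a minimal sketch of the Java call pattern the summary describes; the table name, schema, and session settings are assumptions for illustration, not details from this report:

    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Row;
    import org.apache.spark.sql.SparkSession;

    public class CountRepro {
        public static void main(String[] args) {
            // Hive-backed session: the stack trace goes through the Hive text
            // SerDe (LazyHiveDecimal / LazySimpleStructObjectInspector).
            SparkSession spark = SparkSession.builder()
                    .appName("SPARK-40299 repro sketch")
                    .enableHiveSupport()
                    .getOrCreate();

            // Hypothetical Hive table with a DECIMAL column; per the report,
            // the exception is thrown while count() materializes rows
            // through the SerDe.
            Dataset<Row> df = spark.table("some_db.some_decimal_table");
            long n = df.count();  // reported to fail with ArithmeticException

            System.out.println(n);
            spark.stop();
        }
    }

A self-contained reproduction would also need the DECIMAL data that triggers the overflow, which is exactly the detail requested above.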
> Java API: calling the count() method throws java.lang.ArithmeticException: BigInteger would overflow supported range
> ---------------------------------------------------------------------------------------------------------------------
>
> Key: SPARK-40299
> URL: https://issues.apache.org/jira/browse/SPARK-40299
> Project: Spark
> Issue Type: Bug
> Components: Java API
> Affects Versions: 2.3.2
> Reporter: code1v5
> Priority: Major
>
> Hive Session ID = a372ea31-ac98-4e01-9de3-dfb623df87a4
> 22/09/01 13:50:32 WARN SessionState: METASTORE_FILTER_HOOK will be ignored, since hive.security.authorization.manager is set to instance of HiveAuthorizerFactory.
> [Stage 0:> (0 + 8) / 8]22/09/01 13:50:41 WARN TaskSetManager: Lost task 5.0 in stage 0.0 (TID 5, hdp3-10-106, executor 6): java.lang.ArithmeticException: BigInteger would overflow supported range
> at java.math.BigInteger.reportOverflow(BigInteger.java:1084)
> at java.math.BigInteger.pow(BigInteger.java:2391)
> at java.math.BigDecimal.bigTenToThe(BigDecimal.java:3574)
> at java.math.BigDecimal.bigMultiplyPowerTen(BigDecimal.java:3707)
> at java.math.BigDecimal.setScale(BigDecimal.java:2448)
> at java.math.BigDecimal.setScale(BigDecimal.java:2515)
> at org.apache.hadoop.hive.common.type.HiveDecimal.trim(HiveDecimal.java:241)
> at org.apache.hadoop.hive.common.type.HiveDecimal.normalize(HiveDecimal.java:252)
> at org.apache.hadoop.hive.common.type.HiveDecimal.create(HiveDecimal.java:83)
> at org.apache.hadoop.hive.serde2.lazy.LazyHiveDecimal.init(LazyHiveDecimal.java:79)
> at org.apache.hadoop.hive.serde2.lazy.LazyStruct.uncheckedGetField(LazyStruct.java:226)
> at org.apache.hadoop.hive.serde2.lazy.LazyStruct.getField(LazyStruct.java:202)
> at org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector.getStructFieldData(LazySimpleStructObjectInspector.java:128)
> at org.apache.spark.sql.hive.HadoopTableReader$$anonfun$fillObject$2.apply(TableReader.scala:439)
> at org.apache.spark.sql.hive.HadoopTableReader$$anonfun$fillObject$2.apply(TableReader.scala:434)
> at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
> at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
> at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(Unknown Source)
> at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
> at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
> at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$10$$anon$1.hasNext(WholeStageCodegenExec.scala:614)
> at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
> at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:125)
> at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
> at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
> at org.apache.spark.scheduler.Task.run(Task.scala:109)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:748)
> 22/09/01 13:50:42 ERROR TaskSetManager: Task 5 in stage 0.0 failed 4 times; aborting job
> 22/09/01 13:50:42 WARN TaskSetManager: Lost task 7.0 in stage 0.0 (TID 7, hdp2-10-105, executor 8): TaskKilled (Stage cancelled)
> [Stage 0:> (0 + 6) / 8]org.apache.spark.SparkException: Job aborted due to stage failure: Task 5 in stage 0.0 failed 4 times, most recent failure: Lost task 5.3 in stage 0.0 (TID 10, hdp3-10-106, executor 6): java.lang.ArithmeticException: BigInteger would overflow supported range
> at java.math.BigInteger.reportOverflow(BigInteger.java:1084)
> at java.math.BigInteger.pow(BigInteger.java:2391)
> at java.math.BigDecimal.bigTenToThe(BigDecimal.java:3574)
> at java.math.BigDecimal.bigMultiplyPowerTen(BigDecimal.java:3707)
> at java.math.BigDecimal.setScale(BigDecimal.java:2448)
> at java.math.BigDecimal.setScale(BigDecimal.java:2515)
> at org.apache.hadoop.hive.common.type.HiveDecimal.trim(HiveDecimal.java:241)
> at org.apache.hadoop.hive.common.type.HiveDecimal.normalize(HiveDecimal.java:252)
> at org.apache.hadoop.hive.common.type.HiveDecimal.create(HiveDecimal.java:83)
> at org.apache.hadoop.hive.serde2.lazy.LazyHiveDecimal.init(LazyHiveDecimal.java:79)
> at org.apache.hadoop.hive.serde2.lazy.LazyStruct.uncheckedGetField(LazyStruct.java:226)
> at org.apache.hadoop.hive.serde2.lazy.LazyStruct.getField(LazyStruct.java:202)
> at org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector.getStructFieldData(LazySimpleStructObjectInspector.java:128)
> at org.apache.spark.sql.hive.HadoopTableReader$$anonfun$fillObject$2.apply(TableReader.scala:439)
> at org.apache.spark.sql.hive.HadoopTableReader$$anonfun$fillObject$2.apply(TableReader.scala:434)
> at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
> at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
> at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(Unknown Source)
> at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
> at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
> at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$10$$anon$1.hasNext(WholeStageCodegenExec.scala:614)
> at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
> at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:125)
> at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
> at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
> at org.apache.spark.scheduler.Task.run(Task.scala:109)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:748)
> Driver stacktrace:
> at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1651)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1639)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1638)
> at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
> at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
> at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1638)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:831)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:831)
> at scala.Option.foreach(Option.scala:257)
> at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:831)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1872)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1821)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1810)
> at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
> at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:642)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:2039)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:2060)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:2079)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:2104)
> at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:945)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
> at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
> at org.apache.spark.rdd.RDD.collect(RDD.scala:944)
> at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:297)
> at org.apache.spark.sql.Dataset$$anonfun$count$1.apply(Dataset.scala:2775)
> at org.apache.spark.sql.Dataset$$anonfun$count$1.apply(Dataset.scala:2774)
> at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3259)
> at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:77)
> at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3258)
> at org.apache.spark.sql.Dataset.count(Dataset.scala:2774)
> ... 49 elided
> Caused by: java.lang.ArithmeticException: BigInteger would overflow supported range
> at java.math.BigInteger.reportOverflow(BigInteger.java:1084)
> at java.math.BigInteger.pow(BigInteger.java:2391)
> at java.math.BigDecimal.bigTenToThe(BigDecimal.java:3574)
> at java.math.BigDecimal.bigMultiplyPowerTen(BigDecimal.java:3707)
> at java.math.BigDecimal.setScale(BigDecimal.java:2448)
> at java.math.BigDecimal.setScale(BigDecimal.java:2515)
> at org.apache.hadoop.hive.common.type.HiveDecimal.trim(HiveDecimal.java:241)
> at org.apache.hadoop.hive.common.type.HiveDecimal.normalize(HiveDecimal.java:252)
> at org.apache.hadoop.hive.common.type.HiveDecimal.create(HiveDecimal.java:83)
> at org.apache.hadoop.hive.serde2.lazy.LazyHiveDecimal.init(LazyHiveDecimal.java:79)
> at org.apache.hadoop.hive.serde2.lazy.LazyStruct.uncheckedGetField(LazyStruct.java:226)
> at org.apache.hadoop.hive.serde2.lazy.LazyStruct.getField(LazyStruct.java:202)
> at org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector.getStructFieldData(LazySimpleStructObjectInspector.java:128)
> at org.apache.spark.sql.hive.HadoopTableReader$$anonfun$fillObject$2.apply(TableReader.scala:439)
> at org.apache.spark.sql.hive.HadoopTableReader$$anonfun$fillObject$2.apply(TableReader.scala:434)
> at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
> at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
> at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(Unknown Source)
> at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
> at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
> at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$10$$anon$1.hasNext(WholeStageCodegenExec.scala:614)
> at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
> at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:125)
> at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
> at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
> at org.apache.spark.scheduler.Task.run(Task.scala:109)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:748)
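
Reading the frames above: every copy of the trace bottoms out in HiveDecimal.create -> normalize -> trim calling BigDecimal.setScale, which in turn asks BigInteger.pow for a power of ten too large for BigInteger to represent. A minimal JDK-only sketch that raises the same exception through the same BigDecimal/BigInteger frames (the huge negative scale is an assumed stand-in for whatever the Lazy SerDe decoded, not the reporter's actual data):

    import java.math.BigDecimal;
    import java.math.BigInteger;

    public class OverflowSketch {
        public static void main(String[] args) {
            // A decimal with an enormous negative scale, i.e. 1 * 10^2000000000.
            BigDecimal d = new BigDecimal(BigInteger.ONE, -2_000_000_000);

            // Rescaling to scale 0 multiplies the unscaled value by
            // 10^2000000000, whose magnitude exceeds what BigInteger can
            // represent, so this throws java.lang.ArithmeticException:
            // BigInteger would overflow supported range, via the
            // BigDecimal.bigTenToThe / BigInteger.pow frames in the trace.
            BigDecimal rescaled = d.setScale(0);
            System.out.println(rescaled); // never reached
        }
    }

If that reading is right, the file being scanned contains a decimal whose encoded scale is far outside the declared column type, so the table DDL and a sample of the raw data would help pin this down.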