MaxGekk commented on code in PR #43438:
URL: https://github.com/apache/spark/pull/43438#discussion_r1366524094


##########
sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/CliSuite.scala:
##########
@@ -719,7 +720,7 @@ class CliSuite extends SparkFunSuite {
       format = ErrorMessageFormat.PRETTY,
       errorMessage =
         """[DIVIDE_BY_ZERO] Division by zero. Use `try_divide` to tolerate 
divisor being 0 and return NULL instead. If necessary set 
"spark.sql.ansi.enabled" to "false" to bypass this error.

Review Comment:
   Just wondering why there is no SQLSTATE here — @srielau, any ideas?



##########
core/src/test/scala/org/apache/spark/ui/UIUtilsSuite.scala:
##########
@@ -192,7 +192,7 @@ class UIUtilsSuite extends SparkFunSuite {
 
   // scalastyle:off line.size.limit
   test("SPARK-44367: Extract errorClass from errorMsg with errorMessageCell") {
-    val e1 = "Job aborted due to stage failure: Task 0 in stage 1.0 failed 1 
times, most recent failure: Lost task 0.0 in stage 1.0 (TID 1) (10.221.98.22 
executor driver): org.apache.spark.SparkArithmeticException: [DIVIDE_BY_ZERO] 
Division by zero. Use `try_divide` to tolerate divisor being 0 and return NULL 
instead. If necessary set \"spark.sql.ansi.enabled\" to \"false\" to bypass 
this error.\n== SQL(line 1, position 8) ==\nselect a/b from src\n       
^^^\n\n\tat 
org.apache.spark.sql.errors.QueryExecutionErrors$.divideByZeroError(QueryExecutionErrors.scala:226)\n\tat
 
org.apache.spark.sql.errors.QueryExecutionErrors.divideByZeroError(QueryExecutionErrors.scala)\n\tat
 
org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:54)\n\tat
 
org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)\n\tat
 
org.apache.spark.sql.execution.WholeStageCodegenEvaluatorFactory$WholeStageCodegenPartitionEvalua
 tor$$anon$1.hasNext(WholeStageCodegenEvaluatorFactory.scala:43)\n\tat 
org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:388)\n\tat
 org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:890)\n\tat 
org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:890)\n\tat
 org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)\n\tat 
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:364)\n\tat 
org.apache.spark.rdd.RDD.iterator(RDD.scala:328)\n\tat 
org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)\n\tat 
org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:161)\n\tat 
org.apache.spark.scheduler.Task.run(Task.scala:141)\n\tat 
org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:592)\n\tat
 org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1474)\n\tat 
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:595)\n\tat 
java.util.concurr
 ent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat
 java.lang.Thread.run(Thread.java:750)\n\nDriver stacktrace:"
+    val e1 = "Job aborted due to stage failure: Task 0 in stage 1.0 failed 1 
times, most recent failure: Lost task 0.0 in stage 1.0 (TID 1) (10.221.98.22 
executor driver): org.apache.spark.SparkArithmeticException: [DIVIDE_BY_ZERO] 
Division by zero. Use `try_divide` to tolerate divisor being 0 and return NULL 
instead. If necessary set \"spark.sql.ansi.enabled\" to \"false\" to bypass 
this error.\n== SQL (line 1, position 8) ==\nselect a/b from src\n       
^^^\n\n\tat 
org.apache.spark.sql.errors.QueryExecutionErrors$.divideByZeroError(QueryExecutionErrors.scala:226)\n\tat
 
org.apache.spark.sql.errors.QueryExecutionErrors.divideByZeroError(QueryExecutionErrors.scala)\n\tat
 
org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:54)\n\tat
 
org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)\n\tat
 
org.apache.spark.sql.execution.WholeStageCodegenEvaluatorFactory$WholeStageCodegenPartitionEvalu
 ator$$anon$1.hasNext(WholeStageCodegenEvaluatorFactory.scala:43)\n\tat 
org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:388)\n\tat
 org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:890)\n\tat 
org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:890)\n\tat
 org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)\n\tat 
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:364)\n\tat 
org.apache.spark.rdd.RDD.iterator(RDD.scala:328)\n\tat 
org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:93)\n\tat 
org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:161)\n\tat 
org.apache.spark.scheduler.Task.run(Task.scala:141)\n\tat 
org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:592)\n\tat
 org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1474)\n\tat 
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:595)\n\tat 
java.util.concur
 rent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat
 java.lang.Thread.run(Thread.java:750)\n\nDriver stacktrace:"

Review Comment:
   Why is there no `SQLSTATE` in the error message? Any ideas?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to