This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new d3aadb40370 [SPARK-39087][SQL][3.3] Improve messages of error classes
d3aadb40370 is described below

commit d3aadb40370c0613c2d2ce41d8b905f0fafcd69c
Author: Max Gekk <max.g...@gmail.com>
AuthorDate: Wed May 4 08:45:03 2022 +0300

    [SPARK-39087][SQL][3.3] Improve messages of error classes
    
    ### What changes were proposed in this pull request?
    In this PR, I propose to modify the error messages of the following error classes:
    - INVALID_JSON_SCHEMA_MAP_TYPE
    - INCOMPARABLE_PIVOT_COLUMN
    - INVALID_ARRAY_INDEX_IN_ELEMENT_AT
    - INVALID_ARRAY_INDEX
    - DIVIDE_BY_ZERO
    
    This is a backport of https://github.com/apache/spark/pull/36428.
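
    As an illustration of the reworded user-facing output (a hedged sketch of a
    hypothetical ANSI-mode spark-shell session; the query and message mirror the
    updated ansi/array.sql.out golden file below):
    ```
    // Hypothetical spark-shell session; the message text comes from error-classes.json in this patch.
    spark.conf.set("spark.sql.ansi.enabled", true)
    spark.sql("SELECT element_at(array(1, 2, 3), 5)").collect()
    // org.apache.spark.SparkArrayIndexOutOfBoundsException:
    // The index 5 is out of bounds. The array has 3 elements. To return NULL instead,
    // use `try_element_at`. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
    ```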
    
    ### Why are the changes needed?
    To improve readability of error messages.
    
    ### Does this PR introduce _any_ user-facing change?
    Yes. It changes user-facing error messages.
    
    ### How was this patch tested?
    By running the modified test suites:
    ```
    $ build/sbt "sql/testOnly *QueryCompilationErrorsSuite*"
    $ build/sbt "sql/testOnly *QueryExecutionErrorsSuite*"
    $ build/sbt "sql/testOnly *QueryExecutionAnsiErrorsSuite"
    $ build/sbt "test:testOnly *SparkThrowableSuite"
    ```
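
    For context, the kind of assertion those suites make on the new wording is
    roughly as follows (a fragment mirroring the updated ArithmeticExpressionSuite
    hunk below; it assumes that suite's existing mixins and imports):
    ```
    // Fragment, not standalone code: relies on ExpressionEvalHelper/SQLHelper from the suite.
    withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
      checkExceptionInExpression[ArithmeticException](
        IntegralDivide(Literal(Decimal(0.2)), Literal(Decimal(0.0))), "Division by zero")
    }
    ```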
    
    Authored-by: Max Gekk <max.gekk@gmail.com>
    Signed-off-by: Max Gekk <max.gekk@gmail.com>
    (cherry picked from commit 040526391a45ad610422a48c05aa69ba5133f922)
    Signed-off-by: Max Gekk <max.gekk@gmail.com>
    
    Closes #36439 from MaxGekk/error-class-improve-msg-3.3.
    
    Authored-by: Max Gekk <max.g...@gmail.com>
    Signed-off-by: Max Gekk <max.g...@gmail.com>
---
 core/src/main/resources/error/error-classes.json   | 12 ++++-----
 .../org/apache/spark/SparkThrowableSuite.scala     |  2 +-
 .../spark/sql/errors/QueryCompilationErrors.scala  |  6 ++---
 .../expressions/ArithmeticExpressionSuite.scala    | 30 +++++++++++-----------
 .../expressions/CollectionExpressionsSuite.scala   |  4 +--
 .../catalyst/expressions/ComplexTypeSuite.scala    |  4 +--
 .../expressions/IntervalExpressionsSuite.scala     | 10 ++++----
 .../expressions/StringExpressionsSuite.scala       |  6 ++---
 .../sql/catalyst/util/IntervalUtilsSuite.scala     |  2 +-
 .../resources/sql-tests/results/ansi/array.sql.out | 24 ++++++++---------
 .../sql-tests/results/ansi/interval.sql.out        |  4 +--
 .../resources/sql-tests/results/interval.sql.out   |  4 +--
 .../test/resources/sql-tests/results/pivot.sql.out |  4 +--
 .../sql-tests/results/postgreSQL/case.sql.out      |  6 ++---
 .../sql-tests/results/postgreSQL/int8.sql.out      |  6 ++---
 .../results/postgreSQL/select_having.sql.out       |  2 +-
 .../results/udf/postgreSQL/udf-case.sql.out        |  6 ++---
 .../udf/postgreSQL/udf-select_having.sql.out       |  2 +-
 .../sql-tests/results/udf/udf-pivot.sql.out        |  4 +--
 .../apache/spark/sql/ColumnExpressionSuite.scala   | 12 ++++-----
 .../org/apache/spark/sql/DataFrameSuite.scala      |  2 +-
 .../apache/spark/sql/execution/SQLViewSuite.scala  |  4 +--
 .../sql/streaming/FileStreamSourceSuite.scala      |  2 +-
 23 files changed, 79 insertions(+), 79 deletions(-)

diff --git a/core/src/main/resources/error/error-classes.json b/core/src/main/resources/error/error-classes.json
index 463a5eae534..78934667ac0 100644
--- a/core/src/main/resources/error/error-classes.json
+++ b/core/src/main/resources/error/error-classes.json
@@ -37,7 +37,7 @@
     "sqlState" : "22008"
   },
   "DIVIDE_BY_ZERO" : {
-    "message" : [ "divide by zero. To return NULL instead, use 'try_divide'. 
If necessary set <config> to false (except for ANSI interval type) to bypass 
this error.<details>" ],
+    "message" : [ "Division by zero. To return NULL instead, use `try_divide`. 
If necessary set <config> to false (except for ANSI interval type) to bypass 
this error.<details>" ],
     "sqlState" : "22012"
   },
   "DUPLICATE_KEY" : {
@@ -72,7 +72,7 @@
     "message" : [ "Grouping sets size cannot be greater than <maxSize>" ]
   },
   "INCOMPARABLE_PIVOT_COLUMN" : {
-    "message" : [ "Invalid pivot column '<columnName>'. Pivot columns must be 
comparable." ],
+    "message" : [ "Invalid pivot column <columnName>. Pivot columns must be 
comparable." ],
     "sqlState" : "42000"
   },
   "INCOMPATIBLE_DATASOURCE_REGISTER" : {
@@ -89,10 +89,10 @@
     "message" : [ "<message>" ]
   },
   "INVALID_ARRAY_INDEX" : {
-    "message" : [ "Invalid index: <indexValue>, numElements: <arraySize>. If 
necessary set <config> to false to bypass this error." ]
+    "message" : [ "The index <indexValue> is out of bounds. The array has 
<arraySize> elements. If necessary set <config> to false to bypass this error." 
]
   },
   "INVALID_ARRAY_INDEX_IN_ELEMENT_AT" : {
-    "message" : [ "Invalid index: <indexValue>, numElements: <arraySize>. To 
return NULL instead, use 'try_element_at'. If necessary set <config> to false 
to bypass this error." ]
+    "message" : [ "The index <indexValue> is out of bounds. The array has 
<arraySize> elements. To return NULL instead, use `try_element_at`. If 
necessary set <config> to false to bypass this error." ]
   },
   "INVALID_FIELD_NAME" : {
     "message" : [ "Field name <fieldName> is invalid: <path> is not a struct." 
],
@@ -102,8 +102,8 @@
     "message" : [ "The fraction of sec must be zero. Valid range is [0, 60]. 
If necessary set <config> to false to bypass this error. " ],
     "sqlState" : "22023"
   },
-  "INVALID_JSON_SCHEMA_MAPTYPE" : {
-    "message" : [ "Input schema <dataType> can only contain StringType as a 
key type for a MapType." ]
+  "INVALID_JSON_SCHEMA_MAP_TYPE" : {
+    "message" : [ "Input schema <jsonSchema> can only contain STRING as a key 
type for a MAP." ]
   },
   "INVALID_PARAMETER_VALUE" : {
     "message" : [ "The value of parameter(s) '<parameter>' in <functionName> 
is invalid: <expected>" ],
diff --git a/core/src/test/scala/org/apache/spark/SparkThrowableSuite.scala b/core/src/test/scala/org/apache/spark/SparkThrowableSuite.scala
index f1eb27cd4d9..acfe721914b 100644
--- a/core/src/test/scala/org/apache/spark/SparkThrowableSuite.scala
+++ b/core/src/test/scala/org/apache/spark/SparkThrowableSuite.scala
@@ -125,7 +125,7 @@ class SparkThrowableSuite extends SparkFunSuite {
 
     // Does not fail with too many args (expects 0 args)
     assert(getMessage("DIVIDE_BY_ZERO", Array("foo", "bar", "baz")) ==
-      "divide by zero. To return NULL instead, use 'try_divide'. If necessary 
set foo to false " +
+      "Division by zero. To return NULL instead, use `try_divide`. If 
necessary set foo to false " +
         "(except for ANSI interval type) to bypass this error.bar")
   }
 
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
index 82b19668b92..8925d4f5317 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
@@ -76,7 +76,7 @@ object QueryCompilationErrors extends QueryErrorsBase {
   def unorderablePivotColError(pivotCol: Expression): Throwable = {
     new AnalysisException(
       errorClass = "INCOMPARABLE_PIVOT_COLUMN",
-      messageParameters = Array(pivotCol.toString))
+      messageParameters = Array(toSQLId(pivotCol.sql)))
   }
 
   def nonLiteralPivotValError(pivotVal: Expression): Throwable = {
@@ -2377,8 +2377,8 @@ object QueryCompilationErrors extends QueryErrorsBase {
 
   def invalidJsonSchema(schema: DataType): Throwable = {
     new AnalysisException(
-      errorClass = "INVALID_JSON_SCHEMA_MAPTYPE",
-      messageParameters = Array(schema.toString))
+      errorClass = "INVALID_JSON_SCHEMA_MAP_TYPE",
+      messageParameters = Array(toSQLType(schema)))
   }
 
   def tableIndexNotSupportedError(errorMessage: String): Throwable = {
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ArithmeticExpressionSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ArithmeticExpressionSuite.scala
index b167b5f7a16..87777991cb9 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ArithmeticExpressionSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ArithmeticExpressionSuite.scala
@@ -243,7 +243,7 @@ class ArithmeticExpressionSuite extends SparkFunSuite with 
ExpressionEvalHelper
       }
       withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
         checkExceptionInExpression[ArithmeticException](
-          Divide(left, Literal(convert(0))), "divide by zero")
+          Divide(left, Literal(convert(0))), "Division by zero")
       }
     }
 
@@ -287,7 +287,7 @@ class ArithmeticExpressionSuite extends SparkFunSuite with 
ExpressionEvalHelper
       }
       withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
         checkExceptionInExpression[ArithmeticException](
-          IntegralDivide(left, Literal(convert(0))), "divide by zero")
+          IntegralDivide(left, Literal(convert(0))), "Division by zero")
       }
     }
     checkEvaluation(IntegralDivide(positiveLongLit, negativeLongLit), 0L)
@@ -339,7 +339,7 @@ class ArithmeticExpressionSuite extends SparkFunSuite with 
ExpressionEvalHelper
       }
       withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
         checkExceptionInExpression[ArithmeticException](
-          Remainder(left, Literal(convert(0))), "divide by zero")
+          Remainder(left, Literal(convert(0))), "Division by zero")
       }
     }
     checkEvaluation(Remainder(positiveShortLit, positiveShortLit), 0.toShort)
@@ -444,7 +444,7 @@ class ArithmeticExpressionSuite extends SparkFunSuite with 
ExpressionEvalHelper
       }
       withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
         checkExceptionInExpression[ArithmeticException](
-          Pmod(left, Literal(convert(0))), "divide by zero")
+          Pmod(left, Literal(convert(0))), "Division by zero")
       }
     }
     checkEvaluation(Pmod(Literal(-7), Literal(3)), 2)
@@ -608,7 +608,7 @@ class ArithmeticExpressionSuite extends SparkFunSuite with 
ExpressionEvalHelper
     }
     withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
       checkExceptionInExpression[ArithmeticException](
-        IntegralDivide(Literal(Decimal(0.2)), Literal(Decimal(0.0))), "divide 
by zero")
+        IntegralDivide(Literal(Decimal(0.2)), Literal(Decimal(0.0))), 
"Division by zero")
     }
     // overflows long and so returns a wrong result
     checkEvaluation(DecimalPrecision.decimalAndDecimal.apply(IntegralDivide(
@@ -755,7 +755,7 @@ class ArithmeticExpressionSuite extends SparkFunSuite with 
ExpressionEvalHelper
           val zero = Literal(convert(0))
           checkEvaluation(operator(Literal.create(null, one.dataType), zero), 
null)
           checkEvaluation(operator(one, Literal.create(null, zero.dataType)), 
null)
-          checkExceptionInExpression[ArithmeticException](operator(one, zero), 
"divide by zero")
+          checkExceptionInExpression[ArithmeticException](operator(one, zero), 
"Division by zero")
         }
       }
     }
@@ -814,7 +814,7 @@ class ArithmeticExpressionSuite extends SparkFunSuite with 
ExpressionEvalHelper
           checkEvaluation(operator(Literal.create(null, one.dataType), zero), 
null)
           checkEvaluation(operator(one, Literal.create(null, zero.dataType)), 
null)
           checkExceptionInExpression[SparkArithmeticException](operator(one, 
zero),
-            "divide by zero")
+            "Division by zero")
         }
       }
     }
@@ -862,13 +862,13 @@ class ArithmeticExpressionSuite extends SparkFunSuite 
with ExpressionEvalHelper
     }
     withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
       checkExceptionInExpression[ArithmeticException](
-        IntegralDivide(Literal(Period.ZERO), Literal(Period.ZERO)), "divide by 
zero")
+        IntegralDivide(Literal(Period.ZERO), Literal(Period.ZERO)), "Division 
by zero")
       checkExceptionInExpression[ArithmeticException](
-        IntegralDivide(Literal(Period.ofYears(1)), Literal(Period.ZERO)), 
"divide by zero")
+        IntegralDivide(Literal(Period.ofYears(1)), Literal(Period.ZERO)), 
"Division by zero")
       checkExceptionInExpression[ArithmeticException](
-        IntegralDivide(Period.ofMonths(Int.MinValue), Literal(Period.ZERO)), 
"divide by zero")
+        IntegralDivide(Period.ofMonths(Int.MinValue), Literal(Period.ZERO)), 
"Division by zero")
       checkExceptionInExpression[ArithmeticException](
-        IntegralDivide(Period.ofMonths(Int.MaxValue), Literal(Period.ZERO)), 
"divide by zero")
+        IntegralDivide(Period.ofMonths(Int.MaxValue), Literal(Period.ZERO)), 
"Division by zero")
     }
 
     checkEvaluation(IntegralDivide(Literal.create(null, 
YearMonthIntervalType()),
@@ -914,16 +914,16 @@ class ArithmeticExpressionSuite extends SparkFunSuite 
with ExpressionEvalHelper
     }
     withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
       checkExceptionInExpression[ArithmeticException](
-        IntegralDivide(Literal(Duration.ZERO), Literal(Duration.ZERO)), 
"divide by zero")
+        IntegralDivide(Literal(Duration.ZERO), Literal(Duration.ZERO)), 
"Division by zero")
       checkExceptionInExpression[ArithmeticException](
         IntegralDivide(Literal(Duration.ofDays(1)),
-          Literal(Duration.ZERO)), "divide by zero")
+          Literal(Duration.ZERO)), "Division by zero")
       checkExceptionInExpression[ArithmeticException](
         IntegralDivide(Literal(Duration.of(Long.MaxValue, ChronoUnit.MICROS)),
-          Literal(Duration.ZERO)), "divide by zero")
+          Literal(Duration.ZERO)), "Division by zero")
       checkExceptionInExpression[ArithmeticException](
         IntegralDivide(Literal(Duration.of(Long.MinValue, ChronoUnit.MICROS)),
-          Literal(Duration.ZERO)), "divide by zero")
+          Literal(Duration.ZERO)), "Division by zero")
     }
 
     checkEvaluation(IntegralDivide(Literal.create(null, DayTimeIntervalType()),
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionExpressionsSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionExpressionsSuite.scala
index 3cf3b4469a4..fb4bf43ba83 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionExpressionsSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionExpressionsSuite.scala
@@ -2288,7 +2288,7 @@ class CollectionExpressionsSuite extends SparkFunSuite 
with ExpressionEvalHelper
         val array = Literal.create(Seq(1, 2, 3), ArrayType(IntegerType))
         var expr: Expression = ElementAt(array, Literal(5))
         if (ansiEnabled) {
-          val errMsg = "Invalid index: 5, numElements: 3"
+          val errMsg = "The index 5 is out of bounds. The array has 3 
elements."
           checkExceptionInExpression[Exception](expr, errMsg)
         } else {
           checkEvaluation(expr, null)
@@ -2296,7 +2296,7 @@ class CollectionExpressionsSuite extends SparkFunSuite 
with ExpressionEvalHelper
 
         expr = ElementAt(array, Literal(-5))
         if (ansiEnabled) {
-          val errMsg = "Invalid index: -5, numElements: 3"
+          val errMsg = "The index -5 is out of bounds. The array has 3 
elements."
           checkExceptionInExpression[Exception](expr, errMsg)
         } else {
           checkEvaluation(expr, null)
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala
index 57abdb4de22..3ab8afcac1e 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala
@@ -70,12 +70,12 @@ class ComplexTypeSuite extends SparkFunSuite with 
ExpressionEvalHelper {
         if (ansiEnabled) {
           checkExceptionInExpression[Exception](
             GetArrayItem(array, Literal(5)),
-            "Invalid index: 5, numElements: 2"
+            "The index 5 is out of bounds. The array has 2 elements."
           )
 
           checkExceptionInExpression[Exception](
             GetArrayItem(array, Literal(-1)),
-            "Invalid index: -1, numElements: 2"
+            "The index -1 is out of bounds. The array has 2 elements."
           )
         } else {
           checkEvaluation(GetArrayItem(array, Literal(5)), null)
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/IntervalExpressionsSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/IntervalExpressionsSuite.scala
index 05f9d0f6696..b9c7629f692 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/IntervalExpressionsSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/IntervalExpressionsSuite.scala
@@ -176,7 +176,7 @@ class IntervalExpressionsSuite extends SparkFunSuite with 
ExpressionEvalHelper {
     check("2 years -8 seconds", 0.5, "4 years -16 seconds")
     check("-1 month 2 microseconds", -0.25, "4 months -8 microseconds")
     check("1 month 3 microsecond", 1.5, "2 microseconds")
-    check("1 second", 0, "divide by zero", Some(true))
+    check("1 second", 0, "Division by zero", Some(true))
     check("1 second", 0, null, Some(false))
     check(s"${Int.MaxValue} months", 0.9, "integer overflow", Some(true))
     check(s"${Int.MaxValue} months", 0.9, Int.MaxValue + " months", 
Some(false))
@@ -412,8 +412,8 @@ class IntervalExpressionsSuite extends SparkFunSuite with 
ExpressionEvalHelper {
     }
 
     Seq(
-      (Period.ofMonths(1), 0) -> "divide by zero",
-      (Period.ofMonths(Int.MinValue), 0d) -> "divide by zero",
+      (Period.ofMonths(1), 0) -> "Division by zero",
+      (Period.ofMonths(Int.MinValue), 0d) -> "Division by zero",
       (Period.ofMonths(-100), Float.NaN) -> "input is infinite or NaN"
     ).foreach { case ((period, num), expectedErrMsg) =>
       checkExceptionInExpression[ArithmeticException](
@@ -447,8 +447,8 @@ class IntervalExpressionsSuite extends SparkFunSuite with 
ExpressionEvalHelper {
     }
 
     Seq(
-      (Duration.ofDays(1), 0) -> "divide by zero",
-      (Duration.ofMillis(Int.MinValue), 0d) -> "divide by zero",
+      (Duration.ofDays(1), 0) -> "Division by zero",
+      (Duration.ofMillis(Int.MinValue), 0d) -> "Division by zero",
       (Duration.ofSeconds(-100), Float.NaN) -> "input is infinite or NaN"
     ).foreach { case ((period, num), expectedErrMsg) =>
       checkExceptionInExpression[ArithmeticException](
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala
index 91b3d0c69b8..db7aae99855 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala
@@ -1215,7 +1215,7 @@ class StringExpressionsSuite extends SparkFunSuite with 
ExpressionEvalHelper {
       withSQLConf(SQLConf.ANSI_ENABLED.key -> ansiEnabled.toString) {
         var expr: Expression = Elt(Seq(Literal(4), Literal("123"), 
Literal("456")))
         if (ansiEnabled) {
-          val errMsg = "Invalid index: 4, numElements: 2"
+          val errMsg = "The index 4 is out of bounds. The array has 2 
elements."
           checkExceptionInExpression[Exception](expr, errMsg)
         } else {
           checkEvaluation(expr, null)
@@ -1223,7 +1223,7 @@ class StringExpressionsSuite extends SparkFunSuite with 
ExpressionEvalHelper {
 
         expr = Elt(Seq(Literal(0), Literal("123"), Literal("456")))
         if (ansiEnabled) {
-          val errMsg = "Invalid index: 0, numElements: 2"
+          val errMsg = "The index 0 is out of bounds. The array has 2 
elements."
           checkExceptionInExpression[Exception](expr, errMsg)
         } else {
           checkEvaluation(expr, null)
@@ -1231,7 +1231,7 @@ class StringExpressionsSuite extends SparkFunSuite with 
ExpressionEvalHelper {
 
         expr = Elt(Seq(Literal(-1), Literal("123"), Literal("456")))
         if (ansiEnabled) {
-          val errMsg = "Invalid index: -1, numElements: 2"
+          val errMsg = "The index -1 is out of bounds. The array has 2 
elements."
           checkExceptionInExpression[Exception](expr, errMsg)
         } else {
           checkEvaluation(expr, null)
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
index 62059c0b996..0e65886a2eb 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
@@ -307,7 +307,7 @@ class IntervalUtilsSuite extends SparkFunSuite with 
SQLHelper {
     interval = new CalendarInterval(123, 456, 789)
     assert(divide(interval, 0) === null)
     val e2 = intercept[ArithmeticException](divideExact(interval, 0))
-    assert(e2.getMessage.contains("divide by zero"))
+    assert(e2.getMessage.contains("Division by zero"))
   }
 
   test("from day-time string") {
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out 
b/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out
index 9c659019ba2..64a7cc68b9c 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out
@@ -168,7 +168,7 @@ select element_at(array(1, 2, 3), 5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: 5, numElements: 3. To return NULL instead, use 
'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass 
this error.
+The index 5 is out of bounds. The array has 3 elements. To return NULL 
instead, use `try_element_at`. If necessary set "spark.sql.ansi.enabled" to 
false to bypass this error.
 
 
 -- !query
@@ -177,7 +177,7 @@ select element_at(array(1, 2, 3), -5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: -5, numElements: 3. To return NULL instead, use 
'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass 
this error.
+The index -5 is out of bounds. The array has 3 elements. To return NULL 
instead, use `try_element_at`. If necessary set "spark.sql.ansi.enabled" to 
false to bypass this error.
 
 
 -- !query
@@ -195,7 +195,7 @@ select elt(4, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: 4, numElements: 2. If necessary set "spark.sql.ansi.enabled" to 
false to bypass this error.
+The index 4 is out of bounds. The array has 2 elements. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -204,7 +204,7 @@ select elt(0, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: 0, numElements: 2. If necessary set "spark.sql.ansi.enabled" to 
false to bypass this error.
+The index 0 is out of bounds. The array has 2 elements. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -213,7 +213,7 @@ select elt(-1, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: -1, numElements: 2. If necessary set "spark.sql.ansi.enabled" 
to false to bypass this error.
+The index -1 is out of bounds. The array has 2 elements. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -254,7 +254,7 @@ select array(1, 2, 3)[5]
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: 5, numElements: 3. If necessary set "spark.sql.ansi.enabled" to 
false to bypass this error.
+The index 5 is out of bounds. The array has 3 elements. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -263,7 +263,7 @@ select array(1, 2, 3)[-1]
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: -1, numElements: 3. If necessary set "spark.sql.ansi.enabled" 
to false to bypass this error.
+The index -1 is out of bounds. The array has 3 elements. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -337,7 +337,7 @@ select element_at(array(1, 2, 3), 5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: 5, numElements: 3. To return NULL instead, use 
'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass 
this error.
+The index 5 is out of bounds. The array has 3 elements. To return NULL 
instead, use `try_element_at`. If necessary set "spark.sql.ansi.enabled" to 
false to bypass this error.
 
 
 -- !query
@@ -346,7 +346,7 @@ select element_at(array(1, 2, 3), -5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: -5, numElements: 3. To return NULL instead, use 
'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass 
this error.
+The index -5 is out of bounds. The array has 3 elements. To return NULL 
instead, use `try_element_at`. If necessary set "spark.sql.ansi.enabled" to 
false to bypass this error.
 
 
 -- !query
@@ -364,7 +364,7 @@ select elt(4, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: 4, numElements: 2. If necessary set "spark.sql.ansi.enabled" to 
false to bypass this error.
+The index 4 is out of bounds. The array has 2 elements. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -373,7 +373,7 @@ select elt(0, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: 0, numElements: 2. If necessary set "spark.sql.ansi.enabled" to 
false to bypass this error.
+The index 0 is out of bounds. The array has 2 elements. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -382,4 +382,4 @@ select elt(-1, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-Invalid index: -1, numElements: 2. If necessary set "spark.sql.ansi.enabled" 
to false to bypass this error.
+The index -1 is out of bounds. The array has 2 elements. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
diff --git 
a/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out 
b/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
index ddd04d5bea6..63b79667220 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
@@ -228,7 +228,7 @@ select interval '2 seconds' / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 7) ==
 select interval '2 seconds' / 0
        ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -264,7 +264,7 @@ select interval '2' year / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 7) ==
 select interval '2' year / 0
        ^^^^^^^^^^^^^^^^^^^^^
diff --git a/sql/core/src/test/resources/sql-tests/results/interval.sql.out 
b/sql/core/src/test/resources/sql-tests/results/interval.sql.out
index 5e03105269b..61b4f20e5fd 100644
--- a/sql/core/src/test/resources/sql-tests/results/interval.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/interval.sql.out
@@ -204,7 +204,7 @@ select interval '2 seconds' / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 7) ==
 select interval '2 seconds' / 0
        ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -240,7 +240,7 @@ select interval '2' year / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 7) ==
 select interval '2' year / 0
        ^^^^^^^^^^^^^^^^^^^^^
diff --git a/sql/core/src/test/resources/sql-tests/results/pivot.sql.out 
b/sql/core/src/test/resources/sql-tests/results/pivot.sql.out
index 7c301793b14..54086bcc54e 100644
--- a/sql/core/src/test/resources/sql-tests/results/pivot.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/pivot.sql.out
@@ -458,7 +458,7 @@ PIVOT (
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Invalid pivot column 'm#x'. Pivot columns must be comparable.
+Invalid pivot column `__auto_generated_subquery_name`.`m`. Pivot columns must 
be comparable.
 
 
 -- !query
@@ -475,7 +475,7 @@ PIVOT (
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Invalid pivot column 'named_struct(course, course#x, m, m#x)'. Pivot columns 
must be comparable.
+Invalid pivot column `named_struct('course', 
__auto_generated_subquery_name`.`course, 'm', 
__auto_generated_subquery_name`.`m)`. Pivot columns must be comparable.
 
 
 -- !query
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out
index db34e6c71d9..6f28df8358a 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out
@@ -179,7 +179,7 @@ SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 26) ==
 SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END
                           ^^^
@@ -191,7 +191,7 @@ SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 26) ==
 SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END
                           ^^^
@@ -203,7 +203,7 @@ SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 30) ==
 SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl
                               ^^^
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out
index 35d72e2b6ce..6c5673f9ce6 100755
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out
@@ -575,7 +575,7 @@ select bigint('9223372036854775800') / bigint('0')
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 7) ==
 select bigint('9223372036854775800') / bigint('0')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -587,7 +587,7 @@ select bigint('-9223372036854775808') / smallint('0')
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 7) ==
 select bigint('-9223372036854775808') / smallint('0')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -599,7 +599,7 @@ select smallint('100') / bigint('0')
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 7) ==
 select smallint('100') / bigint('0')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
index 5c05026ec89..d91adc7ed24 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
@@ -177,7 +177,7 @@ SELECT 1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 39) ==
 ...1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2
                                    ^^^
diff --git 
a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out 
b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out
index 1e2f224fe04..27a13805199 100755
--- 
a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out
@@ -179,7 +179,7 @@ SELECT CASE WHEN udf(1=0) THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 
END
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 31) ==
 SELECT CASE WHEN udf(1=0) THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END
                                ^^^
@@ -191,7 +191,7 @@ SELECT CASE 1 WHEN 0 THEN 1/udf(0) WHEN 1 THEN 1 ELSE 2/0 
END
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 54) ==
 ...HEN 1/udf(0) WHEN 1 THEN 1 ELSE 2/0 END
                                    ^^^
@@ -203,7 +203,7 @@ SELECT CASE WHEN i > 100 THEN udf(1/0) ELSE udf(0) END FROM 
case_tbl
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 34) ==
 ...LECT CASE WHEN i > 100 THEN udf(1/0) ELSE udf(0) END FROM case_tbl
                                    ^^^
diff --git 
a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
 
b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
index f054ef6ff02..dfb287ff023 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
@@ -177,7 +177,7 @@ SELECT 1 AS one FROM test_having WHERE 1/udf(a) = 1 HAVING 
1 < 2
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-divide by zero. To return NULL instead, use 'try_divide'. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
+Division by zero. To return NULL instead, use `try_divide`. If necessary set 
"spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass 
this error.
 == SQL(line 1, position 39) ==
 ...1 AS one FROM test_having WHERE 1/udf(a) = 1 HAVING 1 < 2
                                    ^^^^^^^^
diff --git 
a/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out 
b/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out
index 2f479250682..7b986a25be0 100644
--- a/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out
@@ -424,7 +424,7 @@ PIVOT (
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Invalid pivot column 'm#x'. Pivot columns must be comparable.
+Invalid pivot column `__auto_generated_subquery_name`.`m`. Pivot columns must 
be comparable.
 
 
 -- !query
@@ -441,7 +441,7 @@ PIVOT (
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Invalid pivot column 'named_struct(course, course#x, m, m#x)'. Pivot columns 
must be comparable.
+Invalid pivot column `named_struct('course', 
__auto_generated_subquery_name`.`course, 'm', 
__auto_generated_subquery_name`.`m)`. Pivot columns must be comparable.
 
 
 -- !query
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
index b392b7536f5..4256e5bc164 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
@@ -2766,19 +2766,19 @@ class ColumnExpressionSuite extends QueryTest with 
SharedSparkSession {
       Seq((Period.ofYears(9999), 0)).toDF("i", "n").select($"i" / 
$"n").collect()
     }.getCause
     assert(e.isInstanceOf[ArithmeticException])
-    assert(e.getMessage.contains("divide by zero"))
+    assert(e.getMessage.contains("Division by zero"))
 
     val e2 = intercept[SparkException] {
       Seq((Period.ofYears(9999), 0d)).toDF("i", "n").select($"i" / 
$"n").collect()
     }.getCause
     assert(e2.isInstanceOf[ArithmeticException])
-    assert(e2.getMessage.contains("divide by zero"))
+    assert(e2.getMessage.contains("Division by zero"))
 
     val e3 = intercept[SparkException] {
       Seq((Period.ofYears(9999), BigDecimal(0))).toDF("i", "n").select($"i" / 
$"n").collect()
     }.getCause
     assert(e3.isInstanceOf[ArithmeticException])
-    assert(e3.getMessage.contains("divide by zero"))
+    assert(e3.getMessage.contains("Division by zero"))
   }
 
   test("SPARK-34875: divide day-time interval by numeric") {
@@ -2813,19 +2813,19 @@ class ColumnExpressionSuite extends QueryTest with 
SharedSparkSession {
       Seq((Duration.ofDays(9999), 0)).toDF("i", "n").select($"i" / 
$"n").collect()
     }.getCause
     assert(e.isInstanceOf[ArithmeticException])
-    assert(e.getMessage.contains("divide by zero"))
+    assert(e.getMessage.contains("Division by zero"))
 
     val e2 = intercept[SparkException] {
       Seq((Duration.ofDays(9999), 0d)).toDF("i", "n").select($"i" / 
$"n").collect()
     }.getCause
     assert(e2.isInstanceOf[ArithmeticException])
-    assert(e2.getMessage.contains("divide by zero"))
+    assert(e2.getMessage.contains("Division by zero"))
 
     val e3 = intercept[SparkException] {
       Seq((Duration.ofDays(9999), BigDecimal(0))).toDF("i", "n").select($"i" / 
$"n").collect()
     }.getCause
     assert(e3.isInstanceOf[ArithmeticException])
-    assert(e3.getMessage.contains("divide by zero"))
+    assert(e3.getMessage.contains("Division by zero"))
   }
 
   test("SPARK-34896: return day-time interval from dates subtraction") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index d4e48254016..d16416d600d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -3043,7 +3043,7 @@ class DataFrameSuite extends QueryTest
     ).foreach { case (schema, jsonData) =>
       withTempDir { dir =>
         val colName = "col"
-        val msg = "can only contain StringType as a key type for a MapType"
+        val msg = "can only contain STRING as a key type for a MAP"
 
         val thrown1 = intercept[AnalysisException](
           spark.read.schema(StructType(Seq(StructField(colName, schema))))
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala
index 77513c560f0..52aa1066f59 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala
@@ -897,7 +897,7 @@ abstract class SQLViewSuite extends QueryTest with 
SQLTestUtils {
             val e = intercept[ArithmeticException] {
               sql("SELECT * FROM v5").collect()
             }.getMessage
-            assert(e.contains("divide by zero"))
+            assert(e.contains("Division by zero"))
           }
         }
 
@@ -907,7 +907,7 @@ abstract class SQLViewSuite extends QueryTest with 
SQLTestUtils {
         val e = intercept[ArithmeticException] {
           sql("SELECT * FROM v1").collect()
         }.getMessage
-        assert(e.contains("divide by zero"))
+        assert(e.contains("Division by zero"))
       }
     }
   }
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala
index b9073237022..1297adb5135 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala
@@ -2332,7 +2332,7 @@ class FileStreamSourceSuite extends FileStreamSourceTest {
     ).foreach { schema =>
       withTempDir { dir =>
         val colName = "col"
-        val msg = "can only contain StringType as a key type for a MapType"
+        val msg = "can only contain STRING as a key type for a MAP"
 
         val thrown1 = intercept[AnalysisException](
           spark.readStream.schema(StructType(Seq(StructField(colName, 
schema))))

