This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new f01bff971e3 [SPARK-39007][SQL] Use double quotes for SQL configs in error messages
f01bff971e3 is described below

commit f01bff971e36870e101b2f76195e0d380db64e0c
Author: Max Gekk <max.g...@gmail.com>
AuthorDate: Mon Apr 25 11:55:34 2022 +0300

    [SPARK-39007][SQL] Use double quotes for SQL configs in error messages
    
    ### What changes were proposed in this pull request?
    Wrap SQL configs in error messages in double quotes. Added the `toSQLConf()` method to `QueryErrorsBase` so it can be invoked from `Query.*Errors`.
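
    A minimal sketch of the helper introduced here, taken from the `QueryErrorsBase` diff below, with an illustrative call site (the call site shown is representative, not exhaustive):
    ```scala
    // Wrap an element in double quotes (the default quoting for error messages).
    private def quoteByDefault(elem: String): String = {
      "\"" + elem + "\""
    }

    // Quote SQL config keys in error messages.
    def toSQLConf(conf: String): String = {
      quoteByDefault(conf)
    }

    // Illustrative call site in QueryExecutionErrors:
    //   messageParameters = Array(toSQLConf(SQLConf.ANSI_ENABLED.key), context)
    ```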
    
    ### Why are the changes needed?
    1. To highlight SQL configs and make them more visible to users.
    2. To be able to easily parse config names from error text.
    3. To be consistent with other quoted outputs such as identifiers and SQL statements, where Spark uses quotes or ticks.
    
    ### Does this PR introduce _any_ user-facing change?
    Yes, it changes user-facing error messages.
    
    ### How was this patch tested?
    By running the modified test suites:
    ```
    $ build/sbt "testOnly *QueryCompilationErrorsSuite"
    $ build/sbt "testOnly *QueryExecutionAnsiErrorsSuite"
    $ build/sbt "testOnly *QueryExecutionErrorsSuite"
    ```
    
    Closes #36335 from MaxGekk/output-conf-error-class.
    
    Authored-by: Max Gekk <max.g...@gmail.com>
    Signed-off-by: Max Gekk <max.g...@gmail.com>
---
 core/src/main/resources/error/error-classes.json   |  2 +-
 .../apache/spark/sql/errors/QueryErrorsBase.scala  | 12 +++++--
 .../spark/sql/errors/QueryExecutionErrors.scala    | 39 +++++++++++++---------
 .../resources/sql-tests/results/ansi/array.sql.out | 24 ++++++-------
 .../resources/sql-tests/results/ansi/cast.sql.out  |  2 +-
 .../ansi/decimalArithmeticOperations.sql.out       |  8 ++---
 .../sql-tests/results/ansi/interval.sql.out        |  6 ++--
 .../sql-tests/results/ansi/timestamp.sql.out       |  2 +-
 .../resources/sql-tests/results/interval.sql.out   |  4 +--
 .../sql-tests/results/postgreSQL/case.sql.out      |  6 ++--
 .../sql-tests/results/postgreSQL/float4.sql.out    |  6 ++--
 .../sql-tests/results/postgreSQL/float8.sql.out    |  2 +-
 .../sql-tests/results/postgreSQL/int8.sql.out      | 14 ++++----
 .../results/postgreSQL/select_having.sql.out       |  2 +-
 .../results/timestampNTZ/timestamp-ansi.sql.out    |  2 +-
 .../results/udf/postgreSQL/udf-case.sql.out        |  6 ++--
 .../udf/postgreSQL/udf-select_having.sql.out       |  2 +-
 .../sql/errors/QueryCompilationErrorsSuite.scala   |  2 +-
 .../sql/errors/QueryExecutionAnsiErrorsSuite.scala | 14 ++++----
 .../sql/errors/QueryExecutionErrorsSuite.scala     | 12 +++----
 20 files changed, 93 insertions(+), 74 deletions(-)

diff --git a/core/src/main/resources/error/error-classes.json 
b/core/src/main/resources/error/error-classes.json
index d6e510f659c..9cb4cb222aa 100644
--- a/core/src/main/resources/error/error-classes.json
+++ b/core/src/main/resources/error/error-classes.json
@@ -197,7 +197,7 @@
     "message" : [ "The operation is not supported: <operation>" ]
   },
   "UNTYPED_SCALA_UDF" : {
-    "message" : [ "You're using untyped Scala UDF, which does not have the 
input type information. Spark may blindly pass null to the Scala closure with 
primitive-type argument, and the closure will see the default value of the Java 
type for the null argument, e.g. `udf((x: Int) => x, IntegerType)`, the result 
is 0 for null input. To get rid of this error, you could:\n1. use typed Scala 
UDF APIs(without return type parameter), e.g. `udf((x: Int) => x)`\n2. use Java 
UDF APIs, e.g. `udf(ne [...]
+    "message" : [ "You're using untyped Scala UDF, which does not have the 
input type information. Spark may blindly pass null to the Scala closure with 
primitive-type argument, and the closure will see the default value of the Java 
type for the null argument, e.g. `udf((x: Int) => x, IntegerType)`, the result 
is 0 for null input. To get rid of this error, you could:\n1. use typed Scala 
UDF APIs(without return type parameter), e.g. `udf((x: Int) => x)`\n2. use Java 
UDF APIs, e.g. `udf(ne [...]
   },
   "WRITING_JOB_ABORTED" : {
     "message" : [ "Writing job aborted" ],
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
index 4400bedfd5d..1f8fa1e1b4c 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
@@ -48,9 +48,13 @@ trait QueryErrorsBase {
     litToErrorValue(Literal.create(v, t))
   }
 
+  private def quoteByDefault(elem: String): String = {
+    "\"" + elem + "\""
+  }
+
   // Quote sql statements in error messages.
   def toSQLStmt(text: String): String = {
-    "\"" + text.toUpperCase(Locale.ROOT) + "\""
+    quoteByDefault(text.toUpperCase(Locale.ROOT))
   }
 
   def toSQLId(parts: Seq[String]): String = {
@@ -62,6 +66,10 @@ trait QueryErrorsBase {
   }
 
   def toSQLType(t: DataType): String = {
-    "\"" + t.sql + "\""
+    quoteByDefault(t.sql)
+  }
+
+  def toSQLConf(conf: String): String = {
+    quoteByDefault(conf)
   }
 }
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
index c73b78b264c..59172682925 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
@@ -91,7 +91,8 @@ object QueryExecutionErrors extends QueryErrorsBase {
 
   def castingCauseOverflowError(t: Any, dataType: DataType): 
ArithmeticException = {
     new SparkArithmeticException(errorClass = "CAST_CAUSES_OVERFLOW",
-      messageParameters = Array(toSQLValue(t), toSQLType(dataType), 
SQLConf.ANSI_ENABLED.key))
+      messageParameters = Array(
+        toSQLValue(t), toSQLType(dataType), 
toSQLConf(SQLConf.ANSI_ENABLED.key)))
   }
 
   def cannotChangeDecimalPrecisionError(
@@ -99,9 +100,14 @@ object QueryExecutionErrors extends QueryErrorsBase {
       decimalPrecision: Int,
       decimalScale: Int,
       context: String): ArithmeticException = {
-    new SparkArithmeticException(errorClass = 
"CANNOT_CHANGE_DECIMAL_PRECISION",
-      messageParameters = Array(value.toDebugString,
-        decimalPrecision.toString, decimalScale.toString, 
SQLConf.ANSI_ENABLED.key, context))
+    new SparkArithmeticException(
+      errorClass = "CANNOT_CHANGE_DECIMAL_PRECISION",
+      messageParameters = Array(
+        value.toDebugString,
+        decimalPrecision.toString,
+        decimalScale.toString,
+        toSQLConf(SQLConf.ANSI_ENABLED.key),
+        context))
   }
 
   def invalidInputSyntaxForNumericError(
@@ -148,7 +154,8 @@ object QueryExecutionErrors extends QueryErrorsBase {
 
   def divideByZeroError(context: String): ArithmeticException = {
     new SparkArithmeticException(
-      errorClass = "DIVIDE_BY_ZERO", messageParameters = 
Array(SQLConf.ANSI_ENABLED.key, context))
+      errorClass = "DIVIDE_BY_ZERO",
+      messageParameters = Array(toSQLConf(SQLConf.ANSI_ENABLED.key), context))
   }
 
   def invalidArrayIndexError(index: Int, numElements: Int): 
ArrayIndexOutOfBoundsException = {
@@ -163,8 +170,9 @@ object QueryExecutionErrors extends QueryErrorsBase {
       index: Int,
       numElements: Int,
       key: String): ArrayIndexOutOfBoundsException = {
-    new SparkArrayIndexOutOfBoundsException(errorClass = "INVALID_ARRAY_INDEX",
-      messageParameters = Array(toSQLValue(index), toSQLValue(numElements), 
key))
+    new SparkArrayIndexOutOfBoundsException(
+      errorClass = "INVALID_ARRAY_INDEX",
+      messageParameters = Array(toSQLValue(index), toSQLValue(numElements), 
toSQLConf(key)))
   }
 
   def invalidElementAtIndexError(
@@ -173,7 +181,7 @@ object QueryExecutionErrors extends QueryErrorsBase {
     new SparkArrayIndexOutOfBoundsException(
       errorClass = "INVALID_ARRAY_INDEX_IN_ELEMENT_AT",
       messageParameters =
-        Array(toSQLValue(index), toSQLValue(numElements), 
SQLConf.ANSI_ENABLED.key))
+        Array(toSQLValue(index), toSQLValue(numElements), 
toSQLConf(SQLConf.ANSI_ENABLED.key)))
   }
 
   def mapKeyNotExistError(key: Any, context: String): NoSuchElementException = 
{
@@ -182,8 +190,9 @@ object QueryExecutionErrors extends QueryErrorsBase {
   }
 
   def invalidFractionOfSecondError(): DateTimeException = {
-    new SparkDateTimeException(errorClass = "INVALID_FRACTION_OF_SECOND",
-      Array(SQLConf.ANSI_ENABLED.key))
+    new SparkDateTimeException(
+      errorClass = "INVALID_FRACTION_OF_SECOND",
+      Array(toSQLConf(SQLConf.ANSI_ENABLED.key)))
   }
 
   def ansiDateTimeParseError(e: DateTimeParseException): 
DateTimeParseException = {
@@ -521,10 +530,10 @@ object QueryExecutionErrors extends QueryErrorsBase {
            |from $format files can be ambiguous, as the files may be written by
            |Spark 2.x or legacy versions of Hive, which uses a legacy hybrid 
calendar
            |that is different from Spark 3.0+'s Proleptic Gregorian calendar.
-           |See more details in SPARK-31404. You can set the SQL config 
'$config' or
+           |See more details in SPARK-31404. You can set the SQL config 
${toSQLConf(config)} or
            |the datasource option '$option' to 'LEGACY' to rebase the datetime 
values
            |w.r.t. the calendar difference during reading. To read the 
datetime values
-           |as it is, set the SQL config '$config' or the datasource option 
'$option'
+           |as it is, set the SQL config ${toSQLConf(config)} or the 
datasource option '$option'
            |to 'CORRECTED'.
            |""".stripMargin),
       cause = null
@@ -541,10 +550,10 @@ object QueryExecutionErrors extends QueryErrorsBase {
            |into $format files can be dangerous, as the files may be read by 
Spark 2.x
            |or legacy versions of Hive later, which uses a legacy hybrid 
calendar that
            |is different from Spark 3.0+'s Proleptic Gregorian calendar. See 
more
-           |details in SPARK-31404. You can set $config to 'LEGACY' to rebase 
the
+           |details in SPARK-31404. You can set ${toSQLConf(config)} to 
'LEGACY' to rebase the
            |datetime values w.r.t. the calendar difference during writing, to 
get maximum
-           |interoperability. Or set $config to 'CORRECTED' to write the 
datetime values
-           |as it is, if you are 100% sure that the written files will only be 
read by
+           |interoperability. Or set ${toSQLConf(config)} to 'CORRECTED' to 
write the datetime
+           |values as it is, if you are 100% sure that the written files will 
only be read by
            |Spark 3.0+ or other systems that use Proleptic Gregorian calendar.
            |""".stripMargin),
       cause = null
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out 
b/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out
index accc1f239be..fb148bbbe19 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out
@@ -168,7 +168,7 @@ select element_at(array(1, 2, 3), 5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: 5, numElements: 3. To 
return NULL instead, use 'try_element_at'. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: 5, numElements: 3. To 
return NULL instead, use 'try_element_at'. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -177,7 +177,7 @@ select element_at(array(1, 2, 3), -5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: -5, numElements: 3. To 
return NULL instead, use 'try_element_at'. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: -5, numElements: 3. To 
return NULL instead, use 'try_element_at'. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -195,7 +195,7 @@ select elt(4, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 4, numElements: 2. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: 4, numElements: 2. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -204,7 +204,7 @@ select elt(0, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 0, numElements: 2. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: 0, numElements: 2. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -213,7 +213,7 @@ select elt(-1, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 2. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 2. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -254,7 +254,7 @@ select array(1, 2, 3)[5]
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 5, numElements: 3. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: 5, numElements: 3. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -263,7 +263,7 @@ select array(1, 2, 3)[-1]
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 3. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 3. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -337,7 +337,7 @@ select element_at(array(1, 2, 3), 5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: 5, numElements: 3. To 
return NULL instead, use 'try_element_at'. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: 5, numElements: 3. To 
return NULL instead, use 'try_element_at'. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -346,7 +346,7 @@ select element_at(array(1, 2, 3), -5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: -5, numElements: 3. To 
return NULL instead, use 'try_element_at'. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: -5, numElements: 3. To 
return NULL instead, use 'try_element_at'. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -364,7 +364,7 @@ select elt(4, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 4, numElements: 2. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: 4, numElements: 2. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -373,7 +373,7 @@ select elt(0, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 0, numElements: 2. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: 0, numElements: 2. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -382,4 +382,4 @@ select elt(-1, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 2. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 2. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out 
b/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out
index 114c7307370..96db4f2db42 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out
@@ -666,7 +666,7 @@ select cast('123.45' as decimal(4, 2))
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,123.45,5,2}) cannot be 
represented as Decimal(4, 2). If necessary set spark.sql.ansi.enabled to false 
to bypass this error.
+[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,123.45,5,2}) cannot be 
represented as Decimal(4, 2). If necessary set "spark.sql.ansi.enabled" to 
false to bypass this error.
 == SQL(line 1, position 7) ==
 select cast('123.45' as decimal(4, 2))
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git 
a/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out
 
b/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out
index 94b52d3afc1..1640875973e 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/ansi/decimalArithmeticOperations.sql.out
@@ -76,7 +76,7 @@ select (5e36BD + 0.1) + 5e36BD
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CANNOT_CHANGE_DECIMAL_PRECISION] 
Decimal(expanded,10000000000000000000000000000000000000.1,39,1}) cannot be 
represented as Decimal(38, 1). If necessary set spark.sql.ansi.enabled to false 
to bypass this error.
+[CANNOT_CHANGE_DECIMAL_PRECISION] 
Decimal(expanded,10000000000000000000000000000000000000.1,39,1}) cannot be 
represented as Decimal(38, 1). If necessary set "spark.sql.ansi.enabled" to 
false to bypass this error.
 == SQL(line 1, position 7) ==
 select (5e36BD + 0.1) + 5e36BD
        ^^^^^^^^^^^^^^^^^^^^^^^
@@ -88,7 +88,7 @@ select (-4e36BD - 0.1) - 7e36BD
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CANNOT_CHANGE_DECIMAL_PRECISION] 
Decimal(expanded,-11000000000000000000000000000000000000.1,39,1}) cannot be 
represented as Decimal(38, 1). If necessary set spark.sql.ansi.enabled to false 
to bypass this error.
+[CANNOT_CHANGE_DECIMAL_PRECISION] 
Decimal(expanded,-11000000000000000000000000000000000000.1,39,1}) cannot be 
represented as Decimal(38, 1). If necessary set "spark.sql.ansi.enabled" to 
false to bypass this error.
 == SQL(line 1, position 7) ==
 select (-4e36BD - 0.1) - 7e36BD
        ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -100,7 +100,7 @@ select 12345678901234567890.0 * 12345678901234567890.0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CANNOT_CHANGE_DECIMAL_PRECISION] 
Decimal(expanded,152415787532388367501905199875019052100,39,0}) cannot be 
represented as Decimal(38, 2). If necessary set spark.sql.ansi.enabled to false 
to bypass this error.
+[CANNOT_CHANGE_DECIMAL_PRECISION] 
Decimal(expanded,152415787532388367501905199875019052100,39,0}) cannot be 
represented as Decimal(38, 2). If necessary set "spark.sql.ansi.enabled" to 
false to bypass this error.
 == SQL(line 1, position 7) ==
 select 12345678901234567890.0 * 12345678901234567890.0
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -112,7 +112,7 @@ select 1e35BD / 0.1
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CANNOT_CHANGE_DECIMAL_PRECISION] 
Decimal(expanded,1000000000000000000000000000000000000,37,0}) cannot be 
represented as Decimal(38, 6). If necessary set spark.sql.ansi.enabled to false 
to bypass this error.
+[CANNOT_CHANGE_DECIMAL_PRECISION] 
Decimal(expanded,1000000000000000000000000000000000000,37,0}) cannot be 
represented as Decimal(38, 6). If necessary set "spark.sql.ansi.enabled" to 
false to bypass this error.
 == SQL(line 1, position 7) ==
 select 1e35BD / 0.1
        ^^^^^^^^^^^^
diff --git 
a/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out 
b/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
index f5687f22b2d..94d69354546 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
@@ -228,7 +228,7 @@ select interval '2 seconds' / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 7) ==
 select interval '2 seconds' / 0
        ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -264,7 +264,7 @@ select interval '2' year / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 7) ==
 select interval '2' year / 0
        ^^^^^^^^^^^^^^^^^^^^^
@@ -664,7 +664,7 @@ select make_interval(0, 0, 0, 0, 0, 0, 1234567890123456789)
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,1234567890123456789,20,0}) 
cannot be represented as Decimal(18, 6). If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[CANNOT_CHANGE_DECIMAL_PRECISION] Decimal(expanded,1234567890123456789,20,0}) 
cannot be represented as Decimal(18, 6). If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 == SQL(line 1, position 7) ==
 select make_interval(0, 0, 0, 0, 0, 0, 1234567890123456789)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git 
a/sql/core/src/test/resources/sql-tests/results/ansi/timestamp.sql.out 
b/sql/core/src/test/resources/sql-tests/results/ansi/timestamp.sql.out
index 16255ae3fde..5183a4d9a7c 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/timestamp.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/timestamp.sql.out
@@ -98,7 +98,7 @@ SELECT make_timestamp(2021, 07, 11, 6, 30, 60.007)
 struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
-[INVALID_FRACTION_OF_SECOND] The fraction of sec must be zero. Valid range is 
[0, 60]. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_FRACTION_OF_SECOND] The fraction of sec must be zero. Valid range is 
[0, 60]. If necessary set "spark.sql.ansi.enabled" to false to bypass this 
error.
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/interval.sql.out 
b/sql/core/src/test/resources/sql-tests/results/interval.sql.out
index b1baa7ac392..19412d04194 100644
--- a/sql/core/src/test/resources/sql-tests/results/interval.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/interval.sql.out
@@ -204,7 +204,7 @@ select interval '2 seconds' / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 7) ==
 select interval '2 seconds' / 0
        ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -240,7 +240,7 @@ select interval '2' year / 0
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 7) ==
 select interval '2' year / 0
        ^^^^^^^^^^^^^^^^^^^^^
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out
index 93b9deb9520..8932672d7a2 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out
@@ -179,7 +179,7 @@ SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 26) ==
 SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END
                           ^^^
@@ -191,7 +191,7 @@ SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 26) ==
 SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END
                           ^^^
@@ -203,7 +203,7 @@ SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 30) ==
 SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl
                               ^^^
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out
index 905cf2c4c35..94b4ce3bb2f 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out
@@ -340,7 +340,7 @@ SELECT int(float('2147483647'))
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CAST_CAUSES_OVERFLOW] Casting 2.14748365E9 to "INT" causes overflow. To 
return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to 
false to bypass this error.
+[CAST_CAUSES_OVERFLOW] Casting 2.14748365E9 to "INT" causes overflow. To 
return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" 
to false to bypass this error.
 
 
 -- !query
@@ -357,7 +357,7 @@ SELECT int(float('-2147483900'))
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CAST_CAUSES_OVERFLOW] Casting -2.1474839E9 to "INT" causes overflow. To 
return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to 
false to bypass this error.
+[CAST_CAUSES_OVERFLOW] Casting -2.1474839E9 to "INT" causes overflow. To 
return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" 
to false to bypass this error.
 
 
 -- !query
@@ -390,7 +390,7 @@ SELECT bigint(float('-9223380000000000000'))
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CAST_CAUSES_OVERFLOW] Casting -9.22338E18 to "BIGINT" causes overflow. To 
return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to 
false to bypass this error.
+[CAST_CAUSES_OVERFLOW] Casting -9.22338E18 to "BIGINT" causes overflow. To 
return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" 
to false to bypass this error.
 
 
 -- !query
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out
index 88d9565f0fb..3cdbe4c4f9a 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out
@@ -845,7 +845,7 @@ SELECT bigint(double('-9223372036854780000'))
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CAST_CAUSES_OVERFLOW] Casting -9.22337203685478E18D to "BIGINT" causes 
overflow. To return NULL instead, use 'try_cast'. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[CAST_CAUSES_OVERFLOW] Casting -9.22337203685478E18D to "BIGINT" causes 
overflow. To return NULL instead, use 'try_cast'. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out
index 157cd39d767..54b3c4410ac 100755
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out
@@ -575,7 +575,7 @@ select bigint('9223372036854775800') / bigint('0')
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 7) ==
 select bigint('9223372036854775800') / bigint('0')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -587,7 +587,7 @@ select bigint('-9223372036854775808') / smallint('0')
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 7) ==
 select bigint('-9223372036854775808') / smallint('0')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -599,7 +599,7 @@ select smallint('100') / bigint('0')
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 7) ==
 select smallint('100') / bigint('0')
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -619,7 +619,7 @@ SELECT CAST(q1 AS int) FROM int8_tbl WHERE q2 <> 456
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CAST_CAUSES_OVERFLOW] Casting 4567890123456789L to "INT" causes overflow. To 
return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled to 
false to bypass this error.
+[CAST_CAUSES_OVERFLOW] Casting 4567890123456789L to "INT" causes overflow. To 
return NULL instead, use 'try_cast'. If necessary set "spark.sql.ansi.enabled" 
to false to bypass this error.
 
 
 -- !query
@@ -636,7 +636,7 @@ SELECT CAST(q1 AS smallint) FROM int8_tbl WHERE q2 <> 456
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CAST_CAUSES_OVERFLOW] Casting 4567890123456789L to "SMALLINT" causes 
overflow. To return NULL instead, use 'try_cast'. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[CAST_CAUSES_OVERFLOW] Casting 4567890123456789L to "SMALLINT" causes 
overflow. To return NULL instead, use 'try_cast'. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -673,7 +673,7 @@ SELECT CAST(double('922337203685477580700.0') AS bigint)
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CAST_CAUSES_OVERFLOW] Casting 9.223372036854776E20D to "BIGINT" causes 
overflow. To return NULL instead, use 'try_cast'. If necessary set 
spark.sql.ansi.enabled to false to bypass this error.
+[CAST_CAUSES_OVERFLOW] Casting 9.223372036854776E20D to "BIGINT" causes 
overflow. To return NULL instead, use 'try_cast'. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
@@ -745,7 +745,7 @@ SELECT string(int(shiftleft(bigint(-1), 63))+1)
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[CAST_CAUSES_OVERFLOW] Casting -9223372036854775808L to "INT" causes overflow. 
To return NULL instead, use 'try_cast'. If necessary set spark.sql.ansi.enabled 
to false to bypass this error.
+[CAST_CAUSES_OVERFLOW] Casting -9223372036854775808L to "INT" causes overflow. 
To return NULL instead, use 'try_cast'. If necessary set 
"spark.sql.ansi.enabled" to false to bypass this error.
 
 
 -- !query
diff --git 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
index 43e4de5cb58..618f57b1cf0 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
@@ -177,7 +177,7 @@ SELECT 1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 39) ==
 ...1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2
                                    ^^^
diff --git 
a/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out
 
b/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out
index 0911d814b34..920f3a7462b 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out
@@ -98,7 +98,7 @@ SELECT make_timestamp(2021, 07, 11, 6, 30, 60.007)
 struct<>
 -- !query output
 org.apache.spark.SparkDateTimeException
-[INVALID_FRACTION_OF_SECOND] The fraction of sec must be zero. Valid range is 
[0, 60]. If necessary set spark.sql.ansi.enabled to false to bypass this error.
+[INVALID_FRACTION_OF_SECOND] The fraction of sec must be zero. Valid range is 
[0, 60]. If necessary set "spark.sql.ansi.enabled" to false to bypass this 
error.
 
 
 -- !query
diff --git 
a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out 
b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out
index cd1e2306cad..b6591be87a8 100755
--- 
a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out
@@ -179,7 +179,7 @@ SELECT CASE WHEN udf(1=0) THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 
END
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 31) ==
 SELECT CASE WHEN udf(1=0) THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END
                                ^^^
@@ -191,7 +191,7 @@ SELECT CASE 1 WHEN 0 THEN 1/udf(0) WHEN 1 THEN 1 ELSE 2/0 
END
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 54) ==
 ...HEN 1/udf(0) WHEN 1 THEN 1 ELSE 2/0 END
                                    ^^^
@@ -203,7 +203,7 @@ SELECT CASE WHEN i > 100 THEN udf(1/0) ELSE udf(0) END FROM 
case_tbl
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 34) ==
 ...LECT CASE WHEN i > 100 THEN udf(1/0) ELSE udf(0) END FROM case_tbl
                                    ^^^
diff --git 
a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
 
b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
index a6ade1ea159..60043b7b01d 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
@@ -177,7 +177,7 @@ SELECT 1 AS one FROM test_having WHERE 1/udf(a) = 1 HAVING 
1 < 2
 struct<>
 -- !query output
 org.apache.spark.SparkArithmeticException
-[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set spark.sql.ansi.enabled to false (except for ANSI interval type) 
to bypass this error.
+[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) 
to bypass this error.
 == SQL(line 1, position 39) ==
 ...1 AS one FROM test_having WHERE 1/udf(a) = 1 HAVING 1 < 2
                                    ^^^^^^^^
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
index 66d7465c184..8b63ba52ab8 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
@@ -227,7 +227,7 @@ class QueryCompilationErrorsSuite
         "2. use Java UDF APIs, e.g. `udf(new UDF1[String, Integer] { " +
         "override def call(s: String): Integer = s.length() }, IntegerType)`, 
" +
         "if input types are all non primitive\n" +
-        s"3. set ${SQLConf.LEGACY_ALLOW_UNTYPED_SCALA_UDF.key} to true and " +
+        s"""3. set "${SQLConf.LEGACY_ALLOW_UNTYPED_SCALA_UDF.key}" to true and 
""" +
         s"use this API with caution")
   }
 
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
index fa44036f90c..b49440a770e 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
@@ -24,6 +24,8 @@ import org.apache.spark.sql.internal.SQLConf
 class QueryExecutionAnsiErrorsSuite extends QueryTest with 
QueryErrorsSuiteBase {
   override def sparkConf: SparkConf = 
super.sparkConf.set(SQLConf.ANSI_ENABLED.key, "true")
 
+  private val ansiConf = "\"" + SQLConf.ANSI_ENABLED.key + "\""
+
   test("CAST_CAUSES_OVERFLOW: from timestamp to int") {
     checkErrorClass(
       exception = intercept[SparkArithmeticException] {
@@ -33,7 +35,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with 
QueryErrorsSuiteBase
       msg =
         "Casting 253402258394567890L to \"INT\" causes overflow. " +
         "To return NULL instead, use 'try_cast'. " +
-        "If necessary set spark.sql.ansi.enabled to false to bypass this 
error.",
+        s"If necessary set $ansiConf to false to bypass this error.",
       sqlState = Some("22005"))
   }
 
@@ -45,7 +47,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with 
QueryErrorsSuiteBase
       errorClass = "DIVIDE_BY_ZERO",
       msg =
         "divide by zero. To return NULL instead, use 'try_divide'. If 
necessary set " +
-        "spark.sql.ansi.enabled to false (except for ANSI interval type) to 
bypass this error." +
+        s"$ansiConf to false (except for ANSI interval type) to bypass this 
error." +
         """
           |== SQL(line 1, position 7) ==
           |select 6/0
@@ -61,7 +63,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with 
QueryErrorsSuiteBase
       },
       errorClass = "INVALID_FRACTION_OF_SECOND",
       msg = "The fraction of sec must be zero. Valid range is [0, 60]. " +
-        "If necessary set spark.sql.ansi.enabled to false to bypass this 
error. ",
+        s"If necessary set $ansiConf to false to bypass this error. ",
       sqlState = Some("22023"))
   }
 
@@ -73,7 +75,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with 
QueryErrorsSuiteBase
       errorClass = "CANNOT_CHANGE_DECIMAL_PRECISION",
       msg =
         "Decimal(expanded,66666666666666.666,17,3}) cannot be represented as 
Decimal(8, 1). " +
-        "If necessary set spark.sql.ansi.enabled to false to bypass this 
error." +
+        s"If necessary set $ansiConf to false to bypass this error." +
         """
           |== SQL(line 1, position 7) ==
           |select CAST('66666666666666.666' AS DECIMAL(8, 1))
@@ -89,7 +91,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with 
QueryErrorsSuiteBase
       },
       errorClass = "INVALID_ARRAY_INDEX",
       msg = "Invalid index: 8, numElements: 5. " +
-        "If necessary set spark.sql.ansi.enabled to false to bypass this 
error."
+        s"If necessary set $ansiConf to false to bypass this error."
     )
   }
 
@@ -101,7 +103,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with 
QueryErrorsSuiteBase
       errorClass = "INVALID_ARRAY_INDEX_IN_ELEMENT_AT",
       msg = "Invalid index: 8, numElements: 5. " +
         "To return NULL instead, use 'try_element_at'. " +
-        "If necessary set spark.sql.ansi.enabled to false to bypass this 
error."
+        s"If necessary set $ansiConf to false to bypass this error."
     )
   }
 }
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
index 418b5d211d6..f84f159f6f0 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
@@ -206,7 +206,7 @@ class QueryExecutionErrorsSuite
       }.getCause.asInstanceOf[SparkUpgradeException]
 
       val format = "Parquet"
-      val config = SQLConf.PARQUET_REBASE_MODE_IN_READ.key
+      val config = "\"" + SQLConf.PARQUET_REBASE_MODE_IN_READ.key + "\""
       val option = "datetimeRebaseMode"
       checkErrorClass(
         exception = e,
@@ -218,10 +218,10 @@ class QueryExecutionErrorsSuite
             |from $format files can be ambiguous, as the files may be written 
by
             |Spark 2.x or legacy versions of Hive, which uses a legacy hybrid 
calendar
             |that is different from Spark 3.0+'s Proleptic Gregorian calendar.
-            |See more details in SPARK-31404. You can set the SQL config 
'$config' or
+            |See more details in SPARK-31404. You can set the SQL config 
$config or
             |the datasource option '$option' to 'LEGACY' to rebase the 
datetime values
             |w.r.t. the calendar difference during reading. To read the 
datetime values
-            |as it is, set the SQL config '$config' or the datasource option 
'$option'
+            |as it is, set the SQL config $config or the datasource option 
'$option'
             |to 'CORRECTED'.
             |""".stripMargin)
     }
@@ -235,7 +235,7 @@ class QueryExecutionErrorsSuite
         }.getCause.getCause.getCause.asInstanceOf[SparkUpgradeException]
 
         val format = "Parquet"
-        val config = SQLConf.PARQUET_REBASE_MODE_IN_WRITE.key
+        val config = "\"" + SQLConf.PARQUET_REBASE_MODE_IN_WRITE.key + "\""
         checkErrorClass(
           exception = e,
           errorClass = "INCONSISTENT_BEHAVIOR_CROSS_VERSION",
@@ -248,8 +248,8 @@ class QueryExecutionErrorsSuite
               |is different from Spark 3.0+'s Proleptic Gregorian calendar. 
See more
               |details in SPARK-31404. You can set $config to 'LEGACY' to 
rebase the
               |datetime values w.r.t. the calendar difference during writing, 
to get maximum
-              |interoperability. Or set $config to 'CORRECTED' to write the 
datetime values
-              |as it is, if you are 100% sure that the written files will only 
be read by
+              |interoperability. Or set $config to 'CORRECTED' to write the 
datetime
+              |values as it is, if you are 100% sure that the written files 
will only be read by
               |Spark 3.0+ or other systems that use Proleptic Gregorian 
calendar.
               |""".stripMargin)
       }


