This is an automated email from the ASF dual-hosted git repository.
maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 625afb4e1ae [SPARK-39255][SQL] Improve error messages
625afb4e1ae is described below
commit 625afb4e1aefda59191d79b31f8c94941aedde1e
Author: Max Gekk <[email protected]>
AuthorDate: Tue May 24 14:15:38 2022 +0300
[SPARK-39255][SQL] Improve error messages
### What changes were proposed in this pull request?
In the PR, I propose to improve errors of the following error classes:
1. NON_PARTITION_COLUMN - `a non-partition column name` -> `the
non-partition column`
2. UNSUPPORTED_SAVE_MODE - `a not existent path` -> `a non-existent path`.
3. INVALID_FIELD_NAME. Quote ids to follow the rules
https://github.com/apache/spark/pull/36621.
4. FAILED_SET_ORIGINAL_PERMISSION_BACK. It is renamed to
RESET_PERMISSION_TO_ORIGINAL.
5. NON_LITERAL_PIVOT_VALUES - Wrap error's expression by double quotes. The
PR adds new helper method `toSQLExpr()` for that.
6. CAST_INVALID_INPUT - Add the recommendation: `... Correct the value as per
the syntax, or change its target type.`
### Why are the changes needed?
To improve the user experience with Spark SQL by making error messages
clearer.
### Does this PR introduce _any_ user-facing change?
Yes, it changes user-facing error messages.
### How was this patch tested?
By running the affected test suites:
```
$ build/sbt "sql/testOnly org.apache.spark.sql.SQLQueryTestSuite"
$ build/sbt "sql/testOnly *QueryCompilationErrorsDSv2Suite"
$ build/sbt "sql/testOnly *QueryCompilationErrorsSuite"
$ build/sbt "sql/testOnly *QueryExecutionAnsiErrorsSuite"
$ build/sbt "sql/testOnly *QueryExecutionErrorsSuite"
$ build/sbt "sql/testOnly *QueryParsingErrorsSuite*"
```
Closes #36635 from MaxGekk/error-class-improve-msg-3.
Lead-authored-by: Max Gekk <[email protected]>
Co-authored-by: Maxim Gekk <[email protected]>
Signed-off-by: Max Gekk <[email protected]>
---
core/src/main/resources/error/error-classes.json | 14 ++---
.../spark/sql/errors/QueryCompilationErrors.scala | 4 +-
.../apache/spark/sql/errors/QueryErrorsBase.scala | 2 +
.../spark/sql/errors/QueryExecutionErrors.scala | 2 +-
.../apache/spark/sql/types/StructTypeSuite.scala | 22 ++++---
.../resources/sql-tests/results/ansi/cast.sql.out | 68 +++++++++++-----------
.../resources/sql-tests/results/ansi/date.sql.out | 6 +-
.../results/ansi/datetime-parsing-invalid.sql.out | 4 +-
.../sql-tests/results/ansi/interval.sql.out | 20 +++----
.../results/ansi/string-functions.sql.out | 8 +--
.../test/resources/sql-tests/results/pivot.sql.out | 2 +-
.../sql-tests/results/postgreSQL/boolean.sql.out | 32 +++++-----
.../sql-tests/results/postgreSQL/float4.sql.out | 8 +--
.../sql-tests/results/postgreSQL/float8.sql.out | 8 +--
.../sql-tests/results/postgreSQL/text.sql.out | 4 +-
.../results/postgreSQL/window_part2.sql.out | 2 +-
.../results/postgreSQL/window_part3.sql.out | 2 +-
.../results/postgreSQL/window_part4.sql.out | 2 +-
.../results/timestampNTZ/timestamp-ansi.sql.out | 2 +-
.../sql-tests/results/udf/udf-pivot.sql.out | 2 +-
.../errors/QueryCompilationErrorsDSv2Suite.scala | 4 +-
.../sql/errors/QueryCompilationErrorsSuite.scala | 5 +-
.../sql/errors/QueryExecutionAnsiErrorsSuite.scala | 3 +-
.../sql/errors/QueryExecutionErrorsSuite.scala | 6 +-
24 files changed, 119 insertions(+), 113 deletions(-)
diff --git a/core/src/main/resources/error/error-classes.json
b/core/src/main/resources/error/error-classes.json
index eb328c6e20a..23f99524a7e 100644
--- a/core/src/main/resources/error/error-classes.json
+++ b/core/src/main/resources/error/error-classes.json
@@ -23,7 +23,7 @@
"message" : [ "Cannot up cast <value> from <sourceType> to
<targetType>.\n<details>" ]
},
"CAST_INVALID_INPUT" : {
- "message" : [ "The value <value> of the type <sourceType> cannot be cast
to <targetType> because it is malformed. To return NULL instead, use
`try_cast`. If necessary set <config> to \"false\" to bypass this error." ],
+ "message" : [ "The value <value> of the type <sourceType> cannot be cast
to <targetType> because it is malformed. Correct the value as per the syntax,
or change its target type. To return NULL instead, use `try_cast`. If necessary
set <config> to \"false\" to bypass this error." ],
"sqlState" : "42000"
},
"CAST_OVERFLOW" : {
@@ -52,9 +52,6 @@
"message" : [ "Failed to rename <sourcePath> to <targetPath> as
destination already exists" ],
"sqlState" : "22023"
},
- "FAILED_SET_ORIGINAL_PERMISSION_BACK" : {
- "message" : [ "Failed to set original permission <permission> back to the
created path: <path>. Exception: <message>" ]
- },
"FORBIDDEN_OPERATION" : {
"message" : [ "The operation <statement> is not allowed on <objectType>:
<objectName>" ]
},
@@ -164,11 +161,11 @@
"message" : [ "more than one row returned by a subquery used as an
expression: <plan>" ]
},
"NON_LITERAL_PIVOT_VALUES" : {
- "message" : [ "Literal expressions required for pivot values, found
'<expression>'" ],
+ "message" : [ "Literal expressions required for pivot values, found
<expression>." ],
"sqlState" : "42000"
},
"NON_PARTITION_COLUMN" : {
- "message" : [ "PARTITION clause cannot contain a non-partition column
name: <columnName>" ],
+ "message" : [ "PARTITION clause cannot contain the non-partition column:
<columnName>." ],
"sqlState" : "42000"
},
"NO_HANDLER_FOR_UDAF" : {
@@ -197,6 +194,9 @@
"message" : [ "Failed to rename as <sourcePath> was not found" ],
"sqlState" : "22023"
},
+ "RESET_PERMISSION_TO_ORIGINAL" : {
+ "message" : [ "Failed to set original permission <permission> back to the
created path: <path>. Exception: <message>" ]
+ },
"SECOND_FUNCTION_ARGUMENT_NOT_INTEGER" : {
"message" : [ "The second argument of '<functionName>' function needs to
be an integer." ],
"sqlState" : "22023"
@@ -322,7 +322,7 @@
"message" : [ "an existent path." ]
},
"NON_EXISTENT_PATH" : {
- "message" : [ "a not existent path." ]
+ "message" : [ "a non-existent path." ]
}
}
},
diff --git
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
index 008f13961a6..a9885c6c41a 100644
---
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
+++
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
@@ -82,7 +82,7 @@ object QueryCompilationErrors extends QueryErrorsBase {
def nonLiteralPivotValError(pivotVal: Expression): Throwable = {
new AnalysisException(
errorClass = "NON_LITERAL_PIVOT_VALUES",
- messageParameters = Array(pivotVal.toString))
+ messageParameters = Array(toSQLExpr(pivotVal)))
}
def pivotValDataTypeMismatchError(pivotVal: Expression, pivotCol:
Expression): Throwable = {
@@ -2364,7 +2364,7 @@ object QueryCompilationErrors extends QueryErrorsBase {
def invalidFieldName(fieldName: Seq[String], path: Seq[String], context:
Origin): Throwable = {
new AnalysisException(
errorClass = "INVALID_FIELD_NAME",
- messageParameters = Array(fieldName.quoted, path.quoted),
+ messageParameters = Array(toSQLId(fieldName), toSQLId(path)),
origin = context)
}
diff --git
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
index 5253f0ec877..7369fd82cb8 100644
---
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
+++
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
@@ -39,6 +39,8 @@ import org.apache.spark.sql.types.{DataType, DoubleType,
FloatType}
* For example: "spark.sql.ansi.enabled".
* 6. Any values of datasource options or SQL configs shall be double quoted.
* For example: "true", "CORRECTED".
+ * 7. SQL expressions shall be wrapped by double quotes.
+ * For example: "earnings + 1".
*/
trait QueryErrorsBase {
// Converts an error class parameter to its SQL representation
diff --git
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
index f79b30f0d0f..937dee32177 100644
---
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
+++
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
@@ -1659,7 +1659,7 @@ object QueryExecutionErrors extends QueryErrorsBase {
permission: FsPermission,
path: Path,
e: Throwable): Throwable = {
- new SparkSecurityException(errorClass =
"FAILED_SET_ORIGINAL_PERMISSION_BACK",
+ new SparkSecurityException(errorClass = "RESET_PERMISSION_TO_ORIGINAL",
Array(permission.toString, path.toString, e.getMessage))
}
diff --git
a/sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala
b/sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala
index ef29f7b9cbb..3aca7b1e52e 100644
---
a/sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala
+++
b/sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala
@@ -321,7 +321,8 @@ class StructTypeSuite extends SparkFunSuite with SQLHelper {
var e = intercept[AnalysisException] {
check(Seq("S1", "S12", "S123"), None)
}
- assert(e.getMessage.contains("Field name S1.S12.S123 is invalid: s1.s12 is
not a struct"))
+ assert(e.getMessage.contains(
+ "Field name `S1`.`S12`.`S123` is invalid: `s1`.`s12` is not a struct"))
// ambiguous name
e = intercept[AnalysisException] {
@@ -335,17 +336,19 @@ class StructTypeSuite extends SparkFunSuite with
SQLHelper {
e = intercept[AnalysisException] {
check(Seq("m1", "key"), None)
}
- assert(e.getMessage.contains("Field name m1.key is invalid: m1 is not a
struct"))
+ assert(e.getMessage.contains("Field name `m1`.`key` is invalid: `m1` is
not a struct"))
checkCollection(Seq("m1", "key"), Some(Seq("m1") -> StructField("key",
IntegerType, false)))
checkCollection(Seq("M1", "value"), Some(Seq("m1") -> StructField("value",
IntegerType)))
e = intercept[AnalysisException] {
checkCollection(Seq("M1", "key", "name"), None)
}
- assert(e.getMessage.contains("Field name M1.key.name is invalid: m1.key is
not a struct"))
+ assert(e.getMessage.contains(
+ "Field name `M1`.`key`.`name` is invalid: `m1`.`key` is not a struct"))
e = intercept[AnalysisException] {
checkCollection(Seq("M1", "value", "name"), None)
}
- assert(e.getMessage.contains("Field name M1.value.name is invalid:
m1.value is not a struct"))
+ assert(e.getMessage.contains(
+ "Field name `M1`.`value`.`name` is invalid: `m1`.`value` is not a
struct"))
// map of struct
checkCollection(Seq("M2", "key", "A"),
@@ -357,24 +360,25 @@ class StructTypeSuite extends SparkFunSuite with
SQLHelper {
e = intercept[AnalysisException] {
checkCollection(Seq("m2", "key", "A", "name"), None)
}
- assert(e.getMessage.contains("Field name m2.key.A.name is invalid:
m2.key.a is not a struct"))
+ assert(e.getMessage.contains(
+ "Field name `m2`.`key`.`A`.`name` is invalid: `m2`.`key`.`a` is not a
struct"))
e = intercept[AnalysisException] {
checkCollection(Seq("M2", "value", "b", "name"), None)
}
assert(e.getMessage.contains(
- "Field name M2.value.b.name is invalid: m2.value.b is not a struct"))
+ "Field name `M2`.`value`.`b`.`name` is invalid: `m2`.`value`.`b` is not
a struct"))
// simple array type
e = intercept[AnalysisException] {
check(Seq("A1", "element"), None)
}
- assert(e.getMessage.contains("Field name A1.element is invalid: a1 is not
a struct"))
+ assert(e.getMessage.contains("Field name `A1`.`element` is invalid: `a1`
is not a struct"))
checkCollection(Seq("A1", "element"), Some(Seq("a1") ->
StructField("element", IntegerType)))
e = intercept[AnalysisException] {
checkCollection(Seq("A1", "element", "name"), None)
}
assert(e.getMessage.contains(
- "Field name A1.element.name is invalid: a1.element is not a struct"))
+ "Field name `A1`.`element`.`name` is invalid: `a1`.`element` is not a
struct"))
// array of struct
checkCollection(Seq("A2", "element", "C"),
@@ -384,7 +388,7 @@ class StructTypeSuite extends SparkFunSuite with SQLHelper {
checkCollection(Seq("a2", "element", "C", "name"), None)
}
assert(e.getMessage.contains(
- "Field name a2.element.C.name is invalid: a2.element.c is not a struct"))
+ "Field name `a2`.`element`.`C`.`name` is invalid: `a2`.`element`.`c` is
not a struct"))
}
test("SPARK-36807: Merge ANSI interval types to a tightest common type") {
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out
b/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out
index c2d94823bcb..891cd34b7c5 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/cast.sql.out
@@ -8,7 +8,7 @@ SELECT CAST('1.23' AS int)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '1.23' of the type "STRING" cannot be cast to
"INT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '1.23' of the type "STRING" cannot be cast to
"INT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('1.23' AS int)
^^^^^^^^^^^^^^^^^^^
@@ -20,7 +20,7 @@ SELECT CAST('1.23' AS long)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '1.23' of the type "STRING" cannot be cast to
"BIGINT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '1.23' of the type "STRING" cannot be cast to
"BIGINT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('1.23' AS long)
^^^^^^^^^^^^^^^^^^^^
@@ -32,7 +32,7 @@ SELECT CAST('-4.56' AS int)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '-4.56' of the type "STRING" cannot be cast to
"INT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '-4.56' of the type "STRING" cannot be cast to
"INT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('-4.56' AS int)
^^^^^^^^^^^^^^^^^^^^
@@ -44,7 +44,7 @@ SELECT CAST('-4.56' AS long)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '-4.56' of the type "STRING" cannot be cast to
"BIGINT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '-4.56' of the type "STRING" cannot be cast to
"BIGINT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('-4.56' AS long)
^^^^^^^^^^^^^^^^^^^^^
@@ -56,7 +56,7 @@ SELECT CAST('abc' AS int)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'abc' of the type "STRING" cannot be cast to
"INT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'abc' of the type "STRING" cannot be cast to
"INT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('abc' AS int)
^^^^^^^^^^^^^^^^^^
@@ -68,7 +68,7 @@ SELECT CAST('abc' AS long)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'abc' of the type "STRING" cannot be cast to
"BIGINT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'abc' of the type "STRING" cannot be cast to
"BIGINT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('abc' AS long)
^^^^^^^^^^^^^^^^^^^
@@ -80,7 +80,7 @@ SELECT CAST('abc' AS float)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'abc' of the type "STRING" cannot be cast to
"FLOAT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'abc' of the type "STRING" cannot be cast to
"FLOAT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('abc' AS float)
^^^^^^^^^^^^^^^^^^^^
@@ -92,7 +92,7 @@ SELECT CAST('abc' AS double)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'abc' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'abc' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('abc' AS double)
^^^^^^^^^^^^^^^^^^^^^
@@ -104,7 +104,7 @@ SELECT CAST('1234567890123' AS int)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '1234567890123' of the type "STRING" cannot be
cast to "INT" because it is malformed. To return NULL instead, use `try_cast`.
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '1234567890123' of the type "STRING" cannot be
cast to "INT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('1234567890123' AS int)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -116,7 +116,7 @@ SELECT CAST('12345678901234567890123' AS long)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '12345678901234567890123' of the type "STRING"
cannot be cast to "BIGINT" because it is malformed. To return NULL instead, use
`try_cast`. If necessary set "spark.sql.ansi.enabled" to "false" to bypass this
error.
+[CAST_INVALID_INPUT] The value '12345678901234567890123' of the type "STRING"
cannot be cast to "BIGINT" because it is malformed. Correct the value as per
the syntax, or change its target type. To return NULL instead, use `try_cast`.
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('12345678901234567890123' AS long)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -128,7 +128,7 @@ SELECT CAST('' AS int)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to "INT"
because it is malformed. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to "INT"
because it is malformed. Correct the value as per the syntax, or change its
target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('' AS int)
^^^^^^^^^^^^^^^
@@ -140,7 +140,7 @@ SELECT CAST('' AS long)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to
"BIGINT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to
"BIGINT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('' AS long)
^^^^^^^^^^^^^^^^
@@ -152,7 +152,7 @@ SELECT CAST('' AS float)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to
"FLOAT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to
"FLOAT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('' AS float)
^^^^^^^^^^^^^^^^^
@@ -164,7 +164,7 @@ SELECT CAST('' AS double)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('' AS double)
^^^^^^^^^^^^^^^^^^
@@ -192,7 +192,7 @@ SELECT CAST('123.a' AS int)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '123.a' of the type "STRING" cannot be cast to
"INT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '123.a' of the type "STRING" cannot be cast to
"INT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('123.a' AS int)
^^^^^^^^^^^^^^^^^^^^
@@ -204,7 +204,7 @@ SELECT CAST('123.a' AS long)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '123.a' of the type "STRING" cannot be cast to
"BIGINT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '123.a' of the type "STRING" cannot be cast to
"BIGINT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('123.a' AS long)
^^^^^^^^^^^^^^^^^^^^^
@@ -216,7 +216,7 @@ SELECT CAST('123.a' AS float)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '123.a' of the type "STRING" cannot be cast to
"FLOAT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '123.a' of the type "STRING" cannot be cast to
"FLOAT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('123.a' AS float)
^^^^^^^^^^^^^^^^^^^^^^
@@ -228,7 +228,7 @@ SELECT CAST('123.a' AS double)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '123.a' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '123.a' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('123.a' AS double)
^^^^^^^^^^^^^^^^^^^^^^^
@@ -248,7 +248,7 @@ SELECT CAST('-2147483649' AS int)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '-2147483649' of the type "STRING" cannot be
cast to "INT" because it is malformed. To return NULL instead, use `try_cast`.
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '-2147483649' of the type "STRING" cannot be
cast to "INT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('-2147483649' AS int)
^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -268,7 +268,7 @@ SELECT CAST('2147483648' AS int)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '2147483648' of the type "STRING" cannot be
cast to "INT" because it is malformed. To return NULL instead, use `try_cast`.
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '2147483648' of the type "STRING" cannot be
cast to "INT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('2147483648' AS int)
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -288,7 +288,7 @@ SELECT CAST('-9223372036854775809' AS long)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '-9223372036854775809' of the type "STRING"
cannot be cast to "BIGINT" because it is malformed. To return NULL instead, use
`try_cast`. If necessary set "spark.sql.ansi.enabled" to "false" to bypass this
error.
+[CAST_INVALID_INPUT] The value '-9223372036854775809' of the type "STRING"
cannot be cast to "BIGINT" because it is malformed. Correct the value as per
the syntax, or change its target type. To return NULL instead, use `try_cast`.
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('-9223372036854775809' AS long)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -308,7 +308,7 @@ SELECT CAST('9223372036854775808' AS long)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '9223372036854775808' of the type "STRING"
cannot be cast to "BIGINT" because it is malformed. To return NULL instead, use
`try_cast`. If necessary set "spark.sql.ansi.enabled" to "false" to bypass this
error.
+[CAST_INVALID_INPUT] The value '9223372036854775808' of the type "STRING"
cannot be cast to "BIGINT" because it is malformed. Correct the value as per
the syntax, or change its target type. To return NULL instead, use `try_cast`.
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT CAST('9223372036854775808' AS long)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -567,7 +567,7 @@ select cast('1中文' as tinyint)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '1中文' of the type "STRING" cannot be cast to
"TINYINT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '1中文' of the type "STRING" cannot be cast to
"TINYINT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select cast('1中文' as tinyint)
^^^^^^^^^^^^^^^^^^^^^^
@@ -579,7 +579,7 @@ select cast('1中文' as smallint)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '1中文' of the type "STRING" cannot be cast to
"SMALLINT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '1中文' of the type "STRING" cannot be cast to
"SMALLINT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select cast('1中文' as smallint)
^^^^^^^^^^^^^^^^^^^^^^^
@@ -591,7 +591,7 @@ select cast('1中文' as INT)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '1中文' of the type "STRING" cannot be cast to
"INT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '1中文' of the type "STRING" cannot be cast to
"INT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select cast('1中文' as INT)
^^^^^^^^^^^^^^^^^^
@@ -603,7 +603,7 @@ select cast('中文1' as bigint)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '中文1' of the type "STRING" cannot be cast to
"BIGINT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '中文1' of the type "STRING" cannot be cast to
"BIGINT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select cast('中文1' as bigint)
^^^^^^^^^^^^^^^^^^^^^
@@ -615,7 +615,7 @@ select cast('1中文' as bigint)
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '1中文' of the type "STRING" cannot be cast to
"BIGINT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '1中文' of the type "STRING" cannot be cast to
"BIGINT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select cast('1中文' as bigint)
^^^^^^^^^^^^^^^^^^^^^
@@ -646,7 +646,7 @@ struct<>
-- !query output
org.apache.spark.SparkRuntimeException
[CAST_INVALID_INPUT] The value '
- xyz
' of the type "STRING" cannot be cast to "BOOLEAN" because it is malformed. To
return NULL instead, use `try_cast`. If necessary set "spark.sql.ansi.enabled"
to "false" to bypass this error.
+ xyz
' of the type "STRING" cannot be cast to "BOOLEAN" because it is malformed.
Correct the value as per the syntax, or change its target type. To return NULL
instead, use `try_cast`. If necessary set "spark.sql.ansi.enabled" to "false"
to bypass this error.
== SQL(line 1, position 7) ==
select cast('\t\n xyz \t\r' as boolean)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -678,7 +678,7 @@ select cast('xyz' as decimal(4, 2))
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'xyz' of the type "STRING" cannot be cast to
"DECIMAL(4,2)" because it is malformed. To return NULL instead, use `try_cast`.
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'xyz' of the type "STRING" cannot be cast to
"DECIMAL(4,2)" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select cast('xyz' as decimal(4, 2))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -698,7 +698,7 @@ select cast('a' as date)
struct<>
-- !query output
org.apache.spark.SparkDateTimeException
-[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"DATE" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"DATE" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select cast('a' as date)
^^^^^^^^^^^^^^^^^
@@ -718,7 +718,7 @@ select cast('a' as timestamp)
struct<>
-- !query output
org.apache.spark.SparkDateTimeException
-[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"TIMESTAMP" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"TIMESTAMP" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select cast('a' as timestamp)
^^^^^^^^^^^^^^^^^^^^^^
@@ -738,7 +738,7 @@ select cast('a' as timestamp_ntz)
struct<>
-- !query output
org.apache.spark.SparkDateTimeException
-[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"TIMESTAMP_NTZ" because it is malformed. To return NULL instead, use
`try_cast`. If necessary set "spark.sql.ansi.enabled" to "false" to bypass this
error.
+[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"TIMESTAMP_NTZ" because it is malformed. Correct the value as per the syntax,
or change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select cast('a' as timestamp_ntz)
^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -750,7 +750,7 @@ select cast(cast('inf' as double) as timestamp)
struct<>
-- !query output
org.apache.spark.SparkDateTimeException
-[CAST_INVALID_INPUT] The value Infinity of the type "DOUBLE" cannot be cast to
"TIMESTAMP" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value Infinity of the type "DOUBLE" cannot be cast to
"TIMESTAMP" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select cast(cast('inf' as double) as timestamp)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -762,7 +762,7 @@ select cast(cast('inf' as float) as timestamp)
struct<>
-- !query output
org.apache.spark.SparkDateTimeException
-[CAST_INVALID_INPUT] The value Infinity of the type "DOUBLE" cannot be cast to
"TIMESTAMP" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value Infinity of the type "DOUBLE" cannot be cast to
"TIMESTAMP" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select cast(cast('inf' as float) as timestamp)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/date.sql.out
b/sql/core/src/test/resources/sql-tests/results/ansi/date.sql.out
index 4ab12705ad9..dea228b3652 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/date.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/date.sql.out
@@ -232,7 +232,7 @@ select next_day("xx", "Mon")
struct<>
-- !query output
org.apache.spark.SparkDateTimeException
-[CAST_INVALID_INPUT] The value 'xx' of the type "STRING" cannot be cast to
"DATE" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'xx' of the type "STRING" cannot be cast to
"DATE" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select next_day("xx", "Mon")
^^^^^^^^^^^^^^^^^^^^^
@@ -327,7 +327,7 @@ select date_add('2011-11-11', '1.2')
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '1.2' of the type "STRING" cannot be cast to
"INT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '1.2' of the type "STRING" cannot be cast to
"INT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select date_add('2011-11-11', '1.2')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -438,7 +438,7 @@ select date_sub(date'2011-11-11', '1.2')
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value '1.2' of the type "STRING" cannot be cast to
"INT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '1.2' of the type "STRING" cannot be cast to
"INT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select date_sub(date'2011-11-11', '1.2')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git
a/sql/core/src/test/resources/sql-tests/results/ansi/datetime-parsing-invalid.sql.out
b/sql/core/src/test/resources/sql-tests/results/ansi/datetime-parsing-invalid.sql.out
index 293f62566f0..7b9ad7ac4a9 100644
---
a/sql/core/src/test/resources/sql-tests/results/ansi/datetime-parsing-invalid.sql.out
+++
b/sql/core/src/test/resources/sql-tests/results/ansi/datetime-parsing-invalid.sql.out
@@ -250,7 +250,7 @@ select cast("Unparseable" as timestamp)
struct<>
-- !query output
org.apache.spark.SparkDateTimeException
-[CAST_INVALID_INPUT] The value 'Unparseable' of the type "STRING" cannot be
cast to "TIMESTAMP" because it is malformed. To return NULL instead, use
`try_cast`. If necessary set "spark.sql.ansi.enabled" to "false" to bypass this
error.
+[CAST_INVALID_INPUT] The value 'Unparseable' of the type "STRING" cannot be
cast to "TIMESTAMP" because it is malformed. Correct the value as per the
syntax, or change its target type. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select cast("Unparseable" as timestamp)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -262,7 +262,7 @@ select cast("Unparseable" as date)
struct<>
-- !query output
org.apache.spark.SparkDateTimeException
-[CAST_INVALID_INPUT] The value 'Unparseable' of the type "STRING" cannot be
cast to "DATE" because it is malformed. To return NULL instead, use `try_cast`.
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'Unparseable' of the type "STRING" cannot be
cast to "DATE" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select cast("Unparseable" as date)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git
a/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
b/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
index 752652803e9..6a61369a63e 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out
@@ -122,7 +122,7 @@ select interval 2 second * 'a'
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select interval 2 second * 'a'
^^^^^^^^^^^^^^^^^^^^^^^
@@ -134,7 +134,7 @@ select interval 2 second / 'a'
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select interval 2 second / 'a'
^^^^^^^^^^^^^^^^^^^^^^^
@@ -146,7 +146,7 @@ select interval 2 year * 'a'
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select interval 2 year * 'a'
^^^^^^^^^^^^^^^^^^^^^
@@ -158,7 +158,7 @@ select interval 2 year / 'a'
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select interval 2 year / 'a'
^^^^^^^^^^^^^^^^^^^^^
@@ -186,7 +186,7 @@ select 'a' * interval 2 second
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select 'a' * interval 2 second
^^^^^^^^^^^^^^^^^^^^^^^
@@ -198,7 +198,7 @@ select 'a' * interval 2 year
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select 'a' * interval 2 year
^^^^^^^^^^^^^^^^^^^^^
@@ -1516,7 +1516,7 @@ select '4 11:11' - interval '4 22:12' day to minute
struct<>
-- !query output
org.apache.spark.SparkDateTimeException
-[CAST_INVALID_INPUT] The value '4 11:11' of the type "STRING" cannot be cast
to "TIMESTAMP" because it is malformed. To return NULL instead, use `try_cast`.
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '4 11:11' of the type "STRING" cannot be cast
to "TIMESTAMP" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select '4 11:11' - interval '4 22:12' day to minute
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1528,7 +1528,7 @@ select '4 12:12:12' + interval '4 22:12' day to minute
struct<>
-- !query output
org.apache.spark.SparkDateTimeException
-[CAST_INVALID_INPUT] The value '4 12:12:12' of the type "STRING" cannot be
cast to "TIMESTAMP" because it is malformed. To return NULL instead, use
`try_cast`. If necessary set "spark.sql.ansi.enabled" to "false" to bypass this
error.
+[CAST_INVALID_INPUT] The value '4 12:12:12' of the type "STRING" cannot be
cast to "TIMESTAMP" because it is malformed. Correct the value as per the
syntax, or change its target type. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select '4 12:12:12' + interval '4 22:12' day to minute
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1566,7 +1566,7 @@ select str - interval '4 22:12' day to minute from
interval_view
struct<>
-- !query output
org.apache.spark.SparkDateTimeException
-[CAST_INVALID_INPUT] The value '1' of the type "STRING" cannot be cast to
"TIMESTAMP" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '1' of the type "STRING" cannot be cast to
"TIMESTAMP" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select str - interval '4 22:12' day to minute from interval_view
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1578,7 +1578,7 @@ select str + interval '4 22:12' day to minute from
interval_view
struct<>
-- !query output
org.apache.spark.SparkDateTimeException
-[CAST_INVALID_INPUT] The value '1' of the type "STRING" cannot be cast to
"TIMESTAMP" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '1' of the type "STRING" cannot be cast to
"TIMESTAMP" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select str + interval '4 22:12' day to minute from interval_view
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git
a/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out
b/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out
index ce97418dd0f..b4991a5b683 100644
---
a/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out
+++
b/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out
@@ -82,7 +82,7 @@ select left("abcd", -2), left("abcd", 0), left("abcd", 'a')
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"INT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"INT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 42) ==
...t("abcd", -2), left("abcd", 0), left("abcd", 'a')
^^^^^^^^^^^^^^^^^
@@ -110,7 +110,7 @@ select right("abcd", -2), right("abcd", 0), right("abcd",
'a')
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"INT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'a' of the type "STRING" cannot be cast to
"INT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 44) ==
...("abcd", -2), right("abcd", 0), right("abcd", 'a')
^^^^^^^^^^^^^^^^^^
@@ -419,7 +419,7 @@ SELECT lpad('hi', 'invalid_length')
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'invalid_length' of the type "STRING" cannot be
cast to "INT" because it is malformed. To return NULL instead, use `try_cast`.
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'invalid_length' of the type "STRING" cannot be
cast to "INT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT lpad('hi', 'invalid_length')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -431,7 +431,7 @@ SELECT rpad('hi', 'invalid_length')
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'invalid_length' of the type "STRING" cannot be
cast to "INT" because it is malformed. To return NULL instead, use `try_cast`.
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'invalid_length' of the type "STRING" cannot be
cast to "INT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT rpad('hi', 'invalid_length')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/sql/core/src/test/resources/sql-tests/results/pivot.sql.out
b/sql/core/src/test/resources/sql-tests/results/pivot.sql.out
index 8e8b32d9025..ef20f8fe7e6 100644
--- a/sql/core/src/test/resources/sql-tests/results/pivot.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/pivot.sql.out
@@ -339,7 +339,7 @@ PIVOT (
struct<>
-- !query output
org.apache.spark.sql.AnalysisException
-[NON_LITERAL_PIVOT_VALUES] Literal expressions required for pivot values,
found 'course#x'
+[NON_LITERAL_PIVOT_VALUES] Literal expressions required for pivot values,
found "course".
-- !query
diff --git
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/boolean.sql.out
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/boolean.sql.out
index 630b855ac6e..6f17a0cd760 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/boolean.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/boolean.sql.out
@@ -56,7 +56,7 @@ SELECT boolean('test') AS error
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value 'test' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'test' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean('test') AS error
^^^^^^^^^^^^^^^
@@ -76,7 +76,7 @@ SELECT boolean('foo') AS error
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value 'foo' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'foo' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean('foo') AS error
^^^^^^^^^^^^^^
@@ -104,7 +104,7 @@ SELECT boolean('yeah') AS error
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value 'yeah' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'yeah' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean('yeah') AS error
^^^^^^^^^^^^^^^
@@ -132,7 +132,7 @@ SELECT boolean('nay') AS error
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value 'nay' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'nay' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean('nay') AS error
^^^^^^^^^^^^^^
@@ -144,7 +144,7 @@ SELECT boolean('on') AS true
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value 'on' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'on' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean('on') AS true
^^^^^^^^^^^^^
@@ -156,7 +156,7 @@ SELECT boolean('off') AS `false`
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value 'off' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'off' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean('off') AS `false`
^^^^^^^^^^^^^^
@@ -168,7 +168,7 @@ SELECT boolean('of') AS `false`
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value 'of' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'of' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean('of') AS `false`
^^^^^^^^^^^^^
@@ -180,7 +180,7 @@ SELECT boolean('o') AS error
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value 'o' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'o' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean('o') AS error
^^^^^^^^^^^^
@@ -192,7 +192,7 @@ SELECT boolean('on_') AS error
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value 'on_' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'on_' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean('on_') AS error
^^^^^^^^^^^^^^
@@ -204,7 +204,7 @@ SELECT boolean('off_') AS error
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value 'off_' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'off_' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean('off_') AS error
^^^^^^^^^^^^^^^
@@ -224,7 +224,7 @@ SELECT boolean('11') AS error
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value '11' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '11' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean('11') AS error
^^^^^^^^^^^^^
@@ -244,7 +244,7 @@ SELECT boolean('000') AS error
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value '000' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '000' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean('000') AS error
^^^^^^^^^^^^^^
@@ -256,7 +256,7 @@ SELECT boolean('') AS error
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean('') AS error
^^^^^^^^^^^
@@ -365,7 +365,7 @@ SELECT boolean(string(' tru e ')) AS invalid
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value ' tru e ' of the type "STRING" cannot be cast
to "BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`.
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value ' tru e ' of the type "STRING" cannot be cast
to "BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean(string(' tru e ')) AS invalid
^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -377,7 +377,7 @@ SELECT boolean(string('')) AS invalid
struct<>
-- !query output
org.apache.spark.SparkRuntimeException
-[CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value '' of the type "STRING" cannot be cast to
"BOOLEAN" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT boolean(string('')) AS invalid
^^^^^^^^^^^^^^^^^^^
@@ -524,7 +524,7 @@ INSERT INTO BOOLTBL2
struct<>
-- !query output
org.apache.spark.sql.AnalysisException
-failed to evaluate expression CAST('XXX' AS BOOLEAN): [CAST_INVALID_INPUT] The
value 'XXX' of the type "STRING" cannot be cast to "BOOLEAN" because it is
malformed. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
+failed to evaluate expression CAST('XXX' AS BOOLEAN): [CAST_INVALID_INPUT] The
value 'XXX' of the type "STRING" cannot be cast to "BOOLEAN" because it is
malformed. Correct the value as per the syntax, or change its target type. To
return NULL instead, use `try_cast`. If necessary set "spark.sql.ansi.enabled"
to "false" to bypass this error.
== SQL(line 2, position 11) ==
VALUES (boolean('XXX'))
^^^^^^^^^^^^^^
diff --git
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out
index 32b1e29b4e8..d172e2ace04 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float4.sql.out
@@ -96,7 +96,7 @@ SELECT float('N A N')
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'N A N' of the type "STRING" cannot be cast to
"FLOAT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'N A N' of the type "STRING" cannot be cast to
"FLOAT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT float('N A N')
^^^^^^^^^^^^^^
@@ -108,7 +108,7 @@ SELECT float('NaN x')
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'NaN x' of the type "STRING" cannot be cast to
"FLOAT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'NaN x' of the type "STRING" cannot be cast to
"FLOAT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT float('NaN x')
^^^^^^^^^^^^^^
@@ -120,7 +120,7 @@ SELECT float(' INFINITY x')
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value ' INFINITY x' of the type "STRING" cannot be
cast to "FLOAT" because it is malformed. To return NULL instead, use
`try_cast`. If necessary set "spark.sql.ansi.enabled" to "false" to bypass this
error.
+[CAST_INVALID_INPUT] The value ' INFINITY x' of the type "STRING" cannot be
cast to "FLOAT" because it is malformed. Correct the value as per the syntax,
or change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT float(' INFINITY x')
^^^^^^^^^^^^^^^^^^^^^^^
@@ -156,7 +156,7 @@ SELECT float(decimal('nan'))
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'nan' of the type "STRING" cannot be cast to
"DECIMAL(10,0)" because it is malformed. To return NULL instead, use
`try_cast`. If necessary set "spark.sql.ansi.enabled" to "false" to bypass this
error.
+[CAST_INVALID_INPUT] The value 'nan' of the type "STRING" cannot be cast to
"DECIMAL(10,0)" because it is malformed. Correct the value as per the syntax,
or change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 13) ==
SELECT float(decimal('nan'))
^^^^^^^^^^^^^^
diff --git
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out
index a420a890bc7..8259856ed7e 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/float8.sql.out
@@ -128,7 +128,7 @@ SELECT double('N A N')
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'N A N' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'N A N' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT double('N A N')
^^^^^^^^^^^^^^^
@@ -140,7 +140,7 @@ SELECT double('NaN x')
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'NaN x' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'NaN x' of the type "STRING" cannot be cast to
"DOUBLE" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT double('NaN x')
^^^^^^^^^^^^^^^
@@ -152,7 +152,7 @@ SELECT double(' INFINITY x')
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value ' INFINITY x' of the type "STRING" cannot be
cast to "DOUBLE" because it is malformed. To return NULL instead, use
`try_cast`. If necessary set "spark.sql.ansi.enabled" to "false" to bypass this
error.
+[CAST_INVALID_INPUT] The value ' INFINITY x' of the type "STRING" cannot be
cast to "DOUBLE" because it is malformed. Correct the value as per the syntax,
or change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
SELECT double(' INFINITY x')
^^^^^^^^^^^^^^^^^^^^^^^^
@@ -188,7 +188,7 @@ SELECT double(decimal('nan'))
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'nan' of the type "STRING" cannot be cast to
"DECIMAL(10,0)" because it is malformed. To return NULL instead, use
`try_cast`. If necessary set "spark.sql.ansi.enabled" to "false" to bypass this
error.
+[CAST_INVALID_INPUT] The value 'nan' of the type "STRING" cannot be cast to
"DECIMAL(10,0)" because it is malformed. Correct the value as per the syntax,
or change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 14) ==
SELECT double(decimal('nan'))
^^^^^^^^^^^^^^
diff --git
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/text.sql.out
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/text.sql.out
index 7d5025d8729..50c714b7f36 100755
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/text.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/text.sql.out
@@ -65,7 +65,7 @@ select string('four: ') || 2+2
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'four: 2' of the type "STRING" cannot be cast
to "BIGINT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'four: 2' of the type "STRING" cannot be cast
to "BIGINT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select string('four: ') || 2+2
^^^^^^^^^^^^^^^^^^^^^^^
@@ -77,7 +77,7 @@ select 'four: ' || 2+2
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'four: 2' of the type "STRING" cannot be cast
to "BIGINT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'four: 2' of the type "STRING" cannot be cast
to "BIGINT" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 1, position 7) ==
select 'four: ' || 2+2
^^^^^^^^^^^^^^^
diff --git
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part2.sql.out
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part2.sql.out
index 68c55324512..9aeab7f957b 100644
---
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part2.sql.out
+++
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part2.sql.out
@@ -462,7 +462,7 @@ window w as (order by f_numeric range between
struct<>
-- !query output
org.apache.spark.SparkNumberFormatException
-[CAST_INVALID_INPUT] The value 'NaN' of the type "STRING" cannot be cast to
"INT" because it is malformed. To return NULL instead, use `try_cast`. If
necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+[CAST_INVALID_INPUT] The value 'NaN' of the type "STRING" cannot be cast to
"INT" because it is malformed. Correct the value as per the syntax, or change
its target type. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 3, position 12) ==
window w as (order by f_numeric range between
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out
index 9ebc51bbc67..cad5e7d77dd 100644
---
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out
+++
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out
@@ -72,7 +72,7 @@ insert into datetimes values
struct<>
-- !query output
org.apache.spark.sql.AnalysisException
-failed to evaluate expression CAST('11:00 BST' AS TIMESTAMP):
[CAST_INVALID_INPUT] The value '11:00 BST' of the type "STRING" cannot be cast
to "TIMESTAMP" because it is malformed. To return NULL instead, use `try_cast`.
If necessary set "spark.sql.ansi.enabled" to "false" to bypass this error.
+failed to evaluate expression CAST('11:00 BST' AS TIMESTAMP):
[CAST_INVALID_INPUT] The value '11:00 BST' of the type "STRING" cannot be cast
to "TIMESTAMP" because it is malformed. Correct the value as per the syntax, or
change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
== SQL(line 2, position 23) ==
(1, timestamp '11:00', cast ('11:00 BST' as timestamp), cast ('1 year' as
timestamp), ...
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out
index 9178a2b276c..5020e328a7e 100644
---
a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out
+++
b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out
@@ -501,7 +501,7 @@ FROM (VALUES(1,1),(2,2),(3,(cast('nan' as
int))),(4,3),(5,4)) t(a,b)
struct<>
-- !query output
org.apache.spark.sql.AnalysisException
-failed to evaluate expression CAST('nan' AS INT): [CAST_INVALID_INPUT] The
value 'nan' of the type "STRING" cannot be cast to "INT" because it is
malformed. To return NULL instead, use `try_cast`. If necessary set
"spark.sql.ansi.enabled" to "false" to bypass this error.
+failed to evaluate expression CAST('nan' AS INT): [CAST_INVALID_INPUT] The
value 'nan' of the type "STRING" cannot be cast to "INT" because it is
malformed. Correct the value as per the syntax, or change its target type. To
return NULL instead, use `try_cast`. If necessary set "spark.sql.ansi.enabled"
to "false" to bypass this error.
== SQL(line 3, position 28) ==
FROM (VALUES(1,1),(2,2),(3,(cast('nan' as int))),(4,3),(5,4)) t(a,b)
^^^^^^^^^^^^^^^^^^
diff --git
a/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out
b/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out
index b1636660024..cf9f6c67050 100644
---
a/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out
+++
b/sql/core/src/test/resources/sql-tests/results/timestampNTZ/timestamp-ansi.sql.out
@@ -332,7 +332,7 @@ select to_timestamp(1)
struct<>
-- !query output
org.apache.spark.SparkDateTimeException
-[CAST_INVALID_INPUT] The value '1' of the type "STRING" cannot be cast to
"TIMESTAMP_NTZ" because it is malformed. To return NULL instead, use
`try_cast`. If necessary set "spark.sql.ansi.enabled" to "false" to bypass this
error.
+[CAST_INVALID_INPUT] The value '1' of the type "STRING" cannot be cast to
"TIMESTAMP_NTZ" because it is malformed. Correct the value as per the syntax,
or change its target type. To return NULL instead, use `try_cast`. If necessary
set "spark.sql.ansi.enabled" to "false" to bypass this error.
-- !query
diff --git
a/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out
b/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out
index 990e1db25dd..b03e8ded167 100644
--- a/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out
@@ -339,7 +339,7 @@ PIVOT (
struct<>
-- !query output
org.apache.spark.sql.AnalysisException
-[NON_LITERAL_PIVOT_VALUES] Literal expressions required for pivot values,
found 'course#x'
+[NON_LITERAL_PIVOT_VALUES] Literal expressions required for pivot values,
found "course".
-- !query
diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsDSv2Suite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsDSv2Suite.scala
index f51ec15dce5..8d9d8e27735 100644
---
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsDSv2Suite.scala
+++
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsDSv2Suite.scala
@@ -74,7 +74,7 @@ class QueryCompilationErrorsDSv2Suite
checkErrorClass(
exception = e,
errorClass = "NON_PARTITION_COLUMN",
- msg = "PARTITION clause cannot contain a non-partition column name:
`id`")
+ msg = "PARTITION clause cannot contain the non-partition column:
`id`.")
}
}
@@ -91,7 +91,7 @@ class QueryCompilationErrorsDSv2Suite
checkErrorClass(
exception = e,
errorClass = "NON_PARTITION_COLUMN",
- msg = "PARTITION clause cannot contain a non-partition column name:
`data`")
+ msg = "PARTITION clause cannot contain the non-partition column:
`data`.")
}
}
}
diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
index 420911e1f30..4a440dc6ab7 100644
---
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
+++
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
@@ -524,7 +524,7 @@ class QueryCompilationErrorsSuite
checkErrorClass(
exception = e,
errorClass = "INVALID_FIELD_NAME",
- msg = "Field name m.n is invalid: m is not a struct.; line 1 pos 27")
+ msg = "Field name `m`.`n` is invalid: `m` is not a struct.; line 1 pos
27")
}
}
@@ -544,8 +544,7 @@ class QueryCompilationErrorsSuite
agg(sum($"earnings")).collect()
},
errorClass = "NON_LITERAL_PIVOT_VALUES",
- msg = "Literal expressions required for pivot values, found
'earnings#\\w+'",
- matchMsg = true)
+ msg = """Literal expressions required for pivot values, found
"earnings".""")
}
test("UNSUPPORTED_DESERIALIZER: data type mismatch") {
diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
index e2d33f3c8d6..1a39ecc190e 100644
---
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
+++
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
@@ -131,7 +131,8 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with
QueryErrorsSuiteBase
},
errorClass = "CAST_INVALID_INPUT",
msg = """The value '111111111111xe23' of the type "STRING" cannot be
cast to "DOUBLE" """ +
- """because it is malformed. To return NULL instead, use `try_cast`. If
necessary set """ +
+ "because it is malformed. Correct the value as per the syntax, " +
+ "or change its target type. To return NULL instead, use `try_cast`. If
necessary set " +
s"""$ansiConf to \"false\" to bypass this error.
|== SQL(line 1, position 7) ==
|select CAST('111111111111xe23' AS DOUBLE)
diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
index e8d1afddde2..b20f884ebdc 100644
---
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
+++
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
@@ -443,7 +443,7 @@ class QueryExecutionErrorsSuite
exception = e1,
errorClass = "UNSUPPORTED_SAVE_MODE",
errorSubClass = Some("NON_EXISTENT_PATH"),
- msg = "The save mode NULL is not supported for: a not existent path.")
+ msg = "The save mode NULL is not supported for: a non-existent path.")
Utils.createDirectory(path)
@@ -459,7 +459,7 @@ class QueryExecutionErrorsSuite
}
}
- test("FAILED_SET_ORIGINAL_PERMISSION_BACK: can't set permission") {
+ test("RESET_PERMISSION_TO_ORIGINAL: can't set permission") {
withTable("t") {
withSQLConf(
"fs.file.impl" -> classOf[FakeFileSystemSetPermission].getName,
@@ -473,7 +473,7 @@ class QueryExecutionErrorsSuite
checkErrorClass(
exception = e.getCause.asInstanceOf[SparkSecurityException],
- errorClass = "FAILED_SET_ORIGINAL_PERMISSION_BACK",
+ errorClass = "RESET_PERMISSION_TO_ORIGINAL",
msg = "Failed to set original permission .+ " +
"back to the created path: .+\\. Exception: .+",
matchMsg = true)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]