This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
     new 040526391a4  [SPARK-39087][SQL] Improve messages of error classes
040526391a4 is described below

commit 040526391a45ad610422a48c05aa69ba5133f922
Author: Max Gekk <max.g...@gmail.com>
AuthorDate: Tue May 3 08:17:02 2022 +0300

    [SPARK-39087][SQL] Improve messages of error classes

    ### What changes were proposed in this pull request?
    In the PR, I propose to modify error messages of the following error classes:
    - INVALID_JSON_SCHEMA_MAP_TYPE
    - INCOMPARABLE_PIVOT_COLUMN
    - INVALID_ARRAY_INDEX_IN_ELEMENT_AT
    - INVALID_ARRAY_INDEX
    - DIVIDE_BY_ZERO

    ### Why are the changes needed?
    To improve readability of error messages.

    ### Does this PR introduce _any_ user-facing change?
    Yes. It changes user-facing error messages.

    ### How was this patch tested?
    By running the modified test suites:
    ```
    $ build/sbt "sql/testOnly *QueryCompilationErrorsSuite*"
    $ build/sbt "sql/testOnly *QueryExecutionErrorsSuite*"
    $ build/sbt "sql/testOnly *QueryExecutionAnsiErrorsSuite"
    $ build/sbt "test:testOnly *SparkThrowableSuite"
    ```

    Closes #36428 from MaxGekk/error-class-improve-msg.

    Authored-by: Max Gekk <max.g...@gmail.com>
    Signed-off-by: Max Gekk <max.g...@gmail.com>
---
 core/src/main/resources/error/error-classes.json   | 12 ++++-----
 .../org/apache/spark/SparkThrowableSuite.scala     |  4 +--
 .../spark/sql/errors/QueryCompilationErrors.scala  |  6 ++---
 .../expressions/ArithmeticExpressionSuite.scala    | 30 +++++++++++-----------
 .../expressions/CollectionExpressionsSuite.scala   |  4 +--
 .../catalyst/expressions/ComplexTypeSuite.scala    |  4 +--
 .../expressions/IntervalExpressionsSuite.scala     | 10 ++++----
 .../expressions/StringExpressionsSuite.scala       |  6 ++---
 .../sql/catalyst/util/IntervalUtilsSuite.scala     |  2 +-
 .../resources/sql-tests/results/ansi/array.sql.out | 24 ++++++++---------
 .../sql-tests/results/ansi/interval.sql.out        |  4 +--
 .../resources/sql-tests/results/interval.sql.out   |  4 +--
 .../test/resources/sql-tests/results/pivot.sql.out |  4 +--
 .../sql-tests/results/postgreSQL/case.sql.out      |  6 ++---
 .../sql-tests/results/postgreSQL/int8.sql.out      |  6 ++---
 .../results/postgreSQL/select_having.sql.out       |  2 +-
 .../results/udf/postgreSQL/udf-case.sql.out        |  6 ++---
 .../udf/postgreSQL/udf-select_having.sql.out       |  2 +-
 .../sql-tests/results/udf/udf-pivot.sql.out        |  4 +--
 .../apache/spark/sql/ColumnExpressionSuite.scala   | 12 ++++-----
 .../org/apache/spark/sql/DataFrameSuite.scala      |  2 +-
 .../sql/errors/QueryCompilationErrorsSuite.scala   | 10 +++-----
 .../sql/errors/QueryExecutionAnsiErrorsSuite.scala |  8 +++---
 .../sql/errors/QueryExecutionErrorsSuite.scala     | 25 +++++++++---------
 .../apache/spark/sql/execution/SQLViewSuite.scala  |  4 +--
 .../sql/streaming/FileStreamSourceSuite.scala      |  2 +-
 26 files changed, 101 insertions(+), 102 deletions(-)
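For illustration, the reworded DIVIDE_BY_ZERO message can be surfaced in a spark-shell session on this branch as sketched below (a minimal sketch, not part of the patch; it assumes the usual `spark` session object is in scope):

```scala
// Minimal spark-shell sketch: surface the reworded DIVIDE_BY_ZERO message.
spark.conf.set("spark.sql.ansi.enabled", "true")

try {
  // Under ANSI mode, division by zero is an error rather than NULL.
  spark.sql("SELECT 1/0").collect()
} catch {
  case e: ArithmeticException =>
    // Expected to begin with:
    // [DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. ...
    println(e.getMessage)
}

// The alternative suggested by the message returns NULL instead of failing:
spark.sql("SELECT try_divide(1, 0)").show()
```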
diff --git a/core/src/main/resources/error/error-classes.json b/core/src/main/resources/error/error-classes.json
index aa38f8b9747..eacbeec570f 100644
--- a/core/src/main/resources/error/error-classes.json
+++ b/core/src/main/resources/error/error-classes.json
@@ -34,7 +34,7 @@
     "sqlState" : "22008"
   },
   "DIVIDE_BY_ZERO" : {
-    "message" : [ "divide by zero. To return NULL instead, use 'try_divide'. If necessary set <config> to false (except for ANSI interval type) to bypass this error.<details>" ],
+    "message" : [ "Division by zero. To return NULL instead, use `try_divide`. If necessary set <config> to false (except for ANSI interval type) to bypass this error.<details>" ],
     "sqlState" : "22012"
   },
   "DUPLICATE_KEY" : {
@@ -72,7 +72,7 @@
     "message" : [ "Grouping sets size cannot be greater than <maxSize>" ]
   },
   "INCOMPARABLE_PIVOT_COLUMN" : {
-    "message" : [ "Invalid pivot column '<columnName>'. Pivot columns must be comparable." ],
+    "message" : [ "Invalid pivot column <columnName>. Pivot columns must be comparable." ],
     "sqlState" : "42000"
   },
   "INCOMPATIBLE_DATASOURCE_REGISTER" : {
@@ -89,10 +89,10 @@
     "message" : [ "<message>" ]
   },
   "INVALID_ARRAY_INDEX" : {
-    "message" : [ "Invalid index: <indexValue>, numElements: <arraySize>. If necessary set <config> to false to bypass this error." ]
+    "message" : [ "The index <indexValue> is out of bounds. The array has <arraySize> elements. If necessary set <config> to false to bypass this error." ]
   },
   "INVALID_ARRAY_INDEX_IN_ELEMENT_AT" : {
-    "message" : [ "Invalid index: <indexValue>, numElements: <arraySize>. To return NULL instead, use 'try_element_at'. If necessary set <config> to false to bypass this error." ]
+    "message" : [ "The index <indexValue> is out of bounds. The array has <arraySize> elements. To return NULL instead, use `try_element_at`. If necessary set <config> to false to bypass this error." ]
   },
   "INVALID_FIELD_NAME" : {
     "message" : [ "Field name <fieldName> is invalid: <path> is not a struct." ],
@@ -102,8 +102,8 @@
     "message" : [ "The fraction of sec must be zero. Valid range is [0, 60]. If necessary set <config> to false to bypass this error. " ],
     "sqlState" : "22023"
   },
-  "INVALID_JSON_SCHEMA_MAPTYPE" : {
-    "message" : [ "Input schema <dataType> can only contain StringType as a key type for a MapType." ]
+  "INVALID_JSON_SCHEMA_MAP_TYPE" : {
+    "message" : [ "Input schema <jsonSchema> can only contain STRING as a key type for a MAP." ]
   },
   "INVALID_PANDAS_UDF_PLACEMENT" : {
     "message" : [ "The group aggregate pandas UDF <functionName> cannot be invoked together with as other, non-pandas aggregate functions." ]
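At the SQL level, the reworded array-index messages read as sketched below (same spark-shell assumptions as the sketch above):

```scala
// Still under spark.sql.ansi.enabled=true from the previous sketch.

// Expected: [INVALID_ARRAY_INDEX] The index 5 is out of bounds. The array has 3 elements. ...
try {
  spark.sql("SELECT array(1, 2, 3)[5]").collect()
} catch {
  case e: Exception => println(e.getMessage)
}

// Expected: [INVALID_ARRAY_INDEX_IN_ELEMENT_AT] The index 5 is out of bounds.
// The array has 3 elements. To return NULL instead, use `try_element_at`. ...
try {
  spark.sql("SELECT element_at(array(1, 2, 3), 5)").collect()
} catch {
  case e: Exception => println(e.getMessage)
}

// The suggested alternative degrades to NULL instead of failing:
spark.sql("SELECT try_element_at(array(1, 2, 3), 5)").show()
```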
diff --git a/core/src/test/scala/org/apache/spark/SparkThrowableSuite.scala b/core/src/test/scala/org/apache/spark/SparkThrowableSuite.scala
index 35606857837..23c06538933 100644
--- a/core/src/test/scala/org/apache/spark/SparkThrowableSuite.scala
+++ b/core/src/test/scala/org/apache/spark/SparkThrowableSuite.scala
@@ -125,8 +125,8 @@ class SparkThrowableSuite extends SparkFunSuite {
 
     // Does not fail with too many args (expects 0 args)
     assert(getMessage("DIVIDE_BY_ZERO", Array("foo", "bar", "baz")) ==
-      "[DIVIDE_BY_ZERO] divide by zero. " +
-      "To return NULL instead, use 'try_divide'. If necessary set foo to false " +
+      "[DIVIDE_BY_ZERO] Division by zero. " +
+      "To return NULL instead, use `try_divide`. If necessary set foo to false " +
       "(except for ANSI interval type) to bypass this error.bar")
   }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
index c1a8f57272b..151c43cd92d 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
@@ -76,7 +76,7 @@ object QueryCompilationErrors extends QueryErrorsBase {
   def unorderablePivotColError(pivotCol: Expression): Throwable = {
     new AnalysisException(
       errorClass = "INCOMPARABLE_PIVOT_COLUMN",
-      messageParameters = Array(pivotCol.toString))
+      messageParameters = Array(toSQLId(pivotCol.sql)))
   }
 
   def nonLiteralPivotValError(pivotVal: Expression): Throwable = {
@@ -2369,8 +2369,8 @@ object QueryCompilationErrors extends QueryErrorsBase {
 
   def invalidJsonSchema(schema: DataType): Throwable = {
     new AnalysisException(
-      errorClass = "INVALID_JSON_SCHEMA_MAPTYPE",
-      messageParameters = Array(schema.toString))
+      errorClass = "INVALID_JSON_SCHEMA_MAP_TYPE",
+      messageParameters = Array(toSQLType(schema)))
   }
 
   def tableIndexNotSupportedError(errorMessage: String): Throwable = {
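With `toSQLType`, the offending schema is now rendered in SQL notation (STRUCT, MAP, INT) rather than as Scala `DataType` objects. A rough sketch of hitting `invalidJsonSchema` through the JSON reader (assumes a spark-shell session; `spark.implicits._` supplies the `String` encoder):

```scala
import org.apache.spark.sql.types._
import spark.implicits._

// A MAP key type other than STRING is rejected for JSON schemas.
val schema = StructType(Seq(
  StructField("map", MapType(IntegerType, IntegerType), nullable = false)))

try {
  spark.read.schema(schema).json(spark.emptyDataset[String])
} catch {
  case e: org.apache.spark.sql.AnalysisException =>
    // Expected: [INVALID_JSON_SCHEMA_MAP_TYPE] Input schema "STRUCT<map: MAP<INT, INT>>"
    // can only contain STRING as a key type for a MAP.
    println(e.getMessage)
}
```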
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ArithmeticExpressionSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ArithmeticExpressionSuite.scala
index 8c0defea230..4c1f40c36a7 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ArithmeticExpressionSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ArithmeticExpressionSuite.scala
@@ -243,7 +243,7 @@ class ArithmeticExpressionSuite extends SparkFunSuite with ExpressionEvalHelper
       }
       withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
         checkExceptionInExpression[ArithmeticException](
-          Divide(left, Literal(convert(0))), "divide by zero")
+          Divide(left, Literal(convert(0))), "Division by zero")
       }
     }
@@ -287,7 +287,7 @@ class ArithmeticExpressionSuite extends SparkFunSuite with ExpressionEvalHelper
       }
       withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
         checkExceptionInExpression[ArithmeticException](
-          IntegralDivide(left, Literal(convert(0))), "divide by zero")
+          IntegralDivide(left, Literal(convert(0))), "Division by zero")
       }
     }
     checkEvaluation(IntegralDivide(positiveLongLit, negativeLongLit), 0L)
@@ -339,7 +339,7 @@ class ArithmeticExpressionSuite extends SparkFunSuite with ExpressionEvalHelper
       }
       withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
         checkExceptionInExpression[ArithmeticException](
-          Remainder(left, Literal(convert(0))), "divide by zero")
+          Remainder(left, Literal(convert(0))), "Division by zero")
       }
     }
     checkEvaluation(Remainder(positiveShortLit, positiveShortLit), 0.toShort)
@@ -444,7 +444,7 @@ class ArithmeticExpressionSuite extends SparkFunSuite with ExpressionEvalHelper
       }
       withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
         checkExceptionInExpression[ArithmeticException](
-          Pmod(left, Literal(convert(0))), "divide by zero")
+          Pmod(left, Literal(convert(0))), "Division by zero")
       }
     }
     checkEvaluation(Pmod(Literal(-7), Literal(3)), 2)
@@ -608,7 +608,7 @@ class ArithmeticExpressionSuite extends SparkFunSuite with ExpressionEvalHelper
     }
     withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
       checkExceptionInExpression[ArithmeticException](
-        IntegralDivide(Literal(Decimal(0.2)), Literal(Decimal(0.0))), "divide by zero")
+        IntegralDivide(Literal(Decimal(0.2)), Literal(Decimal(0.0))), "Division by zero")
     }
     // overflows long and so returns a wrong result
     checkEvaluation(DecimalPrecision.decimalAndDecimal.apply(IntegralDivide(
@@ -755,7 +755,7 @@ class ArithmeticExpressionSuite extends SparkFunSuite with ExpressionEvalHelper
         val zero = Literal(convert(0))
         checkEvaluation(operator(Literal.create(null, one.dataType), zero), null)
         checkEvaluation(operator(one, Literal.create(null, zero.dataType)), null)
-        checkExceptionInExpression[ArithmeticException](operator(one, zero), "divide by zero")
+        checkExceptionInExpression[ArithmeticException](operator(one, zero), "Division by zero")
       }
     }
   }
@@ -814,7 +814,7 @@ class ArithmeticExpressionSuite extends SparkFunSuite with ExpressionEvalHelper
         checkEvaluation(operator(Literal.create(null, one.dataType), zero), null)
         checkEvaluation(operator(one, Literal.create(null, zero.dataType)), null)
         checkExceptionInExpression[SparkArithmeticException](operator(one, zero),
-          "divide by zero")
+          "Division by zero")
       }
     }
   }
@@ -862,13 +862,13 @@ class ArithmeticExpressionSuite extends SparkFunSuite with ExpressionEvalHelper
     }
     withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
       checkExceptionInExpression[ArithmeticException](
-        IntegralDivide(Literal(Period.ZERO), Literal(Period.ZERO)), "divide by zero")
+        IntegralDivide(Literal(Period.ZERO), Literal(Period.ZERO)), "Division by zero")
       checkExceptionInExpression[ArithmeticException](
-        IntegralDivide(Literal(Period.ofYears(1)), Literal(Period.ZERO)), "divide by zero")
+        IntegralDivide(Literal(Period.ofYears(1)), Literal(Period.ZERO)), "Division by zero")
       checkExceptionInExpression[ArithmeticException](
-        IntegralDivide(Period.ofMonths(Int.MinValue), Literal(Period.ZERO)), "divide by zero")
+        IntegralDivide(Period.ofMonths(Int.MinValue), Literal(Period.ZERO)), "Division by zero")
       checkExceptionInExpression[ArithmeticException](
-        IntegralDivide(Period.ofMonths(Int.MaxValue), Literal(Period.ZERO)), "divide by zero")
+        IntegralDivide(Period.ofMonths(Int.MaxValue), Literal(Period.ZERO)), "Division by zero")
     }
 
     checkEvaluation(IntegralDivide(Literal.create(null, YearMonthIntervalType()),
@@ -914,16 +914,16 @@ class ArithmeticExpressionSuite extends SparkFunSuite with ExpressionEvalHelper
     }
     withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
       checkExceptionInExpression[ArithmeticException](
-        IntegralDivide(Literal(Duration.ZERO), Literal(Duration.ZERO)), "divide by zero")
+        IntegralDivide(Literal(Duration.ZERO), Literal(Duration.ZERO)), "Division by zero")
       checkExceptionInExpression[ArithmeticException](
         IntegralDivide(Literal(Duration.ofDays(1)),
-          Literal(Duration.ZERO)), "divide by zero")
+          Literal(Duration.ZERO)), "Division by zero")
       checkExceptionInExpression[ArithmeticException](
         IntegralDivide(Literal(Duration.of(Long.MaxValue, ChronoUnit.MICROS)),
-          Literal(Duration.ZERO)), "divide by zero")
+          Literal(Duration.ZERO)), "Division by zero")
       checkExceptionInExpression[ArithmeticException](
         IntegralDivide(Literal(Duration.of(Long.MinValue, ChronoUnit.MICROS)),
-          Literal(Duration.ZERO)), "divide by zero")
+          Literal(Duration.ZERO)), "Division by zero")
     }
 
     checkEvaluation(IntegralDivide(Literal.create(null, DayTimeIntervalType()),
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionExpressionsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionExpressionsSuite.scala
index 3cf3b4469a4..fb4bf43ba83 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionExpressionsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionExpressionsSuite.scala
@@ -2288,7 +2288,7 @@ class CollectionExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper
     val array = Literal.create(Seq(1, 2, 3), ArrayType(IntegerType))
     var expr: Expression = ElementAt(array, Literal(5))
     if (ansiEnabled) {
-      val errMsg = "Invalid index: 5, numElements: 3"
+      val errMsg = "The index 5 is out of bounds. The array has 3 elements."
       checkExceptionInExpression[Exception](expr, errMsg)
     } else {
       checkEvaluation(expr, null)
@@ -2296,7 +2296,7 @@ class CollectionExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper
 
     expr = ElementAt(array, Literal(-5))
     if (ansiEnabled) {
-      val errMsg = "Invalid index: -5, numElements: 3"
+      val errMsg = "The index -5 is out of bounds. The array has 3 elements."
       checkExceptionInExpression[Exception](expr, errMsg)
     } else {
       checkEvaluation(expr, null)
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala
index 755f212ab81..cdd11085edb 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ComplexTypeSuite.scala
@@ -70,12 +70,12 @@ class ComplexTypeSuite extends SparkFunSuite with ExpressionEvalHelper {
     if (ansiEnabled) {
       checkExceptionInExpression[Exception](
         GetArrayItem(array, Literal(5)),
-        "Invalid index: 5, numElements: 2"
+        "The index 5 is out of bounds. The array has 2 elements."
       )
       checkExceptionInExpression[Exception](
         GetArrayItem(array, Literal(-1)),
-        "Invalid index: -1, numElements: 2"
+        "The index -1 is out of bounds. The array has 2 elements."
       )
     } else {
       checkEvaluation(GetArrayItem(array, Literal(5)), null)
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/IntervalExpressionsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/IntervalExpressionsSuite.scala
index 05f9d0f6696..b9c7629f692 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/IntervalExpressionsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/IntervalExpressionsSuite.scala
@@ -176,7 +176,7 @@ class IntervalExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper {
     check("2 years -8 seconds", 0.5, "4 years -16 seconds")
     check("-1 month 2 microseconds", -0.25, "4 months -8 microseconds")
     check("1 month 3 microsecond", 1.5, "2 microseconds")
-    check("1 second", 0, "divide by zero", Some(true))
+    check("1 second", 0, "Division by zero", Some(true))
     check("1 second", 0, null, Some(false))
     check(s"${Int.MaxValue} months", 0.9, "integer overflow", Some(true))
     check(s"${Int.MaxValue} months", 0.9, Int.MaxValue + " months", Some(false))
@@ -412,8 +412,8 @@ class IntervalExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper {
     }
 
     Seq(
-      (Period.ofMonths(1), 0) -> "divide by zero",
-      (Period.ofMonths(Int.MinValue), 0d) -> "divide by zero",
+      (Period.ofMonths(1), 0) -> "Division by zero",
+      (Period.ofMonths(Int.MinValue), 0d) -> "Division by zero",
       (Period.ofMonths(-100), Float.NaN) -> "input is infinite or NaN"
     ).foreach { case ((period, num), expectedErrMsg) =>
       checkExceptionInExpression[ArithmeticException](
@@ -447,8 +447,8 @@ class IntervalExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper {
     }
 
     Seq(
-      (Duration.ofDays(1), 0) -> "divide by zero",
-      (Duration.ofMillis(Int.MinValue), 0d) -> "divide by zero",
+      (Duration.ofDays(1), 0) -> "Division by zero",
+      (Duration.ofMillis(Int.MinValue), 0d) -> "Division by zero",
       (Duration.ofSeconds(-100), Float.NaN) -> "input is infinite or NaN"
     ).foreach { case ((period, num), expectedErrMsg) =>
       checkExceptionInExpression[ArithmeticException](
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala
index 4d43ab6a408..b37d8ca177f 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/StringExpressionsSuite.scala
@@ -1215,7 +1215,7 @@ class StringExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper {
     withSQLConf(SQLConf.ANSI_ENABLED.key -> ansiEnabled.toString) {
       var expr: Expression = Elt(Seq(Literal(4), Literal("123"), Literal("456")))
       if (ansiEnabled) {
-        val errMsg = "Invalid index: 4, numElements: 2"
+        val errMsg = "The index 4 is out of bounds. The array has 2 elements."
        checkExceptionInExpression[Exception](expr, errMsg)
       } else {
         checkEvaluation(expr, null)
@@ -1223,7 +1223,7 @@ class StringExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper {
 
       expr = Elt(Seq(Literal(0), Literal("123"), Literal("456")))
       if (ansiEnabled) {
-        val errMsg = "Invalid index: 0, numElements: 2"
+        val errMsg = "The index 0 is out of bounds. The array has 2 elements."
         checkExceptionInExpression[Exception](expr, errMsg)
       } else {
         checkEvaluation(expr, null)
@@ -1231,7 +1231,7 @@ class StringExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper {
 
       expr = Elt(Seq(Literal(-1), Literal("123"), Literal("456")))
       if (ansiEnabled) {
-        val errMsg = "Invalid index: -1, numElements: 2"
+        val errMsg = "The index -1 is out of bounds. The array has 2 elements."
         checkExceptionInExpression[Exception](expr, errMsg)
       } else {
         checkEvaluation(expr, null)
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
index 62059c0b996..0e65886a2eb 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
@@ -307,7 +307,7 @@ class IntervalUtilsSuite extends SparkFunSuite with SQLHelper {
     interval = new CalendarInterval(123, 456, 789)
     assert(divide(interval, 0) === null)
     val e2 = intercept[ArithmeticException](divideExact(interval, 0))
-    assert(e2.getMessage.contains("divide by zero"))
+    assert(e2.getMessage.contains("Division by zero"))
   }
 
   test("from day-time string") {
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out
index fb148bbbe19..e0da937612d 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out
@@ -168,7 +168,7 @@ select element_at(array(1, 2, 3), 5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: 5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
+[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] The index 5 is out of bounds. The array has 3 elements. To return NULL instead, use `try_element_at`. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.


 -- !query
@@ -177,7 +177,7 @@ select element_at(array(1, 2, 3), -5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: -5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
+[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] The index -5 is out of bounds. The array has 3 elements. To return NULL instead, use `try_element_at`. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.


 -- !query
@@ -195,7 +195,7 @@ select elt(4, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 4, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
+[INVALID_ARRAY_INDEX] The index 4 is out of bounds. The array has 2 elements. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.


 -- !query
@@ -204,7 +204,7 @@ select elt(0, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 0, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
+[INVALID_ARRAY_INDEX] The index 0 is out of bounds. The array has 2 elements. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.


 -- !query
@@ -213,7 +213,7 @@ select elt(-1, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
+[INVALID_ARRAY_INDEX] The index -1 is out of bounds. The array has 2 elements. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.


 -- !query
@@ -254,7 +254,7 @@ select array(1, 2, 3)[5]
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 5, numElements: 3. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
+[INVALID_ARRAY_INDEX] The index 5 is out of bounds. The array has 3 elements. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.


 -- !query
@@ -263,7 +263,7 @@ select array(1, 2, 3)[-1]
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 3. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
+[INVALID_ARRAY_INDEX] The index -1 is out of bounds. The array has 3 elements. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.


 -- !query
@@ -337,7 +337,7 @@ select element_at(array(1, 2, 3), 5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: 5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
+[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] The index 5 is out of bounds. The array has 3 elements. To return NULL instead, use `try_element_at`. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.


 -- !query
@@ -346,7 +346,7 @@ select element_at(array(1, 2, 3), -5)
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] Invalid index: -5, numElements: 3. To return NULL instead, use 'try_element_at'. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
+[INVALID_ARRAY_INDEX_IN_ELEMENT_AT] The index -5 is out of bounds. The array has 3 elements. To return NULL instead, use `try_element_at`. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.


 -- !query
@@ -364,7 +364,7 @@ select elt(4, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 4, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
+[INVALID_ARRAY_INDEX] The index 4 is out of bounds. The array has 2 elements. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.


 -- !query
@@ -373,7 +373,7 @@ select elt(0, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: 0, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
+[INVALID_ARRAY_INDEX] The index 0 is out of bounds. The array has 2 elements. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.


 -- !query
@@ -382,4 +382,4 @@ select elt(-1, '123', '456')
 struct<>
 -- !query output
 org.apache.spark.SparkArrayIndexOutOfBoundsException
-[INVALID_ARRAY_INDEX] Invalid index: -1, numElements: 2. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
+[INVALID_ARRAY_INDEX] The index -1 is out of bounds. The array has 2 elements. If necessary set "spark.sql.ansi.enabled" to false to bypass this error.
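The `spark.sql.ansi.enabled` escape hatch these messages point to behaves roughly as follows (illustrative sketch, same spark-shell assumptions as above):

```scala
// With ANSI mode off, an out-of-range index degrades to NULL instead of failing.
spark.conf.set("spark.sql.ansi.enabled", "false")
spark.sql("SELECT elt(4, '123', '456')").show()   // prints a single NULL row

// With ANSI mode on, the same query fails with the INVALID_ARRAY_INDEX message above.
spark.conf.set("spark.sql.ansi.enabled", "true")
try {
  spark.sql("SELECT elt(4, '123', '456')").collect()
} catch {
  case e: Exception => println(e.getMessage)
}
```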
If necessary set "spark.sql.ansi.enabled" to false to bypass this error. diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out index 3cc089ebbf6..f67931e6122 100644 --- a/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/ansi/interval.sql.out @@ -228,7 +228,7 @@ select interval '2 seconds' / 0 struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 7) == select interval '2 seconds' / 0 ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -264,7 +264,7 @@ select interval '2' year / 0 struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 7) == select interval '2' year / 0 ^^^^^^^^^^^^^^^^^^^^^ diff --git a/sql/core/src/test/resources/sql-tests/results/interval.sql.out b/sql/core/src/test/resources/sql-tests/results/interval.sql.out index 19412d04194..bd564cb6904 100644 --- a/sql/core/src/test/resources/sql-tests/results/interval.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/interval.sql.out @@ -204,7 +204,7 @@ select interval '2 seconds' / 0 struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 7) == select interval '2 seconds' / 0 ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -240,7 +240,7 @@ select interval '2' year / 0 struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 7) == select interval '2' year / 0 ^^^^^^^^^^^^^^^^^^^^^ diff --git a/sql/core/src/test/resources/sql-tests/results/pivot.sql.out b/sql/core/src/test/resources/sql-tests/results/pivot.sql.out index 4bc7d07fa96..8e8b32d9025 100644 --- a/sql/core/src/test/resources/sql-tests/results/pivot.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/pivot.sql.out @@ -458,7 +458,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -[INCOMPARABLE_PIVOT_COLUMN] Invalid pivot column 'm#x'. Pivot columns must be comparable. 
+[INCOMPARABLE_PIVOT_COLUMN] Invalid pivot column `__auto_generated_subquery_name`.`m`. Pivot columns must be comparable. -- !query @@ -475,7 +475,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -[INCOMPARABLE_PIVOT_COLUMN] Invalid pivot column 'named_struct(course, course#x, m, m#x)'. Pivot columns must be comparable. +[INCOMPARABLE_PIVOT_COLUMN] Invalid pivot column `named_struct('course', __auto_generated_subquery_name`.`course, 'm', __auto_generated_subquery_name`.`m)`. Pivot columns must be comparable. -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out index 8932672d7a2..e0e59cb28bd 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out @@ -179,7 +179,7 @@ SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 26) == SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END ^^^ @@ -191,7 +191,7 @@ SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 26) == SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END ^^^ @@ -203,7 +203,7 @@ SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 30) == SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl ^^^ diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out index 54b3c4410ac..23c14d05086 100755 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out @@ -575,7 +575,7 @@ select bigint('9223372036854775800') / bigint('0') struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. 
If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 7) == select bigint('9223372036854775800') / bigint('0') ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -587,7 +587,7 @@ select bigint('-9223372036854775808') / smallint('0') struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 7) == select bigint('-9223372036854775808') / smallint('0') ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -599,7 +599,7 @@ select smallint('100') / bigint('0') struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 7) == select smallint('100') / bigint('0') ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out index 618f57b1cf0..be149351363 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out @@ -177,7 +177,7 @@ SELECT 1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2 struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 39) == ...1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2 ^^^ diff --git a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out index b6591be87a8..9f325325906 100755 --- a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out @@ -179,7 +179,7 @@ SELECT CASE WHEN udf(1=0) THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. 
== SQL(line 1, position 31) == SELECT CASE WHEN udf(1=0) THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END ^^^ @@ -191,7 +191,7 @@ SELECT CASE 1 WHEN 0 THEN 1/udf(0) WHEN 1 THEN 1 ELSE 2/0 END struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 54) == ...HEN 1/udf(0) WHEN 1 THEN 1 ELSE 2/0 END ^^^ @@ -203,7 +203,7 @@ SELECT CASE WHEN i > 100 THEN udf(1/0) ELSE udf(0) END FROM case_tbl struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 34) == ...LECT CASE WHEN i > 100 THEN udf(1/0) ELSE udf(0) END FROM case_tbl ^^^ diff --git a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out index 60043b7b01d..4ac948d310e 100644 --- a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out @@ -177,7 +177,7 @@ SELECT 1 AS one FROM test_having WHERE 1/udf(a) = 1 HAVING 1 < 2 struct<> -- !query output org.apache.spark.SparkArithmeticException -[DIVIDE_BY_ZERO] divide by zero. To return NULL instead, use 'try_divide'. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. +[DIVIDE_BY_ZERO] Division by zero. To return NULL instead, use `try_divide`. If necessary set "spark.sql.ansi.enabled" to false (except for ANSI interval type) to bypass this error. == SQL(line 1, position 39) == ...1 AS one FROM test_having WHERE 1/udf(a) = 1 HAVING 1 < 2 ^^^^^^^^ diff --git a/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out index c61ac22eadd..990e1db25dd 100644 --- a/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out @@ -424,7 +424,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -[INCOMPARABLE_PIVOT_COLUMN] Invalid pivot column 'm#x'. Pivot columns must be comparable. +[INCOMPARABLE_PIVOT_COLUMN] Invalid pivot column `__auto_generated_subquery_name`.`m`. Pivot columns must be comparable. -- !query @@ -441,7 +441,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -[INCOMPARABLE_PIVOT_COLUMN] Invalid pivot column 'named_struct(course, course#x, m, m#x)'. Pivot columns must be comparable. +[INCOMPARABLE_PIVOT_COLUMN] Invalid pivot column `named_struct('course', __auto_generated_subquery_name`.`course, 'm', __auto_generated_subquery_name`.`m)`. Pivot columns must be comparable. 
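The `__auto_generated_subquery_name` qualifier in the new pivot messages is the alias Spark assigns to an unnamed subquery. A sketch reproducing the failure (the `trainingSales` rows below are made up for illustration; assumes a spark-shell session):

```scala
import spark.implicits._

Seq(("dotNET", 2012, 10000.0), ("Java", 2012, 20000.0))
  .toDF("course", "year", "earnings")
  .createOrReplaceTempView("trainingSales")

// A MAP column has no ordering, so it cannot be used as a pivot column.
try {
  spark.sql(
    """
      |SELECT * FROM (
      |  SELECT course, earnings, map(course, year) AS m FROM trainingSales
      |)
      |PIVOT (
      |  sum(earnings) FOR m IN (map('dotNET', 2012))
      |)
      |""".stripMargin).collect()
} catch {
  case e: org.apache.spark.sql.AnalysisException =>
    // Expected: [INCOMPARABLE_PIVOT_COLUMN] Invalid pivot column
    // `__auto_generated_subquery_name`.`m`. Pivot columns must be comparable.
    println(e.getMessage)
}
```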
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
index 9c40a33adab..a63fc7b9e45 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
@@ -2766,19 +2766,19 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession {
       Seq((Period.ofYears(9999), 0)).toDF("i", "n").select($"i" / $"n").collect()
     }.getCause
     assert(e.isInstanceOf[ArithmeticException])
-    assert(e.getMessage.contains("divide by zero"))
+    assert(e.getMessage.contains("Division by zero"))
 
     val e2 = intercept[SparkException] {
       Seq((Period.ofYears(9999), 0d)).toDF("i", "n").select($"i" / $"n").collect()
     }.getCause
     assert(e2.isInstanceOf[ArithmeticException])
-    assert(e2.getMessage.contains("divide by zero"))
+    assert(e2.getMessage.contains("Division by zero"))
 
     val e3 = intercept[SparkException] {
       Seq((Period.ofYears(9999), BigDecimal(0))).toDF("i", "n").select($"i" / $"n").collect()
     }.getCause
     assert(e3.isInstanceOf[ArithmeticException])
-    assert(e3.getMessage.contains("divide by zero"))
+    assert(e3.getMessage.contains("Division by zero"))
   }
 
   test("SPARK-34875: divide day-time interval by numeric") {
@@ -2813,19 +2813,19 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession {
       Seq((Duration.ofDays(9999), 0)).toDF("i", "n").select($"i" / $"n").collect()
     }.getCause
     assert(e.isInstanceOf[ArithmeticException])
-    assert(e.getMessage.contains("divide by zero"))
+    assert(e.getMessage.contains("Division by zero"))
 
     val e2 = intercept[SparkException] {
       Seq((Duration.ofDays(9999), 0d)).toDF("i", "n").select($"i" / $"n").collect()
     }.getCause
     assert(e2.isInstanceOf[ArithmeticException])
-    assert(e2.getMessage.contains("divide by zero"))
+    assert(e2.getMessage.contains("Division by zero"))
 
     val e3 = intercept[SparkException] {
       Seq((Duration.ofDays(9999), BigDecimal(0))).toDF("i", "n").select($"i" / $"n").collect()
     }.getCause
     assert(e3.isInstanceOf[ArithmeticException])
-    assert(e3.getMessage.contains("divide by zero"))
+    assert(e3.getMessage.contains("Division by zero"))
   }
 
   test("SPARK-34896: return day-time interval from dates subtraction") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index 619727acad2..47f79c9ada7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -3043,7 +3043,7 @@ class DataFrameSuite extends QueryTest
     ).foreach { case (schema, jsonData) =>
       withTempDir { dir =>
         val colName = "col"
-        val msg = "can only contain StringType as a key type for a MapType"
+        val msg = "can only contain STRING as a key type for a MAP"
 
         val thrown1 = intercept[AnalysisException](
           spark.read.schema(StructType(Seq(StructField(colName, schema))))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
index 8fffccbed40..252c7298cb5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
@@ -398,8 +398,7 @@ class QueryCompilationErrorsSuite
     }
   }
 
-  test("INVALID_JSON_SCHEMA_MAPTYPE: " +
-    "Parse JSON rows can only contain StringType as a key type for a MapType.") {
+  test("INVALID_JSON_SCHEMA_MAP_TYPE: only STRING as a key type for MAP") {
     val schema = StructType(
       StructField("map", MapType(IntegerType, IntegerType, true), false) :: Nil)
 
@@ -407,10 +406,9 @@ class QueryCompilationErrorsSuite
       exception = intercept[AnalysisException] {
         spark.read.schema(schema).json(spark.emptyDataset[String])
       },
-      errorClass = "INVALID_JSON_SCHEMA_MAPTYPE",
-      msg = "Input schema " +
-        "StructType(StructField(map,MapType(IntegerType,IntegerType,true),false)) " +
-        "can only contain StringType as a key type for a MapType."
+      errorClass = "INVALID_JSON_SCHEMA_MAP_TYPE",
+      msg = """Input schema "STRUCT<map: MAP<INT, INT>>" """ +
+        "can only contain STRING as a key type for a MAP."
     )
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
index 220a98e27b9..ba9858bc988 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionAnsiErrorsSuite.scala
@@ -47,7 +47,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with QueryErrorsSuiteBase
       },
       errorClass = "DIVIDE_BY_ZERO",
       msg =
-        "divide by zero. To return NULL instead, use 'try_divide'. If necessary set " +
+        "Division by zero. To return NULL instead, use `try_divide`. If necessary set " +
         s"$ansiConf to false (except for ANSI interval type) to bypass this error." +
         """
           |== SQL(line 1, position 7) ==
@@ -91,7 +91,7 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with QueryErrorsSuiteBase
         sql("select array(1, 2, 3, 4, 5)[8]").collect()
       },
       errorClass = "INVALID_ARRAY_INDEX",
-      msg = "Invalid index: 8, numElements: 5. " +
+      msg = "The index 8 is out of bounds. The array has 5 elements. " +
         s"If necessary set $ansiConf to false to bypass this error."
     )
   }
@@ -102,8 +102,8 @@ class QueryExecutionAnsiErrorsSuite extends QueryTest with QueryErrorsSuiteBase
         sql("select element_at(array(1, 2, 3, 4, 5), 8)").collect()
       },
       errorClass = "INVALID_ARRAY_INDEX_IN_ELEMENT_AT",
-      msg = "Invalid index: 8, numElements: 5. " +
-        "To return NULL instead, use 'try_element_at'. " +
+      msg = "The index 8 is out of bounds. The array has 5 elements. " +
+        "To return NULL instead, use `try_element_at`. " +
         s"If necessary set $ansiConf to false to bypass this error."
     )
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
index 21be7d4d75a..ac865d01e3b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
@@ -414,25 +414,26 @@ class QueryExecutionErrorsSuite
       trainingSales
       sql(
         """
-          | select * from (
-          |   select *,map(sales.course, sales.year) as map
-          |   from trainingSales
+          | select *
+          | from (
+          |   select *,map(sales.course, sales.year) as map
+          |   from trainingSales
           | )
           | pivot (
-          |   sum(sales.earnings) as sum
-          |   for map in (
-          |     map("dotNET", 2012), map("JAVA", 2012),
-          |     map("dotNet", 2013), map("Java", 2013)
-          |   ))
+          |   sum(sales.earnings) as sum
+          |   for map in (
+          |     map("dotNET", 2012), map("JAVA", 2012),
+          |     map("dotNet", 2013), map("Java", 2013)
+          |   )
+          | )
           |""".stripMargin).collect()
     }
 
     checkErrorClass(
       exception = e,
       errorClass = "INCOMPARABLE_PIVOT_COLUMN",
-      msg = "Invalid pivot column 'map.*\\'. Pivot columns must be comparable.",
-      sqlState = Some("42000"),
-      matchMsg = true
-    )
+      msg = "Invalid pivot column `__auto_generated_subquery_name`.`map`. " +
+        "Pivot columns must be comparable.",
+      sqlState = Some("42000"))
   }
 
   test("UNSUPPORTED_SAVE_MODE: unsupported null saveMode whether the path exists or not") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala
index cc3e9f71eeb..c1abe8e90be 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala
@@ -897,7 +897,7 @@ abstract class SQLViewSuite extends QueryTest with SQLTestUtils {
         val e = intercept[ArithmeticException] {
           sql("SELECT * FROM v5").collect()
         }.getMessage
-        assert(e.contains("divide by zero"))
+        assert(e.contains("Division by zero"))
       }
     }
 
@@ -907,7 +907,7 @@ abstract class SQLViewSuite extends QueryTest with SQLTestUtils {
         val e = intercept[ArithmeticException] {
           sql("SELECT * FROM v1").collect()
         }.getMessage
-        assert(e.contains("divide by zero"))
+        assert(e.contains("Division by zero"))
       }
     }
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala
index 8a45895ca60..d8c69b0984a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala
@@ -2332,7 +2332,7 @@ class FileStreamSourceSuite extends FileStreamSourceTest {
     ).foreach { schema =>
       withTempDir { dir =>
         val colName = "col"
-        val msg = "can only contain StringType as a key type for a MapType"
+        val msg = "can only contain STRING as a key type for a MAP"
 
         val thrown1 = intercept[AnalysisException](
           spark.readStream.schema(StructType(Seq(StructField(colName, schema))))

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org