This is an automated email from the ASF dual-hosted git repository.
hvanhovell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 783880b2ff7 [SPARK-44281][SQL] Move QueryCompilation error that used by DataType to sql/api as DataTypeErrors
783880b2ff7 is described below
commit 783880b2ff79a62749dd3277c855295f23b813fd
Author: Rui Wang <[email protected]>
AuthorDate: Wed Jul 5 16:40:30 2023 -0400
[SPARK-44281][SQL] Move QueryCompilation error that used by DataType to sql/api as DataTypeErrors
### What changes were proposed in this pull request?
Move the QueryCompilation errors that are used by DataType to sql/api as DataTypeErrors.
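For orientation, here is a condensed sketch of the new shape, assembled from the diff in this patch (one representative method shown; this is an editorial illustration, not extra code added by the commit):

```scala
// sql/api: DataTypeErrors builds the same error without touching AnalysisException,
// so DataType no longer needs QueryCompilationErrors from sql/catalyst.
package org.apache.spark.sql.errors

import org.apache.spark.SparkException
import org.apache.spark.sql.catalyst.util.QuotingUtils

private[sql] object DataTypeErrors extends DataTypeErrorsBase {
  def schemaFailToParseError(schema: String, e: Throwable): Throwable = {
    new SparkException(
      errorClass = "INVALID_SCHEMA.PARSE_ERROR",
      messageParameters = Map(
        "inputSchema" -> QuotingUtils.toSQLSchema(schema),
        "reason" -> e.getMessage),
      cause = e)
  }
}

// sql/catalyst call site (DataType.scala) after the move:
//   throw DataTypeErrors.schemaFailToParseError(schema, e)
// The error now surfaces as SparkException instead of AnalysisException,
// which is why the JsonFunctionsSuite expectations change below.
```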
### Why are the changes needed?
To further simplify the DataType interface.
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
Existing tests.
Closes #41835 from amaliujia/compilation_errors.
Authored-by: Rui Wang <[email protected]>
Signed-off-by: Herman van Hovell <[email protected]>
---
.../spark/sql/catalyst/util/QuotingUtils.scala | 31 ++++++++++++
.../spark/sql/catalyst/util/StringUtils.scala | 6 +++
.../apache/spark/sql/errors/DataTypeErrors.scala | 56 +++++++++++++++++++++-
.../spark/sql/errors/DataTypeErrorsBase.scala | 29 +++++++++++
.../apache/spark/sql/catalyst/util/package.scala | 4 +-
.../spark/sql/errors/QueryCompilationErrors.scala | 54 +--------------------
.../apache/spark/sql/errors/QueryErrorsBase.scala | 32 +++++--------
.../org/apache/spark/sql/types/DataType.scala | 4 +-
.../spark/sql/types/DayTimeIntervalType.scala | 9 ++--
.../org/apache/spark/sql/types/DecimalType.scala | 6 +--
.../spark/sql/types/YearMonthIntervalType.scala | 9 ++--
.../apache/spark/sql/types/StructTypeSuite.scala | 4 +-
.../org/apache/spark/sql/JsonFunctionsSuite.scala | 6 +--
13 files changed, 158 insertions(+), 92 deletions(-)
diff --git a/sql/api/src/main/scala/org/apache/spark/sql/catalyst/util/QuotingUtils.scala b/sql/api/src/main/scala/org/apache/spark/sql/catalyst/util/QuotingUtils.scala
new file mode 100644
index 00000000000..7d5b6946244
--- /dev/null
+++ b/sql/api/src/main/scala/org/apache/spark/sql/catalyst/util/QuotingUtils.scala
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.catalyst.util
+
+object QuotingUtils {
+ private def quoteByDefault(elem: String): String = {
+ "\"" + elem + "\""
+ }
+
+ def toSQLConf(conf: String): String = {
+ quoteByDefault(conf)
+ }
+
+ def toSQLSchema(schema: String): String = {
+ quoteByDefault(schema)
+ }
+}
diff --git a/sql/api/src/main/scala/org/apache/spark/sql/catalyst/util/StringUtils.scala b/sql/api/src/main/scala/org/apache/spark/sql/catalyst/util/StringUtils.scala
index 384453e3b53..c12a1f50daa 100644
--- a/sql/api/src/main/scala/org/apache/spark/sql/catalyst/util/StringUtils.scala
+++ b/sql/api/src/main/scala/org/apache/spark/sql/catalyst/util/StringUtils.scala
@@ -101,4 +101,10 @@ object SparkStringUtils extends Logging {
def truncatedString[T](seq: Seq[T], sep: String, maxFields: Int): String = {
truncatedString(seq, "", sep, "", maxFields)
}
+
+ def quoteIdentifier(name: String): String = {
+ // Escapes back-ticks within the identifier name with double-back-ticks, and then quote the
+ // identifier with back-ticks.
+ "`" + name.replace("`", "``") + "`"
+ }
}
diff --git a/sql/api/src/main/scala/org/apache/spark/sql/errors/DataTypeErrors.scala b/sql/api/src/main/scala/org/apache/spark/sql/errors/DataTypeErrors.scala
index 02e8b12c707..f39b5b13456 100644
--- a/sql/api/src/main/scala/org/apache/spark/sql/errors/DataTypeErrors.scala
+++ b/sql/api/src/main/scala/org/apache/spark/sql/errors/DataTypeErrors.scala
@@ -17,6 +17,7 @@
package org.apache.spark.sql.errors
import org.apache.spark.{SparkArithmeticException, SparkException, SparkRuntimeException, SparkUnsupportedOperationException}
+import org.apache.spark.sql.catalyst.util.QuotingUtils
import org.apache.spark.unsafe.types.UTF8String
/**
@@ -24,7 +25,7 @@ import org.apache.spark.unsafe.types.UTF8String
* This does not include exceptions thrown during the eager execution of commands, which are
* grouped into [[QueryCompilationErrors]].
*/
-private[sql] object DataTypeErrors {
+private[sql] object DataTypeErrors extends DataTypeErrorsBase {
def unsupportedOperationExceptionError(): SparkUnsupportedOperationException = {
new SparkUnsupportedOperationException(
errorClass = "_LEGACY_ERROR_TEMP_2225",
@@ -92,4 +93,57 @@ private[sql] object DataTypeErrors {
errorClass = "_LEGACY_ERROR_TEMP_2120",
messageParameters = Map("clazz" -> clazz.toString()))
}
+
+ def schemaFailToParseError(schema: String, e: Throwable): Throwable = {
+ new SparkException(
+ errorClass = "INVALID_SCHEMA.PARSE_ERROR",
+ messageParameters = Map(
+ "inputSchema" -> QuotingUtils.toSQLSchema(schema),
+ "reason" -> e.getMessage
+ ),
+ cause = e)
+ }
+
+ def invalidDayTimeIntervalType(startFieldName: String, endFieldName: String): Throwable = {
+ new SparkException(
+ errorClass = "_LEGACY_ERROR_TEMP_1224",
+ messageParameters = Map(
+ "startFieldName" -> startFieldName,
+ "endFieldName" -> endFieldName),
+ cause = null)
+ }
+
+ def invalidDayTimeField(field: Byte, supportedIds: Seq[String]): Throwable = {
+ new SparkException(
+ errorClass = "_LEGACY_ERROR_TEMP_1223",
+ messageParameters = Map(
+ "field" -> field.toString,
+ "supportedIds" -> supportedIds.mkString(", ")),
+ cause = null)
+ }
+
+ def invalidYearMonthField(field: Byte, supportedIds: Seq[String]): Throwable = {
+ new SparkException(
+ errorClass = "_LEGACY_ERROR_TEMP_1225",
+ messageParameters = Map(
+ "field" -> field.toString,
+ "supportedIds" -> supportedIds.mkString(", ")),
+ cause = null)
+ }
+
+ def decimalCannotGreaterThanPrecisionError(scale: Int, precision: Int): Throwable = {
+ new SparkException(
+ errorClass = "_LEGACY_ERROR_TEMP_1228",
+ messageParameters = Map(
+ "scale" -> scale.toString,
+ "precision" -> precision.toString),
+ cause = null)
+ }
+
+ def negativeScaleNotAllowedError(scale: Int): Throwable = {
+ val sqlConf = QuotingUtils.toSQLConf("spark.sql.legacy.allowNegativeScaleOfDecimal")
+ SparkException.internalError(s"Negative scale is not allowed: ${scale.toString}." +
+ s" Set the config ${sqlConf}" +
+ " to \"true\" to allow it.")
+ }
}
diff --git a/sql/api/src/main/scala/org/apache/spark/sql/errors/DataTypeErrorsBase.scala b/sql/api/src/main/scala/org/apache/spark/sql/errors/DataTypeErrorsBase.scala
new file mode 100644
index 00000000000..dc95e76a820
--- /dev/null
+++ b/sql/api/src/main/scala/org/apache/spark/sql/errors/DataTypeErrorsBase.scala
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.errors
+
+import org.apache.spark.sql.catalyst.util.SparkStringUtils
+
+private[sql] trait DataTypeErrorsBase {
+ def toSQLId(parts: Seq[String]): String = {
+ val cleaned = parts match {
+ case Seq("__auto_generated_subquery_name", rest @ _*) if rest != Nil => rest
+ case other => other
+ }
+ cleaned.map(SparkStringUtils.quoteIdentifier).mkString(".")
+ }
+}
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala
index 0555d8d5fa4..35d9b256800 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala
@@ -123,9 +123,7 @@ package object util extends Logging {
}
def quoteIdentifier(name: String): String = {
- // Escapes back-ticks within the identifier name with double-back-ticks, and then quote the
- // identifier with back-ticks.
- "`" + name.replace("`", "``") + "`"
+ SparkStringUtils.quoteIdentifier(name)
}
def quoteNameParts(name: Seq[String]): String = {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
index 48223cb34e1..6a7979afe41 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
@@ -38,7 +38,7 @@ import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
import org.apache.spark.sql.connector.catalog.functions.{BoundFunction, UnboundFunction}
import org.apache.spark.sql.connector.expressions.filter.Predicate
import org.apache.spark.sql.internal.SQLConf
-import org.apache.spark.sql.internal.SQLConf.{LEGACY_ALLOW_NEGATIVE_SCALE_OF_DECIMAL_ENABLED, LEGACY_CTE_PRECEDENCE_POLICY}
+import org.apache.spark.sql.internal.SQLConf.LEGACY_CTE_PRECEDENCE_POLICY
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types._
@@ -1047,16 +1047,6 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase {
"totalAmountOfParts" -> totalAmountOfParts))
}
- def schemaFailToParseError(schema: String, e: Throwable): Throwable = {
- new AnalysisException(
- errorClass = "INVALID_SCHEMA.PARSE_ERROR",
- messageParameters = Map(
- "inputSchema" -> toSQLSchema(schema),
- "reason" -> e.getMessage
- ),
- cause = Some(e))
- }
-
def unexpectedSchemaTypeError(exp: Expression): Throwable = {
new AnalysisException(
errorClass = "INVALID_SCHEMA.NON_STRING_LITERAL",
@@ -2257,34 +2247,6 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase {
messageParameters = Map("resourceType" -> resourceType))
}
- def invalidDayTimeField(field: Byte): Throwable = {
- val supportedIds = DayTimeIntervalType.dayTimeFields
- .map(i => s"$i (${DayTimeIntervalType.fieldToString(i)})")
- new AnalysisException(
- errorClass = "_LEGACY_ERROR_TEMP_1223",
- messageParameters = Map(
- "field" -> field.toString,
- "supportedIds" -> supportedIds.mkString(", ")))
- }
-
- def invalidDayTimeIntervalType(startFieldName: String, endFieldName: String): Throwable = {
- new AnalysisException(
- errorClass = "_LEGACY_ERROR_TEMP_1224",
- messageParameters = Map(
- "startFieldName" -> startFieldName,
- "endFieldName" -> endFieldName))
- }
-
- def invalidYearMonthField(field: Byte): Throwable = {
- val supportedIds = YearMonthIntervalType.yearMonthFields
- .map(i => s"$i (${YearMonthIntervalType.fieldToString(i)})")
- new AnalysisException(
- errorClass = "_LEGACY_ERROR_TEMP_1225",
- messageParameters = Map(
- "field" -> field.toString,
- "supportedIds" -> supportedIds.mkString(", ")))
- }
-
def configRemovedInVersionError(
configName: String,
version: String,
@@ -2297,20 +2259,6 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase {
"comment" -> comment))
}
- def decimalCannotGreaterThanPrecisionError(scale: Int, precision: Int): Throwable = {
- new AnalysisException(
- errorClass = "_LEGACY_ERROR_TEMP_1228",
- messageParameters = Map(
- "scale" -> scale.toString,
- "precision" -> precision.toString))
- }
-
- def negativeScaleNotAllowedError(scale: Int): Throwable = {
- SparkException.internalError(s"Negative scale is not allowed: ${scale.toString}." +
- s" Set the config ${toSQLConf(LEGACY_ALLOW_NEGATIVE_SCALE_OF_DECIMAL_ENABLED.key)}" +
- " to \"true\" to allow it.")
- }
-
def invalidPartitionColumnKeyInTableError(key: String, tblName: String): Throwable = {
new AnalysisException(
errorClass = "_LEGACY_ERROR_TEMP_1231",
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
index 77f714a0f90..0a44ededebd 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
@@ -23,13 +23,14 @@ import org.apache.spark.QueryContext
import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
import org.apache.spark.sql.catalyst.expressions.{Expression, Literal}
import org.apache.spark.sql.catalyst.trees.SQLQueryContext
-import org.apache.spark.sql.catalyst.util.{quoteIdentifier, toPrettySQL}
+import org.apache.spark.sql.catalyst.util.{toPrettySQL, QuotingUtils}
import org.apache.spark.sql.types.{AbstractDataType, DataType, DoubleType, FloatType, TypeCollection}
/**
* The trait exposes util methods for preparing error messages such as quoting of error elements.
* All classes that extent `QueryErrorsBase` shall follow the rules:
- * 1. Any values shall be outputted in the SQL standard style by using `toSQLValue()`.
+ * 1. Any values shall be outputted in the SQL standard style by usi
+ * ng `toSQLValue()`.
* For example: 'a string value', 1, NULL.
* 2. SQL types shall be double quoted and outputted in the upper case using `toSQLType()`.
* For example: "INT", "DECIMAL(10,0)".
@@ -45,7 +46,7 @@ import org.apache.spark.sql.types.{AbstractDataType, DataType, DoubleType, Float
* 7. SQL expressions shall be wrapped by double quotes.
* For example: "earnings + 1".
*/
-private[sql] trait QueryErrorsBase {
+private[sql] trait QueryErrorsBase extends DataTypeErrorsBase {
// Converts an error class parameter to its SQL representation
def toSQLValue(v: Any, t: DataType): String = Literal.create(v, t) match {
case Literal(null, _) => "NULL"
@@ -70,14 +71,6 @@ private[sql] trait QueryErrorsBase {
text.toUpperCase(Locale.ROOT)
}
- def toSQLId(parts: Seq[String]): String = {
- val cleaned = parts match {
- case Seq("__auto_generated_subquery_name", rest @ _*) if rest != Nil => rest
- case other => other
- }
- cleaned.map(quoteIdentifier).mkString(".")
- }
-
def toSQLId(parts: String): String = {
toSQLId(UnresolvedAttribute.parseAttributeName(parts))
}
@@ -92,10 +85,6 @@ private[sql] trait QueryErrorsBase {
quoteByDefault(text.toUpperCase(Locale.ROOT))
}
- def toSQLConf(conf: String): String = {
- quoteByDefault(conf)
- }
-
def toSQLConfVal(conf: String): String = {
quoteByDefault(conf)
}
@@ -104,12 +93,12 @@ private[sql] trait QueryErrorsBase {
quoteByDefault(option)
}
- def toSQLExpr(e: Expression): String = {
- quoteByDefault(toPrettySQL(e))
+ def toSQLConf(conf: String): String = {
+ QuotingUtils.toSQLConf(conf)
}
- def toSQLSchema(schema: String): String = {
- quoteByDefault(schema)
+ def toSQLExpr(e: Expression): String = {
+ quoteByDefault(toPrettySQL(e))
}
def getSummary(sqlContext: SQLQueryContext): String = {
@@ -119,4 +108,9 @@ private[sql] trait QueryErrorsBase {
def getQueryContext(sqlContext: SQLQueryContext): Array[QueryContext] = {
if (sqlContext == null) Array.empty else Array(sqlContext.asInstanceOf[QueryContext])
}
+
+ def toSQLSchema(schema: String): String = {
+ QuotingUtils.toSQLSchema(schema)
+ }
}
+
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/DataType.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/DataType.scala
index 29201b053cf..2a35bd87eda 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/DataType.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/DataType.scala
@@ -34,7 +34,7 @@ import org.apache.spark.sql.catalyst.parser.DataTypeParser
import org.apache.spark.sql.catalyst.types.DataTypeUtils
import org.apache.spark.sql.catalyst.util.DataTypeJsonUtils.{DataTypeJsonDeserializer, DataTypeJsonSerializer}
import org.apache.spark.sql.catalyst.util.StringConcat
-import org.apache.spark.sql.errors.QueryCompilationErrors
+import org.apache.spark.sql.errors.DataTypeErrors
import org.apache.spark.sql.types.DayTimeIntervalType._
import org.apache.spark.sql.types.YearMonthIntervalType._
import org.apache.spark.util.Utils
@@ -140,7 +140,7 @@ object DataType {
if (e.isInstanceOf[SparkThrowable]) {
throw e
}
- throw QueryCompilationErrors.schemaFailToParseError(schema, e)
+ throw DataTypeErrors.schemaFailToParseError(schema, e)
}
}
}
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/DayTimeIntervalType.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/DayTimeIntervalType.scala
index aaea89e9a6e..a1d014fa51f 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/DayTimeIntervalType.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/DayTimeIntervalType.scala
@@ -18,7 +18,7 @@
package org.apache.spark.sql.types
import org.apache.spark.annotation.Unstable
-import org.apache.spark.sql.errors.QueryCompilationErrors
+import org.apache.spark.sql.errors.DataTypeErrors
import org.apache.spark.sql.types.DayTimeIntervalType.fieldToString
/**
@@ -56,7 +56,7 @@ case class DayTimeIntervalType(startField: Byte, endField: Byte) extends AnsiInt
} else if (startField < endField) {
s"interval $startFieldName to $endFieldName"
} else {
- throw QueryCompilationErrors.invalidDayTimeIntervalType(startFieldName, endFieldName)
+ throw DataTypeErrors.invalidDayTimeIntervalType(startFieldName, endFieldName)
}
}
}
@@ -79,7 +79,10 @@ case object DayTimeIntervalType extends AbstractDataType {
case HOUR => "hour"
case MINUTE => "minute"
case SECOND => "second"
- case invalid => throw QueryCompilationErrors.invalidDayTimeField(invalid)
+ case invalid =>
+ val supportedIds = DayTimeIntervalType.dayTimeFields
+ .map(i => s"$i (${DayTimeIntervalType.fieldToString(i)})")
+ throw DataTypeErrors.invalidDayTimeField(invalid, supportedIds)
}
val stringToField: Map[String, Byte] = dayTimeFields.map(i => fieldToString(i) -> i).toMap
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/DecimalType.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/DecimalType.scala
index 49ac217f1bd..e4607462392 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/DecimalType.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/DecimalType.scala
@@ -22,7 +22,7 @@ import java.util.Locale
import scala.annotation.tailrec
import org.apache.spark.annotation.Stable
-import org.apache.spark.sql.errors.{DataTypeErrors, QueryCompilationErrors}
+import org.apache.spark.sql.errors.DataTypeErrors
import org.apache.spark.sql.internal.SQLConf
/**
@@ -44,7 +44,7 @@ case class DecimalType(precision: Int, scale: Int) extends FractionalType {
DecimalType.checkNegativeScale(scale)
if (scale > precision) {
- throw QueryCompilationErrors.decimalCannotGreaterThanPrecisionError(scale, precision)
+ throw DataTypeErrors.decimalCannotGreaterThanPrecisionError(scale, precision)
}
if (precision > DecimalType.MAX_PRECISION) {
@@ -148,7 +148,7 @@ object DecimalType extends AbstractDataType {
private[sql] def checkNegativeScale(scale: Int): Unit = {
if (scale < 0 && !SQLConf.get.allowNegativeScaleOfDecimalEnabled) {
- throw QueryCompilationErrors.negativeScaleNotAllowedError(scale)
+ throw DataTypeErrors.negativeScaleNotAllowedError(scale)
}
}
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/YearMonthIntervalType.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/YearMonthIntervalType.scala
index 4cb3fda3e99..6532a3b220c 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/YearMonthIntervalType.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/YearMonthIntervalType.scala
@@ -18,7 +18,7 @@
package org.apache.spark.sql.types
import org.apache.spark.annotation.Unstable
-import org.apache.spark.sql.errors.QueryCompilationErrors
+import org.apache.spark.sql.errors.DataTypeErrors
import org.apache.spark.sql.types.YearMonthIntervalType.fieldToString
/**
@@ -54,7 +54,7 @@ case class YearMonthIntervalType(startField: Byte, endField: Byte) extends AnsiI
} else if (startField < endField) {
s"interval $startFieldName to $endFieldName"
} else {
- throw QueryCompilationErrors.invalidDayTimeIntervalType(startFieldName, endFieldName)
+ throw DataTypeErrors.invalidDayTimeIntervalType(startFieldName, endFieldName)
}
}
}
@@ -73,7 +73,10 @@ case object YearMonthIntervalType extends AbstractDataType {
def fieldToString(field: Byte): String = field match {
case YEAR => "year"
case MONTH => "month"
- case invalid => throw QueryCompilationErrors.invalidYearMonthField(invalid)
+ case invalid =>
+ val supportedIds = YearMonthIntervalType.yearMonthFields
+ .map(i => s"$i (${YearMonthIntervalType.fieldToString(i)})")
+ throw DataTypeErrors.invalidYearMonthField(invalid, supportedIds)
}
val stringToField: Map[String, Byte] = yearMonthFields.map(i => fieldToString(i) -> i).toMap
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala
index 601d974d9c3..1f4d8311540 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala
@@ -335,11 +335,11 @@ class StructTypeSuite extends SparkFunSuite with SQLHelper {
"path" -> "`s1`.`s12`"))
// ambiguous name
- e = intercept[AnalysisException] {
+ var e2 = intercept[AnalysisException] {
check(Seq("S2", "x"), None)
}
checkError(
- exception = e,
+ exception = e2,
errorClass = "AMBIGUOUS_COLUMN_OR_FIELD",
parameters = Map("name" -> "`S2`.`x`", "n" -> "2"))
caseSensitiveCheck(Seq("s2", "x"), Some(Seq("s2") -> StructField("x", IntegerType)))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
index 187fab75f63..b21a8344fd0 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
@@ -1150,7 +1150,7 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession {
val invalidJsonSchema = """{"fields": [{"a":123}], "type": "struct"}"""
val invalidJsonSchemaReason = "Failed to convert the JSON string '{\"a\":123}' to a field."
checkError(
- exception = intercept[AnalysisException] {
+ exception = intercept[SparkException] {
df.select(from_json($"json", invalidJsonSchema, Map.empty[String,
String])).collect()
},
errorClass = "INVALID_SCHEMA.PARSE_ERROR",
@@ -1165,7 +1165,7 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession {
"was expecting (JSON String, Number, Array, Object or token 'null',
'true' or 'false')\n " +
"at [Source: (String)\"MAP<INT, cow>\"; line: 1, column: 4]"
checkError(
- exception = intercept[AnalysisException] {
+ exception = intercept[SparkException] {
df.select(from_json($"json", invalidDataType, Map.empty[String,
String])).collect()
},
errorClass = "INVALID_SCHEMA.PARSE_ERROR",
@@ -1180,7 +1180,7 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession {
"was expecting (JSON String, Number, Array, Object or token 'null',
'true' or 'false')\n" +
" at [Source: (String)\"x INT, a cow\"; line: 1, column: 2]"
checkError(
- exception = intercept[AnalysisException] {
+ exception = intercept[SparkException] {
df.select(from_json($"json", invalidTableSchema, Map.empty[String,
String])).collect()
},
errorClass = "INVALID_SCHEMA.PARSE_ERROR",
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]