karenfeng commented on a change in pull request #33864:
URL: https://github.com/apache/spark/pull/33864#discussion_r710563000
##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
##########
@@ -651,50 +651,52 @@ object QueryExecutionErrors {
   def missingJdbcTableNameAndQueryError(
       jdbcTableName: String, jdbcQueryString: String): Throwable = {
-    new IllegalArgumentException(
-      s"Option '$jdbcTableName' or '$jdbcQueryString' is required."
-    )
+    new SparkIllegalArgumentException(errorClass = "MISSING_JDBC_TABLE_NAME_AND_QUERY",
+      messageParameters = Array(jdbcTableName, jdbcQueryString))
   }

   def emptyOptionError(optionName: String): Throwable = {
-    new IllegalArgumentException(s"Option `$optionName` can not be empty.")
+    new SparkIllegalArgumentException(errorClass = "EMPTY_OPTION",
+      messageParameters = Array(optionName))
   }

   def invalidJdbcTxnIsolationLevelError(jdbcTxnIsolationLevel: String, value: String): Throwable = {
-    new IllegalArgumentException(
-      s"Invalid value `$value` for parameter `$jdbcTxnIsolationLevel`. This can be " +
-        "`NONE`, `READ_UNCOMMITTED`, `READ_COMMITTED`, `REPEATABLE_READ` or `SERIALIZABLE`.")
+    new SparkIllegalArgumentException(errorClass = "INVALID_JDBC_TXN_ISOLATION_LEVEL",
Review comment:
When the class name is updated, it should be updated here as well:
`INVALID_JDBC_TRANSACTION_ISOLATION_LEVEL`
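For reference, a sketch of what the renamed call could look like (the parameter
order just mirrors the diff below):

    new SparkIllegalArgumentException(errorClass = "INVALID_JDBC_TRANSACTION_ISOLATION_LEVEL",
      messageParameters = Array(value, jdbcTxnIsolationLevel))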
##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
##########
@@ -703,45 +705,50 @@ object QueryExecutionErrors {
}
   def dataTypeUnsupportedYetError(dataType: DataType): Throwable = {
-    new UnsupportedOperationException(s"$dataType is not supported yet.")
+    new SparkUnsupportedOperationException(errorClass = "DATA_TYPE_UNSUPPORTED_YET",
Review comment:
Update class name
##########
File path: core/src/main/resources/error/error-classes.json
##########
@@ -3,9 +3,29 @@
"message" : [ "Field name %s is ambiguous and has %s matching fields in
the struct." ],
"sqlState" : "42000"
},
+ "CANNOT_GET_JDBC_TYPE" : {
+ "message" : [ "Can't get JDBC type for %s" ],
+ "sqlState" : "42000"
+ },
+ "CANNOT_READ_FOOTER_FOR_FILE" : {
+ "message" : [ "Could not read footer for file: %s" ],
+ "sqlState" : "42000"
+ },
+ "CANNOT_TRANSLATE_NON_NULL_VALUE_FOR_FIELD" : {
+ "message" : [ "Can't translate non-null value for field %s" ],
+ "sqlState" : "0A000"
Review comment:
Is this an internal error? If so, we should leave the SQLSTATE empty.
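If it is internal, the entry would simply drop the field, along these lines (a
sketch only; I am reading "empty" as omitting the optional "sqlState" key
entirely):

    "CANNOT_TRANSLATE_NON_NULL_VALUE_FOR_FIELD" : {
      "message" : [ "Can't translate non-null value for field %s" ]
    },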
##########
File path: core/src/main/resources/error/error-classes.json
##########
@@ -3,9 +3,29 @@
"message" : [ "Field name %s is ambiguous and has %s matching fields in
the struct." ],
"sqlState" : "42000"
},
+ "CANNOT_GET_JDBC_TYPE" : {
+ "message" : [ "Can't get JDBC type for %s" ],
+ "sqlState" : "42000"
Review comment:
Is this an internal error? If so, we should leave the SQLSTATE empty.
##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
##########
@@ -703,45 +705,50 @@ object QueryExecutionErrors {
}
   def dataTypeUnsupportedYetError(dataType: DataType): Throwable = {
-    new UnsupportedOperationException(s"$dataType is not supported yet.")
+    new SparkUnsupportedOperationException(errorClass = "DATA_TYPE_UNSUPPORTED_YET",
+      messageParameters = Array(dataType.toString))
   }

   def unsupportedOperationForDataTypeError(dataType: DataType): Throwable = {
-    new UnsupportedOperationException(s"DataType: ${dataType.catalogString}")
+    new SparkUnsupportedOperationException(errorClass = "UNSUPPORTED_OPERATION_FOR_DATA_TYPE",
+      messageParameters = Array(dataType.catalogString))
   }

   def inputFilterNotFullyConvertibleError(owner: String): Throwable = {
-    new SparkException(s"The input filter of $owner should be fully convertible.")
+    new SparkException(errorClass = "INPUT_FILTER_NOT_FULLY_CONVERTIBLE",
+      messageParameters = Array(owner), null)
   }

   def cannotReadFooterForFileError(file: Path, e: IOException): Throwable = {
-    new SparkException(s"Could not read footer for file: $file", e)
+    new SparkException(errorClass = "CANNOT_READ_FOOTER_FOR_FILE",
+      messageParameters = Array(file.toString), e)
   }

   def cannotReadFooterForFileError(file: FileStatus, e: RuntimeException): Throwable = {
-    new IOException(s"Could not read footer for file: $file", e)
+    new SparkIOException(errorClass = "CANNOT_READ_FOOTER_FOR_FILE",
+      messageParameters = Array(file.toString), e)
   }

   def foundDuplicateFieldInCaseInsensitiveModeError(
       requiredFieldName: String, matchedOrcFields: String): Throwable = {
-    new RuntimeException(
-      s"""
-         |Found duplicate field(s) "$requiredFieldName": $matchedOrcFields
-         |in case-insensitive mode
-       """.stripMargin.replaceAll("\n", " "))
+    new SparkRuntimeException(errorClass = "FOUND_DUPLICATE_FIELD_IN_CASE_INSENSITIVE_MODE",
+      messageParameters = Array(requiredFieldName, matchedOrcFields))
   }

   def failedToMergeIncompatibleSchemasError(
       left: StructType, right: StructType, e: Throwable): Throwable = {
-    new SparkException(s"Failed to merge incompatible schemas $left and $right", e)
+    new SparkException(errorClass = "FAILED_TO_MERGE_IN_COMPATIBLE_SCHEMAS",
Review comment:
Update class name: `FAILED_TO_MERGE_IN_COMPATIBLE_SCHEMAS` should be
`FAILED_TO_MERGE_INCOMPATIBLE_SCHEMAS`.
##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
##########
@@ -703,45 +705,50 @@ object QueryExecutionErrors {
}
   def dataTypeUnsupportedYetError(dataType: DataType): Throwable = {
-    new UnsupportedOperationException(s"$dataType is not supported yet.")
+    new SparkUnsupportedOperationException(errorClass = "DATA_TYPE_UNSUPPORTED_YET",
+      messageParameters = Array(dataType.toString))
   }

   def unsupportedOperationForDataTypeError(dataType: DataType): Throwable = {
-    new UnsupportedOperationException(s"DataType: ${dataType.catalogString}")
+    new SparkUnsupportedOperationException(errorClass = "UNSUPPORTED_OPERATION_FOR_DATA_TYPE",
+      messageParameters = Array(dataType.catalogString))
   }

   def inputFilterNotFullyConvertibleError(owner: String): Throwable = {
-    new SparkException(s"The input filter of $owner should be fully convertible.")
+    new SparkException(errorClass = "INPUT_FILTER_NOT_FULLY_CONVERTIBLE",
+      messageParameters = Array(owner), null)
   }

   def cannotReadFooterForFileError(file: Path, e: IOException): Throwable = {
-    new SparkException(s"Could not read footer for file: $file", e)
+    new SparkException(errorClass = "CANNOT_READ_FOOTER_FOR_FILE",
+      messageParameters = Array(file.toString), e)
   }

   def cannotReadFooterForFileError(file: FileStatus, e: RuntimeException): Throwable = {
-    new IOException(s"Could not read footer for file: $file", e)
+    new SparkIOException(errorClass = "CANNOT_READ_FOOTER_FOR_FILE",
+      messageParameters = Array(file.toString), e)
   }

   def foundDuplicateFieldInCaseInsensitiveModeError(
       requiredFieldName: String, matchedOrcFields: String): Throwable = {
-    new RuntimeException(
-      s"""
-         |Found duplicate field(s) "$requiredFieldName": $matchedOrcFields
-         |in case-insensitive mode
-       """.stripMargin.replaceAll("\n", " "))
+    new SparkRuntimeException(errorClass = "FOUND_DUPLICATE_FIELD_IN_CASE_INSENSITIVE_MODE",
+      messageParameters = Array(requiredFieldName, matchedOrcFields))
   }

   def failedToMergeIncompatibleSchemasError(
       left: StructType, right: StructType, e: Throwable): Throwable = {
-    new SparkException(s"Failed to merge incompatible schemas $left and $right", e)
+    new SparkException(errorClass = "FAILED_TO_MERGE_IN_COMPATIBLE_SCHEMAS",
+      messageParameters = Array(left.toString, right.toString), e)
   }

   def ddlUnsupportedTemporarilyError(ddl: String): Throwable = {
-    new UnsupportedOperationException(s"$ddl is not supported temporarily.")
+    new SparkUnsupportedOperationException(errorClass = "DDL_UNSUPPORTED_TEMPORARILY",
+      messageParameters = Array(ddl))
   }

   def operatingOnCanonicalizationPlanError(): Throwable = {
-    new IllegalStateException("operating on canonicalization plan")
+    new SparkIllegalStateException(errorClass = "OPERATING_ON_CANONICALIZATION_PLAN",
Review comment:
This error class is confusing; can we update it to
`CANNOT_OPERATE_ON_CANONICALIZATION_PLAN`?
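A sketch of the renamed call; I am assuming the message takes no parameters,
as the old message was a fixed string:

    new SparkIllegalStateException(errorClass = "CANNOT_OPERATE_ON_CANONICALIZATION_PLAN",
      messageParameters = Array.empty)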
##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
##########
@@ -703,45 +705,50 @@ object QueryExecutionErrors {
}
   def dataTypeUnsupportedYetError(dataType: DataType): Throwable = {
-    new UnsupportedOperationException(s"$dataType is not supported yet.")
+    new SparkUnsupportedOperationException(errorClass = "DATA_TYPE_UNSUPPORTED_YET",
+      messageParameters = Array(dataType.toString))
   }

   def unsupportedOperationForDataTypeError(dataType: DataType): Throwable = {
-    new UnsupportedOperationException(s"DataType: ${dataType.catalogString}")
+    new SparkUnsupportedOperationException(errorClass = "UNSUPPORTED_OPERATION_FOR_DATA_TYPE",
+      messageParameters = Array(dataType.catalogString))
   }

   def inputFilterNotFullyConvertibleError(owner: String): Throwable = {
-    new SparkException(s"The input filter of $owner should be fully convertible.")
+    new SparkException(errorClass = "INPUT_FILTER_NOT_FULLY_CONVERTIBLE",
+      messageParameters = Array(owner), null)
   }

   def cannotReadFooterForFileError(file: Path, e: IOException): Throwable = {
-    new SparkException(s"Could not read footer for file: $file", e)
+    new SparkException(errorClass = "CANNOT_READ_FOOTER_FOR_FILE",
+      messageParameters = Array(file.toString), e)
   }

   def cannotReadFooterForFileError(file: FileStatus, e: RuntimeException): Throwable = {
-    new IOException(s"Could not read footer for file: $file", e)
+    new SparkIOException(errorClass = "CANNOT_READ_FOOTER_FOR_FILE",
+      messageParameters = Array(file.toString), e)
   }

   def foundDuplicateFieldInCaseInsensitiveModeError(
       requiredFieldName: String, matchedOrcFields: String): Throwable = {
-    new RuntimeException(
-      s"""
-         |Found duplicate field(s) "$requiredFieldName": $matchedOrcFields
-         |in case-insensitive mode
-       """.stripMargin.replaceAll("\n", " "))
+    new SparkRuntimeException(errorClass = "FOUND_DUPLICATE_FIELD_IN_CASE_INSENSITIVE_MODE",
+      messageParameters = Array(requiredFieldName, matchedOrcFields))
   }

   def failedToMergeIncompatibleSchemasError(
       left: StructType, right: StructType, e: Throwable): Throwable = {
-    new SparkException(s"Failed to merge incompatible schemas $left and $right", e)
+    new SparkException(errorClass = "FAILED_TO_MERGE_IN_COMPATIBLE_SCHEMAS",
+      messageParameters = Array(left.toString, right.toString), e)
   }

   def ddlUnsupportedTemporarilyError(ddl: String): Throwable = {
-    new UnsupportedOperationException(s"$ddl is not supported temporarily.")
+    new SparkUnsupportedOperationException(errorClass = "DDL_UNSUPPORTED_TEMPORARILY",
Review comment:
Update class name
##########
File path: core/src/main/resources/error/error-classes.json
##########
@@ -96,10 +151,22 @@
"message" : [ "Unrecognized SQL type %s" ],
"sqlState" : "42000"
},
+ "UNSUPPORTED_ARRAY_ELEMENT_TYPE_BASED_ON_BINARY" : {
+ "message" : [ "Unsupported array element type %s based on binary" ],
+ "sqlState" : "0A000"
+ },
+ "UNSUPPORTED_JDBC_TYPE" : {
+ "message" : [ "Unsupported type %s" ],
+ "sqlState" : "0A000"
+ },
"UNSUPPORTED_LITERAL_TYPE" : {
"message" : [ "Unsupported literal type %s %s" ],
"sqlState" : "0A000"
},
+ "UNSUPPORTED_OPERATION_FOR_DATA_TYPE" : {
Review comment:
This does not reflect the error message. Can you improve the error
message for this?
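As a sketch only (the wording is just a suggestion, reusing the catalogString
parameter from the diff; the SQLSTATE follows the neighboring unsupported-type
entries):

    "UNSUPPORTED_OPERATION_FOR_DATA_TYPE" : {
      "message" : [ "The operation is unsupported for data type %s" ],
      "sqlState" : "0A000"
    },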
##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
##########
@@ -651,50 +651,52 @@ object QueryExecutionErrors {
   def missingJdbcTableNameAndQueryError(
       jdbcTableName: String, jdbcQueryString: String): Throwable = {
-    new IllegalArgumentException(
-      s"Option '$jdbcTableName' or '$jdbcQueryString' is required."
-    )
+    new SparkIllegalArgumentException(errorClass = "MISSING_JDBC_TABLE_NAME_AND_QUERY",
+      messageParameters = Array(jdbcTableName, jdbcQueryString))
   }

   def emptyOptionError(optionName: String): Throwable = {
-    new IllegalArgumentException(s"Option `$optionName` can not be empty.")
+    new SparkIllegalArgumentException(errorClass = "EMPTY_OPTION",
+      messageParameters = Array(optionName))
   }

   def invalidJdbcTxnIsolationLevelError(jdbcTxnIsolationLevel: String, value: String): Throwable = {
-    new IllegalArgumentException(
-      s"Invalid value `$value` for parameter `$jdbcTxnIsolationLevel`. This can be " +
-        "`NONE`, `READ_UNCOMMITTED`, `READ_COMMITTED`, `REPEATABLE_READ` or `SERIALIZABLE`.")
+    new SparkIllegalArgumentException(errorClass = "INVALID_JDBC_TXN_ISOLATION_LEVEL",
+      messageParameters = Array(value, jdbcTxnIsolationLevel))
   }

   def cannotGetJdbcTypeError(dt: DataType): Throwable = {
-    new IllegalArgumentException(s"Can't get JDBC type for ${dt.catalogString}")
+    new SparkIllegalArgumentException(errorClass = "CANNOT_GET_JDBC_TYPE",
+      messageParameters = Array(dt.catalogString))
   }

   def unrecognizedSqlTypeError(sqlType: Int): Throwable = {
     new SparkSQLException(errorClass = "UNRECOGNIZED_SQL_TYPE", Array(sqlType.toString))
   }

   def unsupportedJdbcTypeError(content: String): Throwable = {
-    new SQLException(s"Unsupported type $content")
+    new SparkSQLException(errorClass = "UNSUPPORTED_JDBC_TYPE",
+      messageParameters = Array(content))
   }

   def unsupportedArrayElementTypeBasedOnBinaryError(dt: DataType): Throwable = {
-    new IllegalArgumentException(s"Unsupported array element " +
-      s"type ${dt.catalogString} based on binary")
+    new SparkIllegalArgumentException(errorClass = "UNSUPPORTED_ARRAY_ELEMENT_TYPE_BASED_ON_BINARY",
+      messageParameters = Array(dt.catalogString))
   }

   def nestedArraysUnsupportedError(): Throwable = {
-    new IllegalArgumentException("Nested arrays unsupported")
+    new SparkIllegalArgumentException(errorClass = "NESTED_ARRAYS_UNSUPPORTED",
+      messageParameters = Array.empty)
   }

   def cannotTranslateNonNullValueForFieldError(pos: Int): Throwable = {
-    new IllegalArgumentException(s"Can't translate non-null value for field $pos")
+    new SparkIllegalArgumentException(errorClass = "CANNOT_TRANSLATE_NON_NULL_VALUE_FOR_FIELD",
+      messageParameters = Array(pos.toString))
   }

   def invalidJdbcNumPartitionsError(n: Int, jdbcNumPartitions: String): Throwable = {
-    new IllegalArgumentException(
-      s"Invalid value `$n` for parameter `$jdbcNumPartitions` in table writing " +
-        "via JDBC. The minimum value is 1.")
+    new SparkIllegalArgumentException(errorClass = "INVALID_JDBC_NUMPARTITIONS",
Review comment:
Update class name: `INVALID_JDBC_NUM_PARTITIONS`
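A sketch of the rename applied on both sides (the JSON body just carries over
the old message text, and the messageParameters are an assumption, since the
diff is cut off here):

    new SparkIllegalArgumentException(errorClass = "INVALID_JDBC_NUM_PARTITIONS",
      messageParameters = Array(n.toString, jdbcNumPartitions))

    "INVALID_JDBC_NUM_PARTITIONS" : {
      "message" : [ "Invalid value %s for parameter %s in table writing via JDBC. The minimum value is 1." ]
    },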