sunxiaoguang commented on code in PR #49453:
URL: https://github.com/apache/spark/pull/49453#discussion_r1915931322
##########
sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala:
##########
@@ -112,6 +112,21 @@ private case class MySQLDialect() extends JdbcDialect with
SQLConfHelper with No
} else {
super.visitAggregateFunction(funcName, isDistinct, inputs)
}
+
+ override def visitCast(expr: String, exprDataType: DataType, dataType:
DataType): String = {
+ val databaseTypeDefinition = dataType match {
+ // MySQL uses CHAR in the cast function for the type LONGTEXT
+ case StringType => "CHAR"
+ // MySQL uses SIGNED INTEGER in the cast function for SMALLINT,
INTEGER and BIGINT.
+ // To avoid breaking code relying on ResultSet metadata, we support
BIGINT only at
+ // this time.
+ case LongType => "SIGNED INTEGER"
+ // MySQL uses BINARY in the cast function for the type BLOB
+ case BinaryType => "BINARY"
+ case _ =>
getJDBCType(dataType).map(_.databaseTypeDefinition).getOrElse(dataType.typeName)
Review Comment:
Sure, we can add an explicit error message for this case. However, I'm not yet
very familiar with the Spark code base. I noticed there are identifiers like
_LEGACY_ERROR_TEMP_xxxx in the code base. Could you please leave me some hints on
the conventions to follow? Thanks.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]