This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 46fe10afc140 [SPARK-50215][SQL] Refactored StringType pattern matching 
in jdbc code stack
46fe10afc140 is described below

commit 46fe10afc140fb0ba04feb8d257b70f912dc9726
Author: Vladan Vasić <[email protected]>
AuthorDate: Tue Nov 5 15:26:16 2024 +0100

    [SPARK-50215][SQL] Refactored StringType pattern matching in jdbc code stack
    
    ### What changes were proposed in this pull request?
    
    I propose changing pattern matching of `StringType` in the `jdbc` code stack.
    
    ### Why are the changes needed?
    
    These changes are needed in order to properly handle collated `StringType` 
in the `jdbc` code stack.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    No testing was needed.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    No.
    
    Closes #48749 from 
vladanvasi-db/vladanvasi-db/jdbc-stringtype-match-refactor.
    
    Authored-by: Vladan Vasić <[email protected]>
    Signed-off-by: Max Gekk <[email protected]>
---
 sql/core/src/main/scala/org/apache/spark/sql/jdbc/DB2Dialect.scala      | 2 +-
 .../src/main/scala/org/apache/spark/sql/jdbc/DatabricksDialect.scala    | 2 +-
 sql/core/src/main/scala/org/apache/spark/sql/jdbc/DerbyDialect.scala    | 2 +-
 sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala       | 2 +-
 .../src/main/scala/org/apache/spark/sql/jdbc/MsSqlServerDialect.scala   | 2 +-
 sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala    | 2 +-
 sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala   | 2 +-
 sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala | 2 +-
 sql/core/src/main/scala/org/apache/spark/sql/jdbc/TeradataDialect.scala | 2 +-
 9 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DB2Dialect.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DB2Dialect.scala
index 2f54f1f62fde..3256803f6039 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DB2Dialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DB2Dialect.scala
@@ -101,7 +101,7 @@ private case class DB2Dialect() extends JdbcDialect with 
SQLConfHelper with NoLe
   }
 
   override def getJDBCType(dt: DataType): Option[JdbcType] = dt match {
-    case StringType => Option(JdbcType("CLOB", java.sql.Types.CLOB))
+    case _: StringType => Option(JdbcType("CLOB", java.sql.Types.CLOB))
     case BooleanType if conf.legacyDB2BooleanMappingEnabled =>
       Option(JdbcType("CHAR(1)", java.sql.Types.CHAR))
     case BooleanType => Option(JdbcType("BOOLEAN", java.sql.Types.BOOLEAN))
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DatabricksDialect.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DatabricksDialect.scala
index af77f8575dd8..3b855b376967 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DatabricksDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DatabricksDialect.scala
@@ -44,7 +44,7 @@ private case class DatabricksDialect() extends JdbcDialect 
with NoLegacyJDBCErro
   override def getJDBCType(dt: DataType): Option[JdbcType] = dt match {
     case BooleanType => Some(JdbcType("BOOLEAN", java.sql.Types.BOOLEAN))
     case DoubleType => Some(JdbcType("DOUBLE", java.sql.Types.DOUBLE))
-    case StringType => Some(JdbcType("STRING", java.sql.Types.VARCHAR))
+    case _: StringType => Some(JdbcType("STRING", java.sql.Types.VARCHAR))
     case BinaryType => Some(JdbcType("BINARY", java.sql.Types.BINARY))
     case _ => None
   }
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DerbyDialect.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DerbyDialect.scala
index 7b65a01b5e70..f78e155d485d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DerbyDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DerbyDialect.scala
@@ -44,7 +44,7 @@ private case class DerbyDialect() extends JdbcDialect with 
NoLegacyJDBCError {
   }
 
   override def getJDBCType(dt: DataType): Option[JdbcType] = dt match {
-    case StringType => Option(JdbcType("CLOB", java.sql.Types.CLOB))
+    case _: StringType => Option(JdbcType("CLOB", java.sql.Types.CLOB))
     case ByteType => Option(JdbcType("SMALLINT", java.sql.Types.SMALLINT))
     case ShortType => Option(JdbcType("SMALLINT", java.sql.Types.SMALLINT))
     case BooleanType => Option(JdbcType("BOOLEAN", java.sql.Types.BOOLEAN))
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala
index 798ecb5b36ff..5e5ba797ca60 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala
@@ -73,7 +73,7 @@ private[sql] case class H2Dialect() extends JdbcDialect with 
NoLegacyJDBCError {
   }
 
   override def getJDBCType(dt: DataType): Option[JdbcType] = dt match {
-    case StringType => Option(JdbcType("CLOB", Types.CLOB))
+    case _: StringType => Option(JdbcType("CLOB", Types.CLOB))
     case BooleanType => Some(JdbcType("BOOLEAN", Types.BOOLEAN))
     case ShortType | ByteType => Some(JdbcType("SMALLINT", Types.SMALLINT))
     case t: DecimalType => Some(
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MsSqlServerDialect.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MsSqlServerDialect.scala
index 7d476d43e5c7..a29f3d9550d1 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MsSqlServerDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MsSqlServerDialect.scala
@@ -135,7 +135,7 @@ private case class MsSqlServerDialect() extends JdbcDialect 
with NoLegacyJDBCErr
   override def getJDBCType(dt: DataType): Option[JdbcType] = dt match {
     case TimestampType => Some(JdbcType("DATETIME", java.sql.Types.TIMESTAMP))
     case TimestampNTZType => Some(JdbcType("DATETIME", 
java.sql.Types.TIMESTAMP))
-    case StringType => Some(JdbcType("NVARCHAR(MAX)", java.sql.Types.NVARCHAR))
+    case _: StringType => Some(JdbcType("NVARCHAR(MAX)", 
java.sql.Types.NVARCHAR))
     case BooleanType => Some(JdbcType("BIT", java.sql.Types.BIT))
     case BinaryType => Some(JdbcType("VARBINARY(MAX)", 
java.sql.Types.VARBINARY))
     case ShortType if !SQLConf.get.legacyMsSqlServerNumericMappingEnabled =>
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
index dd0118d87599..c4f2793707e5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
@@ -256,7 +256,7 @@ private case class MySQLDialect() extends JdbcDialect with 
SQLConfHelper with No
     // See SPARK-35446: MySQL treats REAL as a synonym to DOUBLE by default
     // We override getJDBCType so that FloatType is mapped to FLOAT instead
     case FloatType => Option(JdbcType("FLOAT", java.sql.Types.FLOAT))
-    case StringType => Option(JdbcType("LONGTEXT", java.sql.Types.LONGVARCHAR))
+    case _: StringType => Option(JdbcType("LONGTEXT", 
java.sql.Types.LONGVARCHAR))
     case ByteType => Option(JdbcType("TINYINT", java.sql.Types.TINYINT))
     case ShortType => Option(JdbcType("SMALLINT", java.sql.Types.SMALLINT))
     // scalastyle:off line.size.limit
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala
index a73a34c64635..9c8a6bf5e145 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala
@@ -121,7 +121,7 @@ private case class OracleDialect() extends JdbcDialect with 
SQLConfHelper with N
     case DoubleType => Some(JdbcType("NUMBER(19, 4)", java.sql.Types.DOUBLE))
     case ByteType => Some(JdbcType("NUMBER(3)", java.sql.Types.SMALLINT))
     case ShortType => Some(JdbcType("NUMBER(5)", java.sql.Types.SMALLINT))
-    case StringType => Some(JdbcType("VARCHAR2(255)", java.sql.Types.VARCHAR))
+    case _: StringType => Some(JdbcType("VARCHAR2(255)", 
java.sql.Types.VARCHAR))
     case VarcharType(n) => Some(JdbcType(s"VARCHAR2($n)", 
java.sql.Types.VARCHAR))
     case TimestampType if !conf.legacyOracleTimestampMappingEnabled =>
       Some(JdbcType("TIMESTAMP WITH LOCAL TIME ZONE", TIMESTAMP_LTZ))
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala
index 8341063e0989..1265550b3f19 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala
@@ -145,7 +145,7 @@ private case class PostgresDialect()
   }
 
   override def getJDBCType(dt: DataType): Option[JdbcType] = dt match {
-    case StringType => Some(JdbcType("TEXT", Types.VARCHAR))
+    case _: StringType => Some(JdbcType("TEXT", Types.VARCHAR))
     case BinaryType => Some(JdbcType("BYTEA", Types.BINARY))
     case BooleanType => Some(JdbcType("BOOLEAN", Types.BOOLEAN))
     case FloatType => Some(JdbcType("FLOAT4", Types.FLOAT))
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/TeradataDialect.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/TeradataDialect.scala
index 322b259485f5..c7d8e899d71b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/TeradataDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/TeradataDialect.scala
@@ -40,7 +40,7 @@ private case class TeradataDialect() extends JdbcDialect with 
NoLegacyJDBCError
     supportedFunctions.contains(funcName)
 
   override def getJDBCType(dt: DataType): Option[JdbcType] = dt match {
-    case StringType => Some(JdbcType("VARCHAR(255)", java.sql.Types.VARCHAR))
+    case _: StringType => Some(JdbcType("VARCHAR(255)", 
java.sql.Types.VARCHAR))
     case BooleanType => Option(JdbcType("CHAR(1)", java.sql.Types.CHAR))
     case ByteType => Option(JdbcType("BYTEINT", java.sql.Types.TINYINT))
     case _ => None


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to