This is an automated email from the ASF dual-hosted git repository.

beliefer pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 840f74a18691 [SPARK-51022][SQL] Remove unused `tableSampleClause` in 
`build()` method of `MsSqlServerSQLQueryBuilder` and `DB2SQLQueryBuilder`
840f74a18691 is described below

commit 840f74a18691c00c4c38b53113b57826ad18b799
Author: Wei Guo <[email protected]>
AuthorDate: Wed Jan 29 19:12:52 2025 +0800

    [SPARK-51022][SQL] Remove unused `tableSampleClause` in `build()` method of 
`MsSqlServerSQLQueryBuilder` and `DB2SQLQueryBuilder`
    
    ### What changes were proposed in this pull request?
    
    This PR aims to remove unused `tableSampleClause` in `build()` method of 
`MsSqlServerSQLQueryBuilder` and `DB2SQLQueryBuilder`.
    
    From the discussion here: 
https://github.com/apache/spark/pull/49711#discussion_r1932335212
    
    ### Why are the changes needed?
    
    Currently, `MsSqlServerDialect` and `DB2Dialect` don't support 
`tableSample` (default value of `supportsTableSample` is `false`), so 
`tableSampleClause` is unnecessary when building sql in the `build()` method.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    Updated a related test case and passed GA.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    No.
    
    Closes #49714 from wayneguow/jdbc_tablesample.
    
    Authored-by: Wei Guo <[email protected]>
    Signed-off-by: beliefer <[email protected]>
---
 .../apache/spark/sql/jdbc/v2/MsSqlServerIntegrationSuite.scala    | 8 ++++----
 .../src/main/scala/org/apache/spark/sql/jdbc/DB2Dialect.scala     | 2 +-
 .../main/scala/org/apache/spark/sql/jdbc/MsSqlServerDialect.scala | 2 +-
 sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git 
a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/MsSqlServerIntegrationSuite.scala
 
b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/MsSqlServerIntegrationSuite.scala
index fd7efb1efb76..04637c1b5563 100644
--- 
a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/MsSqlServerIntegrationSuite.scala
+++ 
b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/MsSqlServerIntegrationSuite.scala
@@ -171,7 +171,7 @@ class MsSqlServerIntegrationSuite extends 
DockerJDBCIntegrationV2Suite with V2JD
 
     // scalastyle:off
     assert(getExternalEngineQuery(df.queryExecution.executedPlan) ==
-      """SELECT  "dept","name","salary","bonus" FROM "employee"  WHERE (CASE 
WHEN ("name" = 'Legolas') THEN IIF(("name" = 'Elf'), 1, 0) ELSE IIF(("name" <> 
'Wizard'), 1, 0) END = 1)  """
+      """SELECT  "dept","name","salary","bonus" FROM "employee" WHERE (CASE 
WHEN ("name" = 'Legolas') THEN IIF(("name" = 'Elf'), 1, 0) ELSE IIF(("name" <> 
'Wizard'), 1, 0) END = 1)  """
     )
     // scalastyle:on
     df.collect()
@@ -186,7 +186,7 @@ class MsSqlServerIntegrationSuite extends 
DockerJDBCIntegrationV2Suite with V2JD
 
     // scalastyle:off
     assert(getExternalEngineQuery(df.queryExecution.executedPlan) ==
-      """SELECT  "dept","name","salary","bonus" FROM "employee"  WHERE (CASE 
WHEN ("name" = 'Legolas') THEN IIF(("name" = 'Elf'), 1, 0) ELSE 1 END = 1)  """
+      """SELECT  "dept","name","salary","bonus" FROM "employee" WHERE (CASE 
WHEN ("name" = 'Legolas') THEN IIF(("name" = 'Elf'), 1, 0) ELSE 1 END = 1)  """
     )
     // scalastyle:on
     df.collect()
@@ -203,7 +203,7 @@ class MsSqlServerIntegrationSuite extends 
DockerJDBCIntegrationV2Suite with V2JD
 
     // scalastyle:off
     assert(getExternalEngineQuery(df.queryExecution.executedPlan) ==
-      """SELECT  "dept","name","salary","bonus" FROM "employee"  WHERE (CASE 
WHEN ("name" = 'Legolas') THEN IIF((CASE WHEN ("name" = 'Elf') THEN IIF(("name" 
= 'Elrond'), 1, 0) ELSE IIF(("name" = 'Gandalf'), 1, 0) END = 1), 1, 0) ELSE 
IIF(("name" = 'Sauron'), 1, 0) END = 1)  """
+      """SELECT  "dept","name","salary","bonus" FROM "employee" WHERE (CASE 
WHEN ("name" = 'Legolas') THEN IIF((CASE WHEN ("name" = 'Elf') THEN IIF(("name" 
= 'Elrond'), 1, 0) ELSE IIF(("name" = 'Gandalf'), 1, 0) END = 1), 1, 0) ELSE 
IIF(("name" = 'Sauron'), 1, 0) END = 1)  """
     )
     // scalastyle:on
     df.collect()
@@ -220,7 +220,7 @@ class MsSqlServerIntegrationSuite extends 
DockerJDBCIntegrationV2Suite with V2JD
 
     // scalastyle:off
     assert(getExternalEngineQuery(df.queryExecution.executedPlan) ==
-      """SELECT  "dept","name","salary","bonus" FROM "employee"  WHERE ("name" 
IS NOT NULL) AND ((CASE WHEN "name" = 'Legolas' THEN CASE WHEN "name" = 'Elf' 
THEN 'Elf' ELSE 'Wizard' END ELSE 'Sauron' END) = "name")  """
+      """SELECT  "dept","name","salary","bonus" FROM "employee" WHERE ("name" 
IS NOT NULL) AND ((CASE WHEN "name" = 'Legolas' THEN CASE WHEN "name" = 'Elf' 
THEN 'Elf' ELSE 'Wizard' END ELSE 'Sauron' END) = "name")  """
     )
     // scalastyle:on
     df.collect()
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DB2Dialect.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DB2Dialect.scala
index f33e64c859fb..e818cc915951 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DB2Dialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DB2Dialect.scala
@@ -207,7 +207,7 @@ private case class DB2Dialect() extends JdbcDialect with 
SQLConfHelper with NoLe
       val offsetClause = dialect.getOffsetClause(offset)
 
       options.prepareQuery +
-        s"SELECT $columnList FROM ${options.tableOrQuery} $tableSampleClause" +
+        s"SELECT $columnList FROM ${options.tableOrQuery}" +
         s" $whereClause $groupByClause $orderByClause $offsetClause 
$limitClause"
     }
   }
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MsSqlServerDialect.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MsSqlServerDialect.scala
index 77d0891ce338..33fb93b168f9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MsSqlServerDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MsSqlServerDialect.scala
@@ -250,7 +250,7 @@ private case class MsSqlServerDialect() extends JdbcDialect 
with NoLegacyJDBCErr
 
       options.prepareQuery +
         s"SELECT $limitClause $columnList FROM ${options.tableOrQuery}" +
-        s" $tableSampleClause $whereClause $groupByClause $orderByClause"
+        s" $whereClause $groupByClause $orderByClause"
     }
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index db56da80fd4a..a1d83ee66508 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -1107,7 +1107,7 @@ class JDBCSuite extends QueryTest with SharedSparkSession 
{
         .withLimit(123)
         .build()
         .trim() ==
-      "SELECT a,b FROM test      FETCH FIRST 123 ROWS ONLY")
+      "SELECT a,b FROM test     FETCH FIRST 123 ROWS ONLY")
   }
 
   test("table exists query by jdbc dialect") {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to