Github user dongjoon-hyun commented on a diff in the pull request:
https://github.com/apache/spark/pull/20057#discussion_r169692801
--- Diff: sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
---
@@ -839,35 +839,68 @@ class JDBCSuite extends SparkFunSuite
}
test("table exists query by jdbc dialect") {
- val MySQL = JdbcDialects.get("jdbc:mysql://127.0.0.1/db")
- val Postgres = JdbcDialects.get("jdbc:postgresql://127.0.0.1/db")
+ val mysql = JdbcDialects.get("jdbc:mysql://127.0.0.1/db")
+ val postgres = JdbcDialects.get("jdbc:postgresql://127.0.0.1/db")
val db2 = JdbcDialects.get("jdbc:db2://127.0.0.1/db")
val h2 = JdbcDialects.get(url)
val derby = JdbcDialects.get("jdbc:derby:db")
val table = "weblogs"
val defaultQuery = s"SELECT * FROM $table WHERE 1=0"
val limitQuery = s"SELECT 1 FROM $table LIMIT 1"
- assert(MySQL.getTableExistsQuery(table) == limitQuery)
- assert(Postgres.getTableExistsQuery(table) == limitQuery)
+ assert(mysql.getTableExistsQuery(table) == limitQuery)
+ assert(postgres.getTableExistsQuery(table) == limitQuery)
assert(db2.getTableExistsQuery(table) == defaultQuery)
assert(h2.getTableExistsQuery(table) == defaultQuery)
assert(derby.getTableExistsQuery(table) == defaultQuery)
}
test("truncate table query by jdbc dialect") {
- val MySQL = JdbcDialects.get("jdbc:mysql://127.0.0.1/db")
- val Postgres = JdbcDialects.get("jdbc:postgresql://127.0.0.1/db")
+ val mysql = JdbcDialects.get("jdbc:mysql://127.0.0.1/db")
+ val postgres = JdbcDialects.get("jdbc:postgresql://127.0.0.1/db")
val db2 = JdbcDialects.get("jdbc:db2://127.0.0.1/db")
val h2 = JdbcDialects.get(url)
val derby = JdbcDialects.get("jdbc:derby:db")
+ val oracle = JdbcDialects.get("jdbc:oracle://127.0.0.1/db")
+ val teradata = JdbcDialects.get("jdbc:teradata://127.0.0.1/db")
+
val table = "weblogs"
val defaultQuery = s"TRUNCATE TABLE $table"
val postgresQuery = s"TRUNCATE TABLE ONLY $table"
- assert(MySQL.getTruncateQuery(table) == defaultQuery)
- assert(Postgres.getTruncateQuery(table) == postgresQuery)
+ val teradataQuery = s"DELETE FROM $table ALL"
+
+ assert(mysql.getTruncateQuery(table) == defaultQuery)
+ assert(postgres.getTruncateQuery(table) == postgresQuery)
assert(db2.getTruncateQuery(table) == defaultQuery)
assert(h2.getTruncateQuery(table) == defaultQuery)
assert(derby.getTruncateQuery(table) == defaultQuery)
+ assert(oracle.getTruncateQuery(table) == defaultQuery)
+ assert(teradata.getTruncateQuery(table) == teradataQuery)
+ }
+
+ test("SPARK-22880: Truncate table with CASCADE by jdbc dialect") {
+ // cascade in a truncate should only be applied for databases that support this,
+ // even if the parameter is passed.
+ val mysql = JdbcDialects.get("jdbc:mysql://127.0.0.1/db")
+ val postgres = JdbcDialects.get("jdbc:postgresql://127.0.0.1/db")
+ val db2 = JdbcDialects.get("jdbc:db2://127.0.0.1/db")
+ val h2 = JdbcDialects.get(url)
+ val derby = JdbcDialects.get("jdbc:derby:db")
+ val oracle = JdbcDialects.get("jdbc:oracle://127.0.0.1/db")
+ val teradata = JdbcDialects.get("jdbc:teradata://127.0.0.1/db")
+
+ val table = "weblogs"
+ val defaultQuery = s"TRUNCATE TABLE $table"
+ val postgresQuery = s"TRUNCATE TABLE ONLY $table CASCADE"
+ val oracleQuery = s"TRUNCATE TABLE $table CASCADE"
+ val teradataQuery = s"DELETE FROM $table ALL"
+
+ assert(mysql.getTruncateQuery(table, Some(true)) == defaultQuery)
+ assert(postgres.getTruncateQuery(table, Some(true)) == postgresQuery)
+ assert(db2.getTruncateQuery(table, Some(true)) == defaultQuery)
+ assert(h2.getTruncateQuery(table, Some(true)) == defaultQuery)
+ assert(derby.getTruncateQuery(table, Some(true)) == defaultQuery)
+ assert(oracle.getTruncateQuery(table, Some(true)) == oracleQuery)
+ assert(teradata.getTruncateQuery(table, Some(true)) == teradataQuery)
--- End diff --
Let's group the identical assertions together, like the following:
```
Seq(mysql, db2, h2, derby).foreach { dialect =>
assert(dialect.getTruncateQuery(table, Some(true)) == defaultQuery)
}
```
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]