huaxingao commented on a change in pull request #29396:
URL: https://github.com/apache/spark/pull/29396#discussion_r468266647
##########
File path:
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/jdbc/JDBCTableCatalogSuite.scala
##########
@@ -107,6 +111,41 @@ class JDBCTableCatalogSuite extends QueryTest with
SharedSparkSession {
}
}
+ test("simple scan") {
+ checkAnswer(sql("SELECT name, id FROM h2.test.people"), Seq(Row("fred",
1), Row("mary", 2)))
+ }
+
+ test("scan with filter push-down") {
+ val df = spark.table("h2.test.people").filter("id > 1")
+ val filters = df.queryExecution.optimizedPlan.collect {
+ case f: Filter => f
+ }
+ assert(filters.isEmpty)
+ checkAnswer(df, Row("mary", 2))
+ }
+
+ test("scan with column pruning") {
+ val df = spark.table("h2.test.people").select("id")
+ val scan = df.queryExecution.optimizedPlan.collectFirst {
+ case s: DataSourceV2ScanRelation => s
+ }.get
+ assert(scan.schema.names.sameElements(Seq("ID")))
+ checkAnswer(df, Seq(Row(1), Row(2)))
+ }
+
+ test("scan with filter push-down and column pruning") {
+ val df = spark.table("h2.test.people").filter("id > 1").select("name")
+ val filters = df.queryExecution.optimizedPlan.collect {
+ case f: Filter => f
+ }
+ assert(filters.isEmpty)
Review comment:
The filter is pushed down, so we are expecting empty filters here. If
`JDBCOptions.pushDownPredicate` is set to false, or the filter is not supported
by the underlying database (and therefore cannot be pushed down), the test will fail here.
I will add a comment in the test.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]