cloud-fan commented on a change in pull request #29396:
URL: https://github.com/apache/spark/pull/29396#discussion_r478243229

##########
File path: sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala
##########
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.jdbc
+
+import java.sql.{Connection, DriverManager}
+import java.util.Properties
+
+import org.apache.spark.SparkConf
+import org.apache.spark.sql.{QueryTest, Row}
+import org.apache.spark.sql.catalyst.analysis.CannotReplaceMissingTableException
+import org.apache.spark.sql.catalyst.plans.logical.Filter
+import org.apache.spark.sql.execution.datasources.v2.DataSourceV2ScanRelation
+import org.apache.spark.sql.execution.datasources.v2.jdbc.JDBCTableCatalog
+import org.apache.spark.sql.functions.lit
+import org.apache.spark.sql.test.SharedSparkSession
+import org.apache.spark.util.Utils
+
+class JDBCV2Suite extends QueryTest with SharedSparkSession {
+  import testImplicits._
+
+  val tempDir = Utils.createTempDir()
+  val url = s"jdbc:h2:${tempDir.getCanonicalPath};user=testUser;password=testPass"
+  var conn: java.sql.Connection = null
+
+  override def sparkConf: SparkConf = super.sparkConf
+    .set("spark.sql.catalog.h2", classOf[JDBCTableCatalog].getName)
+    .set("spark.sql.catalog.h2.url", url)
+    .set("spark.sql.catalog.h2.driver", "org.h2.Driver")
+
+  private def withConnection[T](f: Connection => T): T = {
+    val conn = DriverManager.getConnection(url, new Properties())
+    try {
+      f(conn)
+    } finally {
+      conn.close()
+    }
+  }
+
+  override def beforeAll(): Unit = {
+    super.beforeAll()
+    Utils.classForName("org.h2.Driver")
+    withConnection { conn =>
+      conn.prepareStatement("CREATE SCHEMA \"test\"").executeUpdate()
+      conn.prepareStatement(
+        "CREATE TABLE \"test\".\"empty_table\" (name TEXT(32) NOT NULL, id INTEGER NOT NULL)")
+        .executeUpdate()
+      conn.prepareStatement(
+        "CREATE TABLE \"test\".\"people\" (name TEXT(32) NOT NULL, id INTEGER NOT NULL)")
+        .executeUpdate()
+      conn.prepareStatement("INSERT INTO \"test\".\"people\" VALUES ('fred', 1)").executeUpdate()
+      conn.prepareStatement("INSERT INTO \"test\".\"people\" VALUES ('mary', 2)").executeUpdate()
+    }
+  }
+
+  override def afterAll(): Unit = {
+    Utils.deleteRecursively(tempDir)
+    super.afterAll()
+  }
+
+  test("simple scan") {
+    checkAnswer(sql("SELECT * FROM h2.test.empty_table"), Seq())
+    checkAnswer(sql("SELECT * FROM h2.test.people"), Seq(Row("fred", 1), Row("mary", 2)))
+    checkAnswer(sql("SELECT name, id FROM h2.test.people"), Seq(Row("fred", 1), Row("mary", 2)))
+  }
+
+  test("scan with filter push-down") {
+    val df = spark.table("h2.test.people").filter($"id" > 1)
+    val filters = df.queryExecution.optimizedPlan.collect {
+      case f: Filter => f
+    }
+    assert(filters.isEmpty)
+    checkAnswer(df, Row("mary", 2))
+  }
+
+  test("scan with column pruning") {
+    val df = spark.table("h2.test.people").select("id")
+    val scan = df.queryExecution.optimizedPlan.collectFirst {
+      case s: DataSourceV2ScanRelation => s
+    }.get
+    assert(scan.schema.names.sameElements(Seq("ID")))
+    checkAnswer(df, Seq(Row(1), Row(2)))
+  }
+
+  test("scan with filter push-down and column pruning") {
+    val df = spark.table("h2.test.people").filter($"id" > 1).select("name")
+    val filters = df.queryExecution.optimizedPlan.collect {
+      case f: Filter => f
+    }
+    assert(filters.isEmpty)
+    val scan = df.queryExecution.optimizedPlan.collectFirst {
+      case s: DataSourceV2ScanRelation => s
+    }.get
+    assert(scan.schema.names.sameElements(Seq("NAME")))
+    checkAnswer(df, Row("mary"))
+  }
+
+  // TODO (SPARK-32592): this doesn't work because `DataFrameReader.table` ignores the options.
+  ignore("scan with partition info") {
+    val df = spark.read
+      .option("partitionColumn", "id")
+      .option("lowerBound", "0")
+      .option("upperBound", "3")
+      .option("numPartitions", "2")
+      .table("h2.test.people")
+    assert(df.rdd.getNumPartitions == 2)
+  }
+
+  test("show tables") {
+    checkAnswer(sql("SHOW TABLES IN h2.test"),
+      Seq(Row("test", "people"), Row("test", "empty_table")))
+  }
+
+  // TODO (SPARK-32603): Operation not allowed: CREATE TABLE ... STORED AS ... does not support
+  // multi-part identifiers
+  ignore("create/drop table") {
+    sql("CREATE TABLE h2.test.abc(i INT, j STRING)")
+    checkAnswer(sql("SHOW TABLES IN h2.test"), Seq(Row("test", "people"), Row("test", "abc")))
+    sql("DROP TABLE h2.test.abc")
+    checkAnswer(sql("SHOW TABLES IN h2.test"), Row("test", "people"))
+  }
+
+  // TODO (SPARK-32603): Operation not allowed: CREATE TABLE ... STORED AS ... does not support
+  // multi-part identifiers
+  ignore("SQL API: create table as select") {
+    withTable("h2.test.abc") {
+      sql("CREATE TABLE h2.test.abc AS SELECT * FROM h2.test.people")
+      checkAnswer(sql("SELECT name, id FROM h2.test.abc"), Seq(Row("fred", 1), Row("mary", 2)))
+    }
+  }
+
+  test("DataFrameWriterV2: create table as select") {
+    withTable("h2.test.abc") {
+      spark.table("h2.test.people").writeTo("h2.test.abc").create()
+      checkAnswer(sql("SELECT name, id FROM h2.test.abc"), Seq(Row("fred", 1), Row("mary", 2)))
+    }
+  }
+
+  // TODO (SPARK-32603): ParseException: mismatched input 'AS' expecting {'(', 'USING'}
+  ignore("SQL API: replace table as select") {
+    withTable("h2.test.abc") {
+      intercept[CannotReplaceMissingTableException] {
+        sql("REPLACE TABLE h2.test.abc AS SELECT 1 as col")
+      }
+      sql("CREATE OR REPLACE TABLE h2.test.abc AS SELECT 1 as col")
+      checkAnswer(sql("SELECT col FROM h2.test.abc"), Row(1))
+      sql("REPLACE TABLE h2.test.abc AS SELECT * FROM h2.test.people")
+      checkAnswer(sql("SELECT name, id FROM h2.test.abc"), Seq(Row("fred", 1), Row("mary", 2)))
+    }
+  }
+
+  test("DataFrameWriterV2: replace table as select") {
+    withTable("h2.test.abc") {
+      intercept[CannotReplaceMissingTableException] {
+        sql("SELECT 1 AS col").writeTo("h2.test.abc").replace()
+      }
+      sql("SELECT 1 AS col").writeTo("h2.test.abc").createOrReplace()
+      checkAnswer(sql("SELECT col FROM h2.test.abc"), Row(1))
+      spark.table("h2.test.people").writeTo("h2.test.abc").replace()
+      checkAnswer(sql("SELECT name, id FROM h2.test.abc"), Seq(Row("fred", 1), Row("mary", 2)))
+    }
+  }
+
+  // TODO (SPARK-32603): Operation not allowed: CREATE TABLE ... STORED AS ... does not support
+  // multi-part identifiers

Review comment:
   ditto
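
For reference, the `spark.sql.catalog.h2*` settings in the suite's `sparkConf` are ordinary session configs, so the same catalog can be registered on a standalone SparkSession. A minimal sketch, assuming the `JDBCTableCatalog` added by this PR and the H2 driver are on the classpath; the object name and the `/tmp/h2db` database path are hypothetical, and the query assumes a `"test"."people"` table like the one the suite creates:

import org.apache.spark.sql.SparkSession

object JDBCV2CatalogSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      // Map the catalog name "h2" to the JDBC V2 catalog implementation,
      // mirroring the sparkConf settings in the test suite above.
      .config("spark.sql.catalog.h2",
        "org.apache.spark.sql.execution.datasources.v2.jdbc.JDBCTableCatalog")
      // Hypothetical H2 database path; the suite uses a temp directory instead.
      .config("spark.sql.catalog.h2.url",
        "jdbc:h2:/tmp/h2db;user=testUser;password=testPass")
      .config("spark.sql.catalog.h2.driver", "org.h2.Driver")
      .getOrCreate()

    // Tables in the H2 schema "test" resolve as h2.test.<table>.
    spark.sql("SELECT * FROM h2.test.people").show()
    spark.stop()
  }
}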
