This is an automated email from the ASF dual-hosted git repository. dongjoon pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push: new 9ba70d6ed302 [SPARK-47514][SQL][TESTS] Add a test coverage for createTable method (partitioned-table) in CatalogSuite 9ba70d6ed302 is described below commit 9ba70d6ed3029b444d6a37835eb27c6916e5c78a Author: panbingkun <panbing...@baidu.com> AuthorDate: Thu Mar 21 20:57:25 2024 -0700 [SPARK-47514][SQL][TESTS] Add a test coverage for createTable method (partitioned-table) in CatalogSuite ### What changes were proposed in this pull request? The pr aims to add a test coverage for createTable method (`partitioned-table`) in `CatalogSuite`. ### Why are the changes needed? Currently, the UT about `createTable` the partitions are `empty`. Let's improve it. ### Does this PR introduce _any_ user-facing change? No, only for tests. ### How was this patch tested? - Manually test. - Pass GA. ### Was this patch authored or co-authored using generative AI tooling? No. Closes #45637 from panbingkun/minor_catalogsuites. Authored-by: panbingkun <panbing...@baidu.com> Signed-off-by: Dongjoon Hyun <dh...@apple.com> --- .../spark/sql/connector/catalog/CatalogSuite.scala | 27 ++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/CatalogSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/CatalogSuite.scala index 145bfd286123..e20dfd4f6051 100644 --- a/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/CatalogSuite.scala +++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/CatalogSuite.scala @@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.analysis.{NamespaceAlreadyExistsException, import org.apache.spark.sql.catalyst.parser.CatalystSqlParser import org.apache.spark.sql.catalyst.util.quoteIdentifier import org.apache.spark.sql.connector.catalog.functions.{BoundFunction, ScalarFunction, UnboundFunction}
-import org.apache.spark.sql.connector.expressions.{LogicalExpressions, Transform} +import org.apache.spark.sql.connector.expressions.{Expressions, LogicalExpressions, Transform} import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types.{DataType, DoubleType, IntegerType, LongType, StringType, StructType, TimestampType} import org.apache.spark.sql.util.CaseInsensitiveStringMap @@ -96,7 +96,7 @@ class CatalogSuite extends SparkFunSuite { assert(catalog.listTables(Array("ns2")).toSet == Set(ident3)) } - test("createTable") { + test("createTable: non-partitioned table") { val catalog = newCatalog() assert(!catalog.tableExists(testIdent)) @@ -111,6 +111,29 @@ class CatalogSuite extends SparkFunSuite { assert(catalog.tableExists(testIdent)) } + test("createTable: partitioned table") { + val partCatalog = new InMemoryPartitionTableCatalog + partCatalog.initialize("test", CaseInsensitiveStringMap.empty()) + + assert(!partCatalog.tableExists(testIdent)) + + val columns = Array( + Column.create("col0", IntegerType), + Column.create("part0", IntegerType)) + val table = partCatalog.createTable( + testIdent, + columns, + Array[Transform](Expressions.identity("part0")), + util.Collections.emptyMap[String, String]) + + val parsed = CatalystSqlParser.parseMultipartIdentifier(table.name) + assert(parsed == Seq("test", "`", ".", "test_table")) + assert(table.columns === columns) + assert(table.properties.asScala == Map()) + + assert(partCatalog.tableExists(testIdent)) + } + test("createTable: with properties") { val catalog = newCatalog() --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org For additional commands, e-mail: commits-h...@spark.apache.org