dongjoon-hyun commented on a change in pull request #33350:
URL: https://github.com/apache/spark/pull/33350#discussion_r670933413



##########
File path: 
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/PruneFileSourcePartitionsSuite.scala
##########
@@ -42,35 +43,27 @@ class PruneFileSourcePartitionsSuite extends PrunePartitionSuiteBase {
 
   test("PruneFileSourcePartitions should not change the output of 
LogicalRelation") {
     withTable("test") {
-      withTempDir { dir =>
-        sql(
-          s"""
-            |CREATE EXTERNAL TABLE test(i int)
-            |PARTITIONED BY (p int)
-            |STORED AS parquet
-            |LOCATION '${dir.toURI}'""".stripMargin)
-
-        val tableMeta = spark.sharedState.externalCatalog.getTable("default", "test")
-        val catalogFileIndex = new CatalogFileIndex(spark, tableMeta, 0)
-
-        val dataSchema = StructType(tableMeta.schema.filterNot { f =>
-          tableMeta.partitionColumnNames.contains(f.name)
-        })
-        val relation = HadoopFsRelation(
-          location = catalogFileIndex,
-          partitionSchema = tableMeta.partitionSchema,
-          dataSchema = dataSchema,
-          bucketSpec = None,
-          fileFormat = new ParquetFileFormat(),
-          options = Map.empty)(sparkSession = spark)
-
-        val logicalRelation = LogicalRelation(relation, tableMeta)
-        val query = Project(Seq(Symbol("i"), Symbol("p")),
-          Filter(Symbol("p") === 1, logicalRelation)).analyze
-
-        val optimized = Optimize.execute(query)
-        assert(optimized.missingInput.isEmpty)
-      }
+      spark.range(10).selectExpr("id", "id % 3 as p").write.partitionBy("p").saveAsTable("test")

Review comment:
       As you know, `saveAsTable` is different from `STORED AS parquet`: the former creates a datasource table (`parquet` provider), while the latter creates a `hive` provider table. The original test coverage seems to be coupled with `convertMetastoreParquet`, but this new test exercises a different path. Are we losing the existing test coverage?
   
   ```scala
   scala> spark.range(10).selectExpr("id", "id % 3 as p").write.partitionBy("p").saveAsTable("t1")
   
   scala> sql("DESCRIBE TABLE EXTENDED t1").show()
   ...
   |            Provider|             parquet|       |
   ...
   scala> sql("CREATE TABLE t2(a int) STORED AS parquet").show()
   scala> sql("DESCRIBE TABLE EXTENDED t2").show()
   ...
   |            Provider|                hive|       |
   ...
   ```
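
   To make the concern concrete, here is a minimal sketch (not code from this PR) of how the Hive-provider coverage could be kept. The table name `test_hive`, the inserted row, and the exact test name are assumptions for illustration, and such a test would need to live in a Hive-enabled suite:

   ```scala
   // Hedged sketch: re-run the pruning assertion against a Hive-format table.
   // convertMetastoreParquet (default true) converts the Hive relation into a
   // HadoopFsRelation, which is the shape PruneFileSourcePartitions matches.
   test("PruneFileSourcePartitions should not change the output for hive tables") {
     withTable("test_hive") {
       withSQLConf("spark.sql.hive.convertMetastoreParquet" -> "true") {
         sql(
           """
             |CREATE TABLE test_hive(i int)
             |PARTITIONED BY (p int)
             |STORED AS parquet""".stripMargin)
         sql("INSERT INTO test_hive PARTITION (p = 1) VALUES (1)")

         // Same assertion as the original test: partition pruning must not
         // change the output attributes of the LogicalRelation.
         val query = sql("SELECT i, p FROM test_hive WHERE p = 1")
           .queryExecution.analyzed
         val optimized = Optimize.execute(query)
         assert(optimized.missingInput.isEmpty)
       }
     }
   }
   ```

   Keeping `convertMetastoreParquet` explicit in the test would also document why the `hive` provider relation still ends up as a `HadoopFsRelation` that `PruneFileSourcePartitions` can act on.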



