MaxGekk commented on a change in pull request #31326:
URL: https://github.com/apache/spark/pull/31326#discussion_r563950041



##########
File path: sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
##########
@@ -1734,9 +1735,8 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
 
     // null partition values
     createTablePartition(catalog, Map("a" -> null, "b" -> null), tableIdent)
-    val nullPartValue = if (isUsingHiveMetastore) "__HIVE_DEFAULT_PARTITION__" else null
     assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
-      Set(Map("a" -> nullPartValue, "b" -> nullPartValue)))
+      Set(Map("a" -> "__HIVE_DEFAULT_PARTITION__", "b" -> "__HIVE_DEFAULT_PARTITION__")))

Review comment:
       Now, the `In-Memory` catalog behaves similarly to the Hive external catalog, so we don't need to distinguish them in tests.
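       For illustration, a rough sketch of the now-common behavior at the SQL level (run e.g. in spark-shell; the table and column names are made up, not taken from this PR):

```scala
// Sketch only: with the In-Memory catalog aligned to the Hive external catalog,
// a null partition value is stored under the Hive default partition name in both.
spark.sql("CREATE TABLE part_tbl (col1 INT, p1 STRING) USING parquet PARTITIONED BY (p1)")
spark.sql("INSERT INTO part_tbl PARTITION (p1 = null) SELECT 0")
// Expected to show a partition like: p1=__HIVE_DEFAULT_PARTITION__
spark.sql("SHOW PARTITIONS part_tbl").show(false)
```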

##########
File path: sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
##########
@@ -3575,15 +3575,6 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark
       }
     })
   }
-
-  test("SPARK-33591: null as a partition value") {
-    val t = "part_table"
-    withTable(t) {
-      sql(s"CREATE TABLE $t (col1 INT, p1 STRING) USING PARQUET PARTITIONED BY 
(p1)")
-      sql(s"INSERT INTO TABLE $t PARTITION (p1 = null) SELECT 0")
-      checkAnswer(sql(s"SELECT * FROM $t"), Row(0, null))
-    }

Review comment:
       The new test covers both the v1 In-Memory and Hive external catalogs.

##########
File path: sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
##########
@@ -1734,9 +1735,8 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
 
     // null partition values
     createTablePartition(catalog, Map("a" -> null, "b" -> null), tableIdent)
-    val nullPartValue = if (isUsingHiveMetastore) "__HIVE_DEFAULT_PARTITION__" else null
     assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
-      Set(Map("a" -> nullPartValue, "b" -> nullPartValue)))
+      Set(Map("a" -> "__HIVE_DEFAULT_PARTITION__", "b" -> "__HIVE_DEFAULT_PARTITION__")))

Review comment:
       In master, we already have common settings in the unified tests:
       https://github.com/apache/spark/blob/861f8bb5fb82e53a223ae121737fd6d54ab8ba52/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala#L36
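       For context, a rough sketch of what such a common setting looks like in the unified test layout (names here are illustrative, not copied from the linked file):

```scala
// Illustrative only: the base trait defines the setting once, and the per-catalog
// suites mix it in (or override it) instead of branching on the catalog in every test.
trait CommandSuiteBase {
  // Default table provider used by CREATE TABLE in the shared tests.
  def defaultUsing: String = "USING parquet"
}
```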

##########
File path: sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
##########
@@ -3575,15 +3575,6 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark
       }
     })
   }
-
-  test("SPARK-33591: null as a partition value") {
-    val t = "part_table"
-    withTable(t) {
-      sql(s"CREATE TABLE $t (col1 INT, p1 STRING) USING PARQUET PARTITIONED BY 
(p1)")
-      sql(s"INSERT INTO TABLE $t PARTITION (p1 = null) SELECT 0")
-      checkAnswer(sql(s"SELECT * FROM $t"), Row(0, null))
-    }

Review comment:
       The new test covers both the v1 In-Memory and Hive external catalogs because it runs as part of `InMemoryCatalogedDDLSuite` and `HiveCatalogedDDLSuite`.
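       Roughly, the class layout looks like this (a simplified skeleton, not the actual suite bodies): a test written once in the abstract `DDLSuite` is inherited, and therefore executed, by both concrete suites.

```scala
// Simplified skeleton; the real suites contain far more setup and overrides.
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.test.{SQLTestUtils, SharedSparkSession}
import org.apache.spark.sql.hive.test.TestHiveSingleton

abstract class DDLSuite extends QueryTest with SQLTestUtils {
  // Tests defined here, like the null-partition-value test, run in both subclasses below.
}

// Exercises the inherited tests against the v1 In-Memory catalog.
class InMemoryCatalogedDDLSuite extends DDLSuite with SharedSparkSession

// Exercises the same tests against the Hive external catalog (lives in the hive module).
class HiveCatalogedDDLSuite extends DDLSuite with TestHiveSingleton
```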

##########
File path: sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
##########
@@ -3575,15 +3575,6 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark
       }
     })
   }
-
-  test("SPARK-33591: null as a partition value") {
-    val t = "part_table"
-    withTable(t) {
-      sql(s"CREATE TABLE $t (col1 INT, p1 STRING) USING PARQUET PARTITIONED BY 
(p1)")
-      sql(s"INSERT INTO TABLE $t PARTITION (p1 = null) SELECT 0")
-      checkAnswer(sql(s"SELECT * FROM $t"), Row(0, null))
-    }

Review comment:
       Here it is: https://github.com/apache/spark/pull/31331



