YannByron commented on code in PR #1459:
URL: https://github.com/apache/incubator-paimon/pull/1459#discussion_r1250117389


##########
paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/commands/PaimonCommand.scala:
##########
@@ -44,4 +47,50 @@ trait PaimonCommand {
         throw new RuntimeException("Failed to deserialize CommitMessage's 
object", e)
     }
   }
+
+  /**
+   * For the 'INSERT OVERWRITE' semantics of SQL, Spark DataSourceV2 will call the `truncate`
+   * method, where an `AlwaysTrue` filter is used.
+   */
+  def isTruncate(filter: Filter): Boolean = {
+    val filters = splitConjunctiveFilters(filter)
+    filters.length == 1 && filters.head.isInstanceOf[AlwaysTrue]
+  }
+
+  /**
+   * For the 'INSERT OVERWRITE T PARTITION (partitionVal, ...)' semantics of SQL, Spark will
+   * transform the `partitionVal`s into `EqualNullSafe` filters.
+   */
+  def convertFilterToMap(
+      filter: Filter,
+      partitionColumns: Seq[String],
+      resolver: Resolver): Map[String, String] = {
+    splitConjunctiveFilters(filter).map {
+      case EqualNullSafe(attribute, value) =>
+        if (isNestedFilterInValue(value)) {
+          throw new RuntimeException(
+            "Unsupported complex partition values in EqualNullSafe when running `INSERT OVERWRITE`.")
+        } else {
+          partitionColumns.find(resolver(_, attribute)).map((_, value.toString)).getOrElse {

Review Comment:
   OK, and I'll add more UTs that use different data types as the partition columns.
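
   A rough sketch of what one of those UTs might look like (table name and test names are hypothetical; it assumes a QueryTest-style suite with a SparkSession `spark` that has the Paimon catalog configured):

   ```scala
   test("INSERT OVERWRITE with non-string partition column types") {
     // Partition columns of INT and DATE type, not just STRING.
     spark.sql("CREATE TABLE T (a INT, b STRING, pt1 INT, pt2 DATE) PARTITIONED BY (pt1, pt2)")
     spark.sql(
       "INSERT INTO T VALUES (1, 'x', 10, DATE '2023-06-01'), (2, 'y', 20, DATE '2023-06-02')")

     // Spark rewrites the static partition spec into EqualNullSafe filters such as
     // EqualNullSafe("pt1", 10), which convertFilterToMap must resolve against the
     // partition columns whatever their data types are.
     spark.sql("INSERT OVERWRITE T PARTITION (pt1 = 10, pt2 = '2023-06-01') VALUES (3, 'z')")

     // Only the (10, 2023-06-01) partition is replaced; the other partition survives.
     checkAnswer(
       spark.sql("SELECT a, b FROM T ORDER BY a"),
       Row(2, "y") :: Row(3, "z") :: Nil)

     // With the default static partitionOverwriteMode, a plain INSERT OVERWRITE
     // goes through truncate(), i.e. overwrite(AlwaysTrue), and replaces all partitions.
     spark.sql("INSERT OVERWRITE T VALUES (4, 'w', 30, DATE '2023-06-03')")
     checkAnswer(spark.sql("SELECT a, b FROM T"), Row(4, "w") :: Nil)
   }
   ```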


