rdblue commented on a change in pull request #24832: [SPARK-27845][SQL] 
DataSourceV2: InsertTable
URL: https://github.com/apache/spark/pull/24832#discussion_r300785218
 
 

 ##########
 File path: 
sql/core/src/test/scala/org/apache/spark/sql/sources/v2/TestInMemoryTableCatalog.scala
 ##########
 @@ -149,49 +161,89 @@ private class InMemoryTable(
   }
 
   override def newWriteBuilder(options: CaseInsensitiveStringMap): 
WriteBuilder = {
-    new WriteBuilder with SupportsTruncate {
-      private var shouldTruncate: Boolean = false
+    new WriteBuilder with SupportsTruncate with SupportsOverwrite with 
SupportsDynamicOverwrite {
+      private var writer: BatchWrite = Append
 
       override def truncate(): WriteBuilder = {
-        shouldTruncate = true
+        assert(writer == Append)
+        writer = TruncateAndAppend
+        this
+      }
+
+      override def overwrite(filters: Array[Filter]): WriteBuilder = {
+        assert(writer == Append)
+        writer = new Overwrite(filters)
         this
       }
 
-      override def buildForBatch(): BatchWrite = {
-        if (shouldTruncate) TruncateAndAppend else Append
+      override def overwriteDynamicPartitions(): WriteBuilder = {
+        assert(writer == Append)
+        writer = DynamicOverwrite
+        this
       }
+
+      override def buildForBatch(): BatchWrite = writer
     }
   }
 
-  private object TruncateAndAppend extends BatchWrite {
+  private abstract class TestBatchWrite extends BatchWrite {
     override def createBatchWriterFactory(): DataWriterFactory = {
       BufferedRowsWriterFactory
     }
 
-    override def commit(messages: Array[WriterCommitMessage]): Unit = {
-      replaceData(messages.map(_.asInstanceOf[BufferedRows]))
+    override def abort(messages: Array[WriterCommitMessage]): Unit = {
     }
+  }
 
-    override def abort(messages: Array[WriterCommitMessage]): Unit = {
+  private object Append extends TestBatchWrite {
+    override def commit(messages: Array[WriterCommitMessage]): Unit = 
dataMap.synchronized {
+      withData(messages.map(_.asInstanceOf[BufferedRows]))
     }
   }
 
-  private object Append extends BatchWrite {
-    override def createBatchWriterFactory(): DataWriterFactory = {
-      BufferedRowsWriterFactory
+  private object DynamicOverwrite extends TestBatchWrite {
+    override def commit(messages: Array[WriterCommitMessage]): Unit = 
dataMap.synchronized {
+      val newData = messages.map(_.asInstanceOf[BufferedRows])
+      dataMap --= newData.flatMap(_.rows.map(getKey))
+      withData(newData)
     }
+  }
 
-    override def commit(messages: Array[WriterCommitMessage]): Unit = {
-      replaceData(data ++ messages.map(_.asInstanceOf[BufferedRows]))
+  private class Overwrite(filters: Array[Filter]) extends TestBatchWrite {
+    override def commit(messages: Array[WriterCommitMessage]): Unit = 
dataMap.synchronized {
+      val deleteKeys = dataMap.keys.filter { partValues =>
+        filters.exists {
 
 Review comment:
  Looks like this selects a key for deletion if any single filter matches it. 
The `exists` Scaladoc says "Tests whether a predicate holds for at least one 
value", so this is implementing an OR of all the filters, but the desired 
behavior is an AND of all the filters.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to