HyukjinKwon commented on a change in pull request #25175: 
[SPARK-28411][PYTHON][SQL] InsertInto with overwrite is not honored
URL: https://github.com/apache/spark/pull/25175#discussion_r304212498
 
 

 ##########
 File path: python/pyspark/sql/tests/test_readwriter.py
 ##########
 @@ -141,6 +141,26 @@ def count_bucketed_cols(names, table="pyspark_bucket"):
                 .mode("overwrite").saveAsTable("pyspark_bucket"))
             self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
 
+    def test_insert_into(self):
+        df = self.spark.createDataFrame([("a", 1), ("b", 2)], ["C1", "C2"])
+        with self.table("test_table"):
+            df.write.saveAsTable("test_table")
 +            self.assertEqual(2, self.spark.sql("select * from test_table").count())
+
+            df.write.insertInto("test_table")
 +            self.assertEqual(4, self.spark.sql("select * from test_table").count())
+
+            df.write.mode("overwrite").insertInto("test_table")
 +            self.assertEqual(2, self.spark.sql("select * from test_table").count())
+
+            df.write.insertInto("test_table", True)
 +            self.assertEqual(2, self.spark.sql("select * from test_table").count())
+
+            df.write.insertInto("test_table", False)
 +            self.assertEqual(4, self.spark.sql("select * from test_table").count())
+
+            # self.spark.sql("drop table test_table")
 
 Review comment:
   Seems like a mistake: this commented-out `drop table` line looks like a leftover and can be removed.
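   If `self.table("test_table")` here is the test-utils context manager (my assumption), it already drops the table when the `with` block exits, so the explicit drop is redundant anyway. A minimal sketch of the cleanup pattern such a helper typically follows, with hypothetical names:

   ```python
   from contextlib import contextmanager

   @contextmanager
   def table(spark, *names):
       """Hypothetical helper: drop the given tables once the block finishes."""
       try:
           yield
       finally:
           for name in names:
               # Clean up whether the test body passed or failed.
               spark.sql("DROP TABLE IF EXISTS %s" % name)
   ```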
