This is an automated email from the ASF dual-hosted git repository.

ruifengz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new ebf4dd1889f3 [MINOR][PYTHON][TESTS] Use different temp table name in foreachBatch tests
ebf4dd1889f3 is described below

commit ebf4dd1889f3404e856723f66851af48751b469f
Author: Hyukjin Kwon <gurwls...@apache.org>
AuthorDate: Wed Aug 13 12:24:30 2025 +0800

    [MINOR][PYTHON][TESTS] Use different temp table name in foreachBatch tests
    
    ### What changes were proposed in this pull request?
    
    This PR proposes to use different temp table names in the foreachBatch tests.
    
    ### Why are the changes needed?
    
    I think it fixes https://github.com/apache/spark/actions/runs/16920826594/job/47946275434 . Because both tests used the same temp table name, they interfered with each other when executed asynchronously.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No, test-only.
    
    ### How was this patch tested?
    
    Manually.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    No.
    
    Closes #52002 from HyukjinKwon/minor-diff-table-name.
    
    Authored-by: Hyukjin Kwon <gurwls...@apache.org>
    Signed-off-by: Ruifeng Zheng <ruife...@apache.org>
---
 .../sql/tests/streaming/test_streaming_foreach_batch.py      | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/python/pyspark/sql/tests/streaming/test_streaming_foreach_batch.py b/python/pyspark/sql/tests/streaming/test_streaming_foreach_batch.py
index 2ff95fb9c154..380f089d6a55 100644
--- a/python/pyspark/sql/tests/streaming/test_streaming_foreach_batch.py
+++ b/python/pyspark/sql/tests/streaming/test_streaming_foreach_batch.py
@@ -29,18 +29,18 @@ class StreamingTestsForeachBatchMixin:
         q = None
 
         def collectBatch(batch_df, batch_id):
-            batch_df.write.format("parquet").saveAsTable("test_table")
+            batch_df.write.format("parquet").saveAsTable("test_table1")
 
         try:
             df = self.spark.readStream.format("text").load("python/test_support/sql/streaming")
             q = df.writeStream.foreachBatch(collectBatch).start()
             q.processAllAvailable()
-            collected = self.spark.sql("select * from test_table").collect()
+            collected = self.spark.sql("select * from test_table1").collect()
             self.assertTrue(len(collected), 2)
         finally:
             if q:
                 q.stop()
-            self.spark.sql("DROP TABLE IF EXISTS test_table")
+            self.spark.sql("DROP TABLE IF EXISTS test_table1")
 
     def test_streaming_foreach_batch_tempview(self):
         q = None
@@ -52,18 +52,18 @@ class StreamingTestsForeachBatchMixin:
             # streaming query
             assert len(batch_df.sparkSession.sql("SELECT * FROM updates").collect()) == 2
             # Write a table to verify on the repl/client side.
-            batch_df.write.format("parquet").saveAsTable("test_table")
+            batch_df.write.format("parquet").saveAsTable("test_table2")
 
         try:
             df = self.spark.readStream.format("text").load("python/test_support/sql/streaming")
             q = df.writeStream.foreachBatch(collectBatch).start()
             q.processAllAvailable()
-            collected = self.spark.sql("SELECT * FROM test_table").collect()
+            collected = self.spark.sql("SELECT * FROM test_table2").collect()
             self.assertTrue(len(collected[0]), 2)
         finally:
             if q:
                 q.stop()
-            self.spark.sql("DROP TABLE IF EXISTS test_table")
+            self.spark.sql("DROP TABLE IF EXISTS test_table2")
 
     def test_streaming_foreach_batch_propagates_python_errors(self):
         from pyspark.errors import StreamingQueryException
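
A more defensive variant (a sketch only, not part of this commit) would derive
a unique table name per test instead of hand-picking distinct fixed names; the
naming scheme below is hypothetical:

    import uuid

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    # A fresh random suffix makes concurrent test runs collision-free
    # without having to coordinate fixed names across tests.
    table_name = f"test_table_{uuid.uuid4().hex}"
    try:
        spark.range(2).write.format("parquet").saveAsTable(table_name)
        assert len(spark.sql(f"SELECT * FROM {table_name}").collect()) == 2
    finally:
        spark.sql(f"DROP TABLE IF EXISTS {table_name}")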

