This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.5
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.5 by this push:
     new 89a608ba41b [SPARK-42944][PYTHON][FOLLOW-UP][3.5] Rename tests from foreachBatch to foreach_batch
89a608ba41b is described below

commit 89a608ba41bceafe3e79978d8998c09fa4a83740
Author: Hyukjin Kwon <[email protected]>
AuthorDate: Mon Aug 28 13:32:44 2023 +0900

    [SPARK-42944][PYTHON][FOLLOW-UP][3.5] Rename tests from foreachBatch to foreach_batch
    
    This PR cherry-picks https://github.com/apache/spark/pull/42675 to branch-3.5.
    
    ---
    
    ### What changes were proposed in this pull request?
    
    This PR proposes to rename tests from foreachBatch to foreach_batch.
    
    ### Why are the changes needed?
    
    Non-API code should follow the snake_case naming rule per PEP 8.
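
    As a minimal, hypothetical illustration of that rule (not part of this patch): the test method name is internal code and uses snake_case, while the public DataStreamWriter.foreachBatch API keeps its camelCase spelling and is not renamed.

        import unittest

        class NamingConventionExample(unittest.TestCase):
            # Test names are non-API code, so PEP 8 snake_case applies here.
            def test_streaming_foreach_batch(self):
                # The public API name (foreachBatch) itself is unchanged;
                # only internal test identifiers move to snake_case.
                self.assertEqual("foreach_batch", "foreachBatch".replace("B", "_b"))

        if __name__ == "__main__":
            unittest.main()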
    
    ### Does this PR introduce _any_ user-facing change?
    
    No, dev-only.
    
    ### How was this patch tested?
    
    CI in this PR should test it out.
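
    For a quick local check, the renamed module can also be loaded and run through plain unittest. This is a minimal sketch, assuming a locally built PySpark is importable; the usual workflow goes through CI via the dev/sparktestsupport/modules.py entries changed below.

        import unittest

        # Load the streaming foreachBatch test suite by its new module name.
        suite = unittest.defaultTestLoader.loadTestsFromName(
            "pyspark.sql.tests.streaming.test_streaming_foreach_batch"
        )
        unittest.TextTestRunner(verbosity=2).run(suite)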
    
    ### Was this patch authored or co-authored using generative AI tooling?
    No.
    
    Closes #42692 from HyukjinKwon/SPARK-42944-3.5.
    
    Authored-by: Hyukjin Kwon <[email protected]>
    Signed-off-by: Hyukjin Kwon <[email protected]>
---
 .../sql/connect/planner/StreamingForeachBatchHelper.scala    |  2 +-
 dev/sparktestsupport/modules.py                              |  4 ++--
 .../{foreachBatch_worker.py => foreach_batch_worker.py}      |  0
 ...t_parity_foreachBatch.py => test_parity_foreach_batch.py} | 12 ++++++------
 ...aming_foreachBatch.py => test_streaming_foreach_batch.py} | 10 +++++-----
 5 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/connector/connect/server/src/main/scala/org/apache/spark/sql/connect/planner/StreamingForeachBatchHelper.scala b/connector/connect/server/src/main/scala/org/apache/spark/sql/connect/planner/StreamingForeachBatchHelper.scala
index 21e4adb9896..9c74c8b69ae 100644
--- a/connector/connect/server/src/main/scala/org/apache/spark/sql/connect/planner/StreamingForeachBatchHelper.scala
+++ b/connector/connect/server/src/main/scala/org/apache/spark/sql/connect/planner/StreamingForeachBatchHelper.scala
@@ -108,7 +108,7 @@ object StreamingForeachBatchHelper extends Logging {
       pythonFn,
       connectUrl,
       sessionHolder.sessionId,
-      "pyspark.sql.connect.streaming.worker.foreachBatch_worker")
+      "pyspark.sql.connect.streaming.worker.foreach_batch_worker")
     val (dataOut, dataIn) = runner.init()
 
     val foreachBatchRunnerFn: FnArgsWithId => Unit = (args: FnArgsWithId) => {
diff --git a/dev/sparktestsupport/modules.py b/dev/sparktestsupport/modules.py
index 2af7b83c7a0..33d253a47ea 100644
--- a/dev/sparktestsupport/modules.py
+++ b/dev/sparktestsupport/modules.py
@@ -497,7 +497,7 @@ pyspark_sql = Module(
         "pyspark.sql.tests.test_session",
         "pyspark.sql.tests.streaming.test_streaming",
         "pyspark.sql.tests.streaming.test_streaming_foreach",
-        "pyspark.sql.tests.streaming.test_streaming_foreachBatch",
+        "pyspark.sql.tests.streaming.test_streaming_foreach_batch",
         "pyspark.sql.tests.streaming.test_streaming_listener",
         "pyspark.sql.tests.test_types",
         "pyspark.sql.tests.test_udf",
@@ -868,7 +868,7 @@ pyspark_connect = Module(
         "pyspark.sql.tests.connect.streaming.test_parity_streaming",
         "pyspark.sql.tests.connect.streaming.test_parity_listener",
         "pyspark.sql.tests.connect.streaming.test_parity_foreach",
-        "pyspark.sql.tests.connect.streaming.test_parity_foreachBatch",
+        "pyspark.sql.tests.connect.streaming.test_parity_foreach_batch",
         "pyspark.sql.tests.connect.test_parity_pandas_grouped_map_with_state",
         "pyspark.sql.tests.connect.test_parity_pandas_udf_scalar",
         "pyspark.sql.tests.connect.test_parity_pandas_udf_grouped_agg",
diff --git a/python/pyspark/sql/connect/streaming/worker/foreachBatch_worker.py b/python/pyspark/sql/connect/streaming/worker/foreach_batch_worker.py
similarity index 100%
rename from python/pyspark/sql/connect/streaming/worker/foreachBatch_worker.py
rename to python/pyspark/sql/connect/streaming/worker/foreach_batch_worker.py
diff --git a/python/pyspark/sql/tests/connect/streaming/test_parity_foreachBatch.py b/python/pyspark/sql/tests/connect/streaming/test_parity_foreach_batch.py
similarity index 79%
rename from python/pyspark/sql/tests/connect/streaming/test_parity_foreachBatch.py
rename to python/pyspark/sql/tests/connect/streaming/test_parity_foreach_batch.py
index 01108c95391..74371b6d407 100644
--- a/python/pyspark/sql/tests/connect/streaming/test_parity_foreachBatch.py
+++ b/python/pyspark/sql/tests/connect/streaming/test_parity_foreach_batch.py
@@ -17,23 +17,23 @@
 
 import unittest
 
-from pyspark.sql.tests.streaming.test_streaming_foreachBatch import StreamingTestsForeachBatchMixin
+from pyspark.sql.tests.streaming.test_streaming_foreach_batch import StreamingTestsForeachBatchMixin
 from pyspark.testing.connectutils import ReusedConnectTestCase
 
 
 class StreamingForeachBatchParityTests(StreamingTestsForeachBatchMixin, ReusedConnectTestCase):
     @unittest.skip("SPARK-44463: Error handling needs improvement in connect foreachBatch")
-    def test_streaming_foreachBatch_propagates_python_errors(self):
-        super().test_streaming_foreachBatch_propagates_python_errors
+    def test_streaming_foreach_batch_propagates_python_errors(self):
+        super().test_streaming_foreach_batch_propagates_python_errors()
 
     @unittest.skip("This seems specific to py4j and pinned threads. The intention is unclear")
-    def test_streaming_foreachBatch_graceful_stop(self):
-        super().test_streaming_foreachBatch_graceful_stop()
+    def test_streaming_foreach_batch_graceful_stop(self):
+        super().test_streaming_foreach_batch_graceful_stop()
 
 
 if __name__ == "__main__":
     import unittest
-    from pyspark.sql.tests.connect.streaming.test_parity_foreachBatch import *  # noqa: F401,E501
+    from pyspark.sql.tests.connect.streaming.test_parity_foreach_batch import *  # noqa: F401,E501
 
     try:
         import xmlrunner  # type: ignore[import]
diff --git a/python/pyspark/sql/tests/streaming/test_streaming_foreachBatch.py b/python/pyspark/sql/tests/streaming/test_streaming_foreach_batch.py
similarity index 92%
rename from python/pyspark/sql/tests/streaming/test_streaming_foreachBatch.py
rename to python/pyspark/sql/tests/streaming/test_streaming_foreach_batch.py
index d4e185c3d85..393101a096e 100644
--- a/python/pyspark/sql/tests/streaming/test_streaming_foreachBatch.py
+++ b/python/pyspark/sql/tests/streaming/test_streaming_foreach_batch.py
@@ -21,7 +21,7 @@ from pyspark.testing.sqlutils import ReusedSQLTestCase
 
 
 class StreamingTestsForeachBatchMixin:
-    def test_streaming_foreachBatch(self):
+    def test_streaming_foreach_batch(self):
         q = None
 
         def collectBatch(batch_df, batch_id):
@@ -37,7 +37,7 @@ class StreamingTestsForeachBatchMixin:
             if q:
                 q.stop()
 
-    def test_streaming_foreachBatch_tempview(self):
+    def test_streaming_foreach_batch_tempview(self):
         q = None
 
         def collectBatch(batch_df, batch_id):
@@ -59,7 +59,7 @@ class StreamingTestsForeachBatchMixin:
             if q:
                 q.stop()
 
-    def test_streaming_foreachBatch_propagates_python_errors(self):
+    def test_streaming_foreach_batch_propagates_python_errors(self):
         from pyspark.errors import StreamingQueryException
 
         q = None
@@ -78,7 +78,7 @@ class StreamingTestsForeachBatchMixin:
             if q:
                 q.stop()
 
-    def test_streaming_foreachBatch_graceful_stop(self):
+    def test_streaming_foreach_batch_graceful_stop(self):
         # SPARK-39218: Make foreachBatch streaming query stop gracefully
         def func(batch_df, _):
             batch_df.sparkSession._jvm.java.lang.Thread.sleep(10000)
@@ -95,7 +95,7 @@ class StreamingTestsForeachBatch(StreamingTestsForeachBatchMixin, ReusedSQLTestC
 
 if __name__ == "__main__":
     import unittest
-    from pyspark.sql.tests.streaming.test_streaming_foreachBatch import *  # noqa: F401
+    from pyspark.sql.tests.streaming.test_streaming_foreach_batch import *  # noqa: F401
 
     try:
         import xmlrunner


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
