This is an automated email from the ASF dual-hosted git repository.

ruifengz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 620474661f11 [SPARK-53397][PYTHON][TESTS] Fix UDTF with collations test indentation
620474661f11 is described below

commit 620474661f119c1029372e381a6fbc405ab145d1
Author: ilicmarkodb <marko.i...@databricks.com>
AuthorDate: Thu Aug 28 11:01:43 2025 +0800

    [SPARK-53397][PYTHON][TESTS] Fix UDTF with collations test indentation
    
    ### What changes were proposed in this pull request?
    Fix UDTF with collations test indentation. The test was accidentally merged without proper indentation, causing it to be skipped.
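    
    As a minimal sketch of why the indentation matters (hypothetical class and test names, plain `unittest` semantics): only `test_*` methods defined inside a `TestCase` subclass are collected by the runner, while a function that is accidentally dedented to module level is never executed at all.
    
    ```python
    import unittest
    
    class ExampleTests(unittest.TestCase):
        def test_runs(self):
            # Indented inside the class body: collected and executed.
            self.assertTrue(True)
    
    def test_silently_skipped(self):
        # Dedented to module level: unittest never collects this,
        # so even an obvious failure goes unnoticed.
        self.assertTrue(False)
    
    if __name__ == "__main__":
        unittest.main()  # runs only ExampleTests.test_runs
    ```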
    
    ### Why are the changes needed?
    Better testing: the collated-string UDTF test now actually runs instead of being silently skipped.
    
    ### Does this PR introduce _any_ user-facing change?
    No.
    
    ### How was this patch tested?
    The fixed `test_udtf_with_collated_string_types` test, which now runs as part of the UDTF test suites.
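    
    For reference, one way to run just this test locally, as a sketch assuming a working PySpark development environment (the module, class, and test names are the ones in the diff below):
    
    ```python
    import unittest
    
    # UDTFTests mixes BaseUDTFTestsMixin into ReusedSQLTestCase, so the class
    # fixture starts a SparkSession before the test method runs.
    from pyspark.sql.tests.test_udtf import UDTFTests
    
    suite = unittest.TestSuite()
    suite.addTest(UDTFTests("test_udtf_with_collated_string_types"))
    unittest.TextTestRunner(verbosity=2).run(suite)
    ```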
    
    ### Was this patch authored or co-authored using generative AI tooling?
    No.
    
    Closes #52001 from ilicmarkodb/fix_udtf_test.
    
    Lead-authored-by: ilicmarkodb <marko.i...@databricks.com>
    Co-authored-by: Ruifeng Zheng <ruife...@foxmail.com>
    Signed-off-by: Ruifeng Zheng <ruife...@apache.org>
---
 python/pyspark/sql/tests/test_udtf.py | 72 ++++++++++++++++++-----------------
 1 file changed, 37 insertions(+), 35 deletions(-)

diff --git a/python/pyspark/sql/tests/test_udtf.py b/python/pyspark/sql/tests/test_udtf.py
index c8d7d9f14563..7079d22fbf0d 100644
--- a/python/pyspark/sql/tests/test_udtf.py
+++ b/python/pyspark/sql/tests/test_udtf.py
@@ -2923,6 +2923,43 @@ class BaseUDTFTestsMixin:
                     err_type=Exception,
                 )
 
+    def test_udtf_with_collated_string_types(self):
+        @udtf(
+            returnType="out1 string, out2 string collate UTF8_BINARY, "
+            "out3 string collate UTF8_LCASE, out4 string collate UNICODE"
+        )
+        class MyUDTF:
+            def eval(self, v1, v2, v3, v4):
+                yield (v1 + "1", v2 + "2", v3 + "3", v4 + "4")
+
+        schema = StructType(
+            [
+                StructField("col1", StringType(), True),
+                StructField("col2", StringType("UTF8_BINARY"), True),
+                StructField("col3", StringType("UTF8_LCASE"), True),
+                StructField("col4", StringType("UNICODE"), True),
+            ]
+        )
+        df = self.spark.createDataFrame([("hello",) * 4], schema=schema)
+
+        result_df = df.lateralJoin(
+            MyUDTF(
+                col("col1").outer(), col("col2").outer(), col("col3").outer(), 
col("col4").outer()
+            )
+        ).select("out1", "out2", "out3", "out4")
+
+        expected_row = ("hello1", "hello2", "hello3", "hello4")
+        self.assertEqual(result_df.collect()[0], expected_row)
+
+        expected_output_types = [
+            StringType(),
+            StringType("UTF8_BINARY"),
+            StringType("UTF8_LCASE"),
+            StringType("UNICODE"),
+        ]
+        for idx, field in enumerate(result_df.schema.fields):
+            self.assertEqual(field.dataType, expected_output_types[idx])
+
 
 class UDTFTests(BaseUDTFTestsMixin, ReusedSQLTestCase):
     @classmethod
@@ -3490,41 +3527,6 @@ class UDTFArrowTestsMixin(LegacyUDTFArrowTestsMixin):
                     udtf(TestUDTF, returnType=ret_type)().collect()
 
 
-def test_udtf_with_collated_string_types(self):
-    @udtf(
-        "out1 string, out2 string collate UTF8_BINARY, out3 string collate 
UTF8_LCASE,"
-        " out4 string collate UNICODE"
-    )
-    class MyUDTF:
-        def eval(self, v1, v2, v3, v4):
-            yield (v1 + "1", v2 + "2", v3 + "3", v4 + "4")
-
-    schema = StructType(
-        [
-            StructField("col1", StringType(), True),
-            StructField("col2", StringType("UTF8_BINARY"), True),
-            StructField("col3", StringType("UTF8_LCASE"), True),
-            StructField("col4", StringType("UNICODE"), True),
-        ]
-    )
-    df = self.spark.createDataFrame([("hello",) * 4], schema=schema)
-
-    df_out = df.select(MyUDTF(df.col1, df.col2, df.col3, df.col4).alias("out"))
-    result_df = df_out.select("out.*")
-
-    expected_row = ("hello1", "hello2", "hello3", "hello4")
-    self.assertEqual(result_df.collect()[0], expected_row)
-
-    expected_output_types = [
-        StringType(),
-        StringType("UTF8_BINARY"),
-        StringType("UTF8_LCASE"),
-        StringType("UNICODE"),
-    ]
-    for idx, field in enumerate(result_df.schema.fields):
-        self.assertEqual(field.dataType, expected_output_types[idx])
-
-
 class UDTFArrowTests(UDTFArrowTestsMixin, ReusedSQLTestCase):
     @classmethod
     def setUpClass(cls):

