This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 8cbff856735f [SPARK-54902][PYTHON][TESTS] Rename testing helper 
contextmanager for temp view
8cbff856735f is described below

commit 8cbff856735f531a0d0b08b7e1081d7fe82bb059
Author: Ruifeng Zheng <[email protected]>
AuthorDate: Tue Jan 6 10:59:33 2026 +0900

    [SPARK-54902][PYTHON][TESTS] Rename testing helper contextmanager for temp 
view
    
    ### What changes were proposed in this pull request?
    Rename testing helper contextmanager for temp view: `tempView` -> 
`temp_view`
    
    ### Why are the changes needed?
    To be consistent with the other context managers: `temp_func`, `temp_env`
    
    ### Does this PR introduce _any_ user-facing change?
    no, test-only change
    
    ### How was this patch tested?
    ci
    
    ### Was this patch authored or co-authored using generative AI tooling?
    no
    
    Closes #53677 from zhengruifeng/rename_temp_view.
    
    Authored-by: Ruifeng Zheng <[email protected]>
    Signed-off-by: Hyukjin Kwon <[email protected]>
---
 .../sql/tests/arrow/test_arrow_udf_grouped_agg.py  | 14 +++---
 .../sql/tests/arrow/test_arrow_udf_window.py       |  6 +--
 .../sql/tests/connect/test_connect_basic.py        |  4 +-
 .../sql/tests/connect/test_parity_subquery.py      |  2 +-
 .../tests/pandas/test_pandas_udf_grouped_agg.py    | 10 ++--
 .../sql/tests/pandas/test_pandas_udf_window.py     |  6 +--
 python/pyspark/sql/tests/test_catalog.py           |  2 +-
 python/pyspark/sql/tests/test_dataframe.py         |  2 +-
 python/pyspark/sql/tests/test_functions.py         |  2 +-
 python/pyspark/sql/tests/test_group.py             |  4 +-
 python/pyspark/sql/tests/test_sql.py               | 10 ++--
 python/pyspark/sql/tests/test_subquery.py          | 58 +++++++++++-----------
 python/pyspark/sql/tests/test_tvf.py               | 20 ++++----
 python/pyspark/sql/tests/test_types.py             | 16 +++---
 python/pyspark/sql/tests/test_udf.py               |  6 +--
 python/pyspark/sql/tests/test_udtf.py              |  4 +-
 python/pyspark/sql/tests/test_unified_udf.py       |  4 +-
 python/pyspark/testing/sqlutils.py                 |  2 +-
 18 files changed, 86 insertions(+), 86 deletions(-)

diff --git a/python/pyspark/sql/tests/arrow/test_arrow_udf_grouped_agg.py 
b/python/pyspark/sql/tests/arrow/test_arrow_udf_grouped_agg.py
index dd953ad3b973..fdeee65292cb 100644
--- a/python/pyspark/sql/tests/arrow/test_arrow_udf_grouped_agg.py
+++ b/python/pyspark/sql/tests/arrow/test_arrow_udf_grouped_agg.py
@@ -527,7 +527,7 @@ class GroupedAggArrowUDFTestsMixin:
 
         df = self.spark.range(0, 100)
 
-        with self.tempView("table"), self.temp_func("max_udf"):
+        with self.temp_view("table"), self.temp_func("max_udf"):
             df.createTempView("table")
             self.spark.udf.register("max_udf", max_udf)
 
@@ -556,7 +556,7 @@ class GroupedAggArrowUDFTestsMixin:
         df = self.data
         weighted_mean = self.arrow_agg_weighted_mean_udf
 
-        with self.tempView("v"), self.temp_func("weighted_mean"):
+        with self.temp_view("v"), self.temp_func("weighted_mean"):
             df.createOrReplaceTempView("v")
             self.spark.udf.register("weighted_mean", weighted_mean)
 
@@ -585,7 +585,7 @@ class GroupedAggArrowUDFTestsMixin:
         df = self.data
         weighted_mean = self.arrow_agg_weighted_mean_udf
 
-        with self.tempView("v"), self.temp_func("weighted_mean"):
+        with self.temp_view("v"), self.temp_func("weighted_mean"):
             df.createOrReplaceTempView("v")
             self.spark.udf.register("weighted_mean", weighted_mean)
 
@@ -625,7 +625,7 @@ class GroupedAggArrowUDFTestsMixin:
 
             return np.average(kwargs["v"], weights=kwargs["w"])
 
-        with self.tempView("v"), self.temp_func("weighted_mean"):
+        with self.temp_view("v"), self.temp_func("weighted_mean"):
             df.createOrReplaceTempView("v")
             self.spark.udf.register("weighted_mean", weighted_mean)
 
@@ -670,7 +670,7 @@ class GroupedAggArrowUDFTestsMixin:
         def biased_sum(v, w=None):
             return pa.compute.sum(v).as_py() + (pa.compute.sum(w).as_py() if w 
is not None else 100)
 
-        with self.tempView("v"), self.temp_func("biased_sum"):
+        with self.temp_view("v"), self.temp_func("biased_sum"):
             df.createOrReplaceTempView("v")
             self.spark.udf.register("biased_sum", biased_sum)
 
@@ -1232,7 +1232,7 @@ class GroupedAggArrowUDFTestsMixin:
             [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ("id", "v")
         )
 
-        with self.tempView("test_table"), self.temp_func("arrow_mean_iter"):
+        with self.temp_view("test_table"), self.temp_func("arrow_mean_iter"):
             df.createOrReplaceTempView("test_table")
             self.spark.udf.register("arrow_mean_iter", arrow_mean_iter)
 
@@ -1266,7 +1266,7 @@ class GroupedAggArrowUDFTestsMixin:
             ("id", "v", "w"),
         )
 
-        with self.tempView("test_table"), 
self.temp_func("arrow_weighted_mean_iter"):
+        with self.temp_view("test_table"), 
self.temp_func("arrow_weighted_mean_iter"):
             df.createOrReplaceTempView("test_table")
             self.spark.udf.register("arrow_weighted_mean_iter", 
arrow_weighted_mean_iter)
 
diff --git a/python/pyspark/sql/tests/arrow/test_arrow_udf_window.py 
b/python/pyspark/sql/tests/arrow/test_arrow_udf_window.py
index b0adfbe13186..a6a31aa5c2fb 100644
--- a/python/pyspark/sql/tests/arrow/test_arrow_udf_window.py
+++ b/python/pyspark/sql/tests/arrow/test_arrow_udf_window.py
@@ -406,7 +406,7 @@ class WindowArrowUDFTestsMixin:
                         windowed.collect(), df.withColumn("wm", 
sf.mean(df.v).over(w)).collect()
                     )
 
-        with self.tempView("v"), self.temp_func("weighted_mean"):
+        with self.temp_view("v"), self.temp_func("weighted_mean"):
             df.createOrReplaceTempView("v")
             self.spark.udf.register("weighted_mean", weighted_mean)
 
@@ -437,7 +437,7 @@ class WindowArrowUDFTestsMixin:
         df = self.data
         weighted_mean = self.arrow_agg_weighted_mean_udf
 
-        with self.tempView("v"), self.temp_func("weighted_mean"):
+        with self.temp_view("v"), self.temp_func("weighted_mean"):
             df.createOrReplaceTempView("v")
             self.spark.udf.register("weighted_mean", weighted_mean)
 
@@ -507,7 +507,7 @@ class WindowArrowUDFTestsMixin:
                         windowed.collect(), df.withColumn("wm", 
sf.mean(df.v).over(w)).collect()
                     )
 
-        with self.tempView("v"), self.temp_func("weighted_mean"):
+        with self.temp_view("v"), self.temp_func("weighted_mean"):
             df.createOrReplaceTempView("v")
             self.spark.udf.register("weighted_mean", weighted_mean)
 
diff --git a/python/pyspark/sql/tests/connect/test_connect_basic.py 
b/python/pyspark/sql/tests/connect/test_connect_basic.py
index 08e912a446e3..635bb35e2c87 100755
--- a/python/pyspark/sql/tests/connect/test_connect_basic.py
+++ b/python/pyspark/sql/tests/connect/test_connect_basic.py
@@ -769,7 +769,7 @@ class SparkConnectBasicTests(SparkConnectSQLTestCase):
 
     def test_create_global_temp_view(self):
         # SPARK-41127: test global temp view creation.
-        with self.tempView("view_1"):
+        with self.temp_view("view_1"):
             self.connect.sql("SELECT 1 AS X LIMIT 
0").createGlobalTempView("view_1")
             self.connect.sql("SELECT 2 AS X LIMIT 
1").createOrReplaceGlobalTempView("view_1")
             
self.assertTrue(self.spark.catalog.tableExists("global_temp.view_1"))
@@ -781,7 +781,7 @@ class SparkConnectBasicTests(SparkConnectSQLTestCase):
 
     def test_create_session_local_temp_view(self):
         # SPARK-41372: test session local temp view creation.
-        with self.tempView("view_local_temp"):
+        with self.temp_view("view_local_temp"):
             self.connect.sql("SELECT 1 AS X").createTempView("view_local_temp")
             self.assertEqual(self.connect.sql("SELECT * FROM 
view_local_temp").count(), 1)
             self.connect.sql("SELECT 1 AS X LIMIT 
0").createOrReplaceTempView("view_local_temp")
diff --git a/python/pyspark/sql/tests/connect/test_parity_subquery.py 
b/python/pyspark/sql/tests/connect/test_parity_subquery.py
index f3225fcb7f2d..5f91089e1d8d 100644
--- a/python/pyspark/sql/tests/connect/test_parity_subquery.py
+++ b/python/pyspark/sql/tests/connect/test_parity_subquery.py
@@ -25,7 +25,7 @@ from pyspark.testing.connectutils import ReusedConnectTestCase
 
 class SubqueryParityTests(SubqueryTestsMixin, ReusedConnectTestCase):
     def test_scalar_subquery_with_missing_outer_reference(self):
-        with self.tempView("l", "r"):
+        with self.temp_view("l", "r"):
             self.df1.createOrReplaceTempView("l")
             self.df2.createOrReplaceTempView("r")
 
diff --git a/python/pyspark/sql/tests/pandas/test_pandas_udf_grouped_agg.py 
b/python/pyspark/sql/tests/pandas/test_pandas_udf_grouped_agg.py
index 36fe3cfeb8a7..9a85d4cdbb4b 100644
--- a/python/pyspark/sql/tests/pandas/test_pandas_udf_grouped_agg.py
+++ b/python/pyspark/sql/tests/pandas/test_pandas_udf_grouped_agg.py
@@ -586,7 +586,7 @@ class GroupedAggPandasUDFTestsMixin:
 
         df = self.spark.range(0, 100)
 
-        with self.tempView("table"), self.temp_func("max_udf"):
+        with self.temp_view("table"), self.temp_func("max_udf"):
             df.createTempView("table")
             self.spark.udf.register("max_udf", max_udf)
 
@@ -613,7 +613,7 @@ class GroupedAggPandasUDFTestsMixin:
         df = self.data
         weighted_mean = self.pandas_agg_weighted_mean_udf
 
-        with self.tempView("v"), self.temp_func("weighted_mean"):
+        with self.temp_view("v"), self.temp_func("weighted_mean"):
             df.createOrReplaceTempView("v")
             self.spark.udf.register("weighted_mean", weighted_mean)
 
@@ -638,7 +638,7 @@ class GroupedAggPandasUDFTestsMixin:
         df = self.data
         weighted_mean = self.pandas_agg_weighted_mean_udf
 
-        with self.tempView("v"), self.temp_func("weighted_mean"):
+        with self.temp_view("v"), self.temp_func("weighted_mean"):
             df.createOrReplaceTempView("v")
             self.spark.udf.register("weighted_mean", weighted_mean)
 
@@ -678,7 +678,7 @@ class GroupedAggPandasUDFTestsMixin:
 
             return np.average(kwargs["v"], weights=kwargs["w"])
 
-        with self.tempView("v"), self.temp_func("weighted_mean"):
+        with self.temp_view("v"), self.temp_func("weighted_mean"):
             df.createOrReplaceTempView("v")
             self.spark.udf.register("weighted_mean", weighted_mean)
 
@@ -718,7 +718,7 @@ class GroupedAggPandasUDFTestsMixin:
         def biased_sum(v, w=None):
             return v.sum() + (w.sum() if w is not None else 100)
 
-        with self.tempView("v"), self.temp_func("biased_sum"):
+        with self.temp_view("v"), self.temp_func("biased_sum"):
             df.createOrReplaceTempView("v")
             self.spark.udf.register("biased_sum", biased_sum)
 
diff --git a/python/pyspark/sql/tests/pandas/test_pandas_udf_window.py 
b/python/pyspark/sql/tests/pandas/test_pandas_udf_window.py
index 7cfcb29f50c1..9a5f590c2a94 100644
--- a/python/pyspark/sql/tests/pandas/test_pandas_udf_window.py
+++ b/python/pyspark/sql/tests/pandas/test_pandas_udf_window.py
@@ -437,7 +437,7 @@ class WindowPandasUDFTestsMixin:
                 with self.subTest(bound=bound, query_no=i):
                     assertDataFrameEqual(windowed, df.withColumn("wm", 
sf.mean(df.v).over(w)))
 
-        with self.tempView("v"), self.temp_func("weighted_mean"):
+        with self.temp_view("v"), self.temp_func("weighted_mean"):
             df.createOrReplaceTempView("v")
             self.spark.udf.register("weighted_mean", weighted_mean)
 
@@ -465,7 +465,7 @@ class WindowPandasUDFTestsMixin:
         df = self.data
         weighted_mean = self.pandas_agg_weighted_mean_udf
 
-        with self.tempView("v"), self.temp_func("weighted_mean"):
+        with self.temp_view("v"), self.temp_func("weighted_mean"):
             df.createOrReplaceTempView("v")
             self.spark.udf.register("weighted_mean", weighted_mean)
 
@@ -533,7 +533,7 @@ class WindowPandasUDFTestsMixin:
                 with self.subTest(bound=bound, query_no=i):
                     assertDataFrameEqual(windowed, df.withColumn("wm", 
sf.mean(df.v).over(w)))
 
-        with self.tempView("v"), self.temp_func("weighted_mean"):
+        with self.temp_view("v"), self.temp_func("weighted_mean"):
             df.createOrReplaceTempView("v")
             self.spark.udf.register("weighted_mean", weighted_mean)
 
diff --git a/python/pyspark/sql/tests/test_catalog.py 
b/python/pyspark/sql/tests/test_catalog.py
index 5698195d8af1..2d4f0a84f654 100644
--- a/python/pyspark/sql/tests/test_catalog.py
+++ b/python/pyspark/sql/tests/test_catalog.py
@@ -72,7 +72,7 @@ class CatalogTestsMixin:
         with self.database("some_db"):
             spark.sql("CREATE DATABASE some_db")
             with self.table("tab1", "some_db.tab2", "tab3_via_catalog"):
-                with self.tempView("temp_tab"):
+                with self.temp_view("temp_tab"):
                     self.assertEqual(spark.catalog.listTables(), [])
                     self.assertEqual(spark.catalog.listTables("some_db"), [])
                     spark.createDataFrame([(1, 
1)]).createOrReplaceTempView("temp_tab")
diff --git a/python/pyspark/sql/tests/test_dataframe.py 
b/python/pyspark/sql/tests/test_dataframe.py
index 43b937185970..b9862a129677 100644
--- a/python/pyspark/sql/tests/test_dataframe.py
+++ b/python/pyspark/sql/tests/test_dataframe.py
@@ -679,7 +679,7 @@ class DataFrameTestsMixin:
     def test_cache_table(self):
         spark = self.spark
         tables = ["tab1", "tab2", "tab3"]
-        with self.tempView(*tables):
+        with self.temp_view(*tables):
             for i, tab in enumerate(tables):
                 spark.createDataFrame([(2, i), (3, 
i)]).createOrReplaceTempView(tab)
                 self.assertFalse(spark.catalog.isCached(tab))
diff --git a/python/pyspark/sql/tests/test_functions.py 
b/python/pyspark/sql/tests/test_functions.py
index 23c895f5629e..654c5f42e347 100644
--- a/python/pyspark/sql/tests/test_functions.py
+++ b/python/pyspark/sql/tests/test_functions.py
@@ -214,7 +214,7 @@ class FunctionsTestsMixin:
         self.assertTrue(df.is_cached)
         self.assertEqual(2, df.count())
 
-        with self.tempView("temp"):
+        with self.temp_view("temp"):
             df.createOrReplaceTempView("temp")
             df = self.spark.sql("select foo from temp")
             df.count()
diff --git a/python/pyspark/sql/tests/test_group.py 
b/python/pyspark/sql/tests/test_group.py
index dd170622922b..d8b51e1e7e2d 100644
--- a/python/pyspark/sql/tests/test_group.py
+++ b/python/pyspark/sql/tests/test_group.py
@@ -86,7 +86,7 @@ class GroupTestsMixin:
             ["a", "b"],
         )
 
-        with self.tempView("v"):
+        with self.temp_view("v"):
             df.createOrReplaceTempView("v")
 
             # basic case
@@ -158,7 +158,7 @@ class GroupTestsMixin:
             ["a", "b"],
         )
 
-        with self.tempView("v"):
+        with self.temp_view("v"):
             df.createOrReplaceTempView("v")
 
             df1 = spark.sql("select * from v order by 1 desc;")
diff --git a/python/pyspark/sql/tests/test_sql.py 
b/python/pyspark/sql/tests/test_sql.py
index e60ad183d147..77a4c0459624 100644
--- a/python/pyspark/sql/tests/test_sql.py
+++ b/python/pyspark/sql/tests/test_sql.py
@@ -28,7 +28,7 @@ class SQLTestsMixin:
         self.assertEqual(res[0][0], 2)
 
     def test_args_dict(self):
-        with self.tempView("test"):
+        with self.temp_view("test"):
             self.spark.range(10).createOrReplaceTempView("test")
             df = self.spark.sql(
                 "SELECT * FROM IDENTIFIER(:table_name)",
@@ -43,7 +43,7 @@ class SQLTestsMixin:
             self.assertEqual(df.tail(1), [Row(id=9)])
 
     def test_args_list(self):
-        with self.tempView("test"):
+        with self.temp_view("test"):
             self.spark.range(10).createOrReplaceTempView("test")
             df = self.spark.sql(
                 "SELECT * FROM test WHERE ? < id AND id < ?",
@@ -58,7 +58,7 @@ class SQLTestsMixin:
             self.assertEqual(df.tail(1), [Row(id=5)])
 
     def test_kwargs_literal(self):
-        with self.tempView("test"):
+        with self.temp_view("test"):
             self.spark.range(10).createOrReplaceTempView("test")
 
             df = self.spark.sql(
@@ -75,7 +75,7 @@ class SQLTestsMixin:
             self.assertEqual(df.tail(1), [Row(id=9)])
 
     def test_kwargs_literal_multiple_ref(self):
-        with self.tempView("test"):
+        with self.temp_view("test"):
             self.spark.range(10).createOrReplaceTempView("test")
 
             df = self.spark.sql(
@@ -115,7 +115,7 @@ class SQLTestsMixin:
         self.assertEqual(df1.tail(1), [Row(id=8)])
 
     def test_nested_view(self):
-        with self.tempView("v1", "v2", "v3", "v4"):
+        with self.temp_view("v1", "v2", "v3", "v4"):
             self.spark.range(10).createOrReplaceTempView("v1")
             self.spark.sql(
                 "SELECT * FROM IDENTIFIER(:view) WHERE id > :m",
diff --git a/python/pyspark/sql/tests/test_subquery.py 
b/python/pyspark/sql/tests/test_subquery.py
index 7c87f4b46cc6..7a5b4a6de5fe 100644
--- a/python/pyspark/sql/tests/test_subquery.py
+++ b/python/pyspark/sql/tests/test_subquery.py
@@ -95,7 +95,7 @@ class SubqueryTestsMixin:
         )
 
     def test_uncorrelated_scalar_subquery_with_view(self):
-        with self.tempView("subqueryData"):
+        with self.temp_view("subqueryData"):
             df = self.spark.createDataFrame(
                 [(1, "one"), (2, "two"), (3, "three")], ["key", "value"]
             )
@@ -157,7 +157,7 @@ class SubqueryTestsMixin:
             )
 
     def test_scalar_subquery_against_local_relations(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             self.spark.createDataFrame([(1, 1), (2, 2)], ["c1", 
"c2"]).createOrReplaceTempView("t1")
             self.spark.createDataFrame([(1, 1), (2, 2)], ["c1", 
"c2"]).createOrReplaceTempView("t2")
 
@@ -203,7 +203,7 @@ class SubqueryTestsMixin:
             )
 
     def test_correlated_scalar_subquery(self):
-        with self.tempView("l", "r"):
+        with self.temp_view("l", "r"):
             self.df1.createOrReplaceTempView("l")
             self.df2.createOrReplaceTempView("r")
 
@@ -368,7 +368,7 @@ class SubqueryTestsMixin:
                 )
 
     def test_exists_subquery(self):
-        with self.tempView("l", "r"):
+        with self.temp_view("l", "r"):
             self.df1.createOrReplaceTempView("l")
             self.df2.createOrReplaceTempView("r")
 
@@ -460,7 +460,7 @@ class SubqueryTestsMixin:
                 )
 
     def test_in_subquery(self):
-        with self.tempView("l", "r", "t"):
+        with self.temp_view("l", "r", "t"):
             self.df1.createOrReplaceTempView("l")
             self.df2.createOrReplaceTempView("r")
             self.spark.table("r").filter(
@@ -500,7 +500,7 @@ class SubqueryTestsMixin:
                     ),
                 )
 
-            with self.subTest("IN with struct"), self.tempView("ll", "rr"):
+            with self.subTest("IN with struct"), self.temp_view("ll", "rr"):
                 self.spark.table("l").select(
                     "*", sf.struct("a", "b").alias("sab")
                 ).createOrReplaceTempView("ll")
@@ -665,7 +665,7 @@ class SubqueryTestsMixin:
                 )
 
     def test_scalar_subquery_with_missing_outer_reference(self):
-        with self.tempView("l", "r"):
+        with self.temp_view("l", "r"):
             self.df1.createOrReplaceTempView("l")
             self.df2.createOrReplaceTempView("r")
 
@@ -706,7 +706,7 @@ class SubqueryTestsMixin:
         return self.spark.table("t3")
 
     def test_lateral_join_with_single_column_select(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.table1()
             t2 = self.table2()
 
@@ -724,7 +724,7 @@ class SubqueryTestsMixin:
             )
 
     def test_lateral_join_with_star_expansion(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.table1()
             t2 = self.table2()
 
@@ -746,7 +746,7 @@ class SubqueryTestsMixin:
             )
 
     def test_lateral_join_with_different_join_types(self):
-        with self.tempView("t1"):
+        with self.temp_view("t1"):
             t1 = self.table1()
 
             assertDataFrameEqual(
@@ -800,7 +800,7 @@ class SubqueryTestsMixin:
             )
 
     def test_lateral_join_with_subquery_alias(self):
-        with self.tempView("t1"):
+        with self.temp_view("t1"):
             t1 = self.table1()
 
             assertDataFrameEqual(
@@ -814,7 +814,7 @@ class SubqueryTestsMixin:
             )
 
     def test_lateral_join_with_correlated_predicates(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.table1()
             t2 = self.table2()
 
@@ -836,7 +836,7 @@ class SubqueryTestsMixin:
             )
 
     def test_lateral_join_with_aggregation_and_correlated_predicates(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.table1()
             t2 = self.table2()
 
@@ -854,7 +854,7 @@ class SubqueryTestsMixin:
             )
 
     def test_lateral_join_reference_preceding_from_clause_items(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.table1()
             t2 = self.table2()
 
@@ -866,7 +866,7 @@ class SubqueryTestsMixin:
             )
 
     def test_multiple_lateral_joins(self):
-        with self.tempView("t1"):
+        with self.temp_view("t1"):
             t1 = self.table1()
 
             assertDataFrameEqual(
@@ -896,7 +896,7 @@ class SubqueryTestsMixin:
             )
 
     def test_lateral_join_in_between_regular_joins(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.table1()
             t2 = self.table2()
 
@@ -917,7 +917,7 @@ class SubqueryTestsMixin:
             )
 
     def test_nested_lateral_joins(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.table1()
             t2 = self.table2()
 
@@ -942,7 +942,7 @@ class SubqueryTestsMixin:
             )
 
     def test_scalar_subquery_inside_lateral_join(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.table1()
             t2 = self.table2()
 
@@ -976,7 +976,7 @@ class SubqueryTestsMixin:
             )
 
     def test_lateral_join_inside_subquery(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.table1()
             t2 = self.table2()
 
@@ -1014,7 +1014,7 @@ class SubqueryTestsMixin:
             )
 
     def test_lateral_join_with_table_valued_functions(self):
-        with self.tempView("t1", "t3"):
+        with self.temp_view("t1", "t3"):
             t1 = self.table1()
             t3 = self.table3()
 
@@ -1044,7 +1044,7 @@ class SubqueryTestsMixin:
             )
 
     def 
test_lateral_join_with_table_valued_functions_and_join_conditions(self):
-        with self.tempView("t1", "t3"):
+        with self.temp_view("t1", "t3"):
             t1 = self.table1()
             t3 = self.table3()
 
@@ -1076,7 +1076,7 @@ class SubqueryTestsMixin:
             )
 
     def test_subquery_with_generator_and_tvf(self):
-        with self.tempView("t1"):
+        with self.temp_view("t1"):
             t1 = self.table1()
 
             assertDataFrameEqual(
@@ -1089,7 +1089,7 @@ class SubqueryTestsMixin:
             )
 
     def test_subquery_in_join_condition(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.table1()
             t2 = self.table2()
 
@@ -1102,7 +1102,7 @@ class SubqueryTestsMixin:
         self.check_subquery_in_unpivot(QueryContextType.DataFrame, "exists")
 
     def check_subquery_in_unpivot(self, query_context_type, fragment):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.table1()
             t2 = self.table2()
 
@@ -1121,7 +1121,7 @@ class SubqueryTestsMixin:
             )
 
     def test_subquery_in_transpose(self):
-        with self.tempView("t1"):
+        with self.temp_view("t1"):
             t1 = self.table1()
 
             with self.assertRaises(AnalysisException) as pe:
@@ -1134,7 +1134,7 @@ class SubqueryTestsMixin:
             )
 
     def test_subquery_in_with_columns(self):
-        with self.tempView("t1"):
+        with self.temp_view("t1"):
             t1 = self.table1()
 
             assertDataFrameEqual(
@@ -1169,7 +1169,7 @@ class SubqueryTestsMixin:
             )
 
     def test_subquery_in_with_columns_renamed(self):
-        with self.tempView("t1"):
+        with self.temp_view("t1"):
             t1 = self.table1()
 
             assertDataFrameEqual(
@@ -1185,13 +1185,13 @@ class SubqueryTestsMixin:
             )
 
     def test_subquery_in_drop(self):
-        with self.tempView("t1"):
+        with self.temp_view("t1"):
             t1 = self.table1()
 
             
assertDataFrameEqual(t1.drop(self.spark.range(1).select(sf.lit("c1")).scalar()),
 t1)
 
     def test_subquery_in_repartition(self):
-        with self.tempView("t1"):
+        with self.temp_view("t1"):
             t1 = self.table1()
 
             
assertDataFrameEqual(t1.repartition(self.spark.range(1).select(sf.lit(1)).scalar()),
 t1)
diff --git a/python/pyspark/sql/tests/test_tvf.py 
b/python/pyspark/sql/tests/test_tvf.py
index c7274c0810cf..7493ceb5fd64 100644
--- a/python/pyspark/sql/tests/test_tvf.py
+++ b/python/pyspark/sql/tests/test_tvf.py
@@ -53,7 +53,7 @@ class TVFTestsMixin:
         assertDataFrameEqual(actual=actual, expected=expected)
 
     def test_explode_with_lateral_join(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.spark.sql("VALUES (0, 1), (1, 2) AS t1(c1, c2)")
             t1.createOrReplaceTempView("t1")
             t3 = self.spark.sql(
@@ -115,7 +115,7 @@ class TVFTestsMixin:
         assertDataFrameEqual(actual=actual, expected=expected)
 
     def test_explode_outer_with_lateral_join(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.spark.sql("VALUES (0, 1), (1, 2) AS t1(c1, c2)")
             t1.createOrReplaceTempView("t1")
             t3 = self.spark.sql(
@@ -180,7 +180,7 @@ class TVFTestsMixin:
         assertDataFrameEqual(actual=actual, expected=expected)
 
     def test_inline_with_lateral_join(self):
-        with self.tempView("array_struct"):
+        with self.temp_view("array_struct"):
             array_struct = self.spark.sql(
                 """
                 VALUES
@@ -239,7 +239,7 @@ class TVFTestsMixin:
         assertDataFrameEqual(actual=actual, expected=expected)
 
     def test_inline_outer_with_lateral_join(self):
-        with self.tempView("array_struct"):
+        with self.temp_view("array_struct"):
             array_struct = self.spark.sql(
                 """
                 VALUES
@@ -282,7 +282,7 @@ class TVFTestsMixin:
         )
 
     def test_json_tuple_with_lateral_join(self):
-        with self.tempView("json_table"):
+        with self.temp_view("json_table"):
             json_table = self.spark.sql(
                 """
                 VALUES
@@ -369,7 +369,7 @@ class TVFTestsMixin:
         assertDataFrameEqual(actual=actual, expected=expected)
 
     def test_posexplode_with_lateral_join(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.spark.sql("VALUES (0, 1), (1, 2) AS t1(c1, c2)")
             t1.createOrReplaceTempView("t1")
             t3 = self.spark.sql(
@@ -431,7 +431,7 @@ class TVFTestsMixin:
         assertDataFrameEqual(actual=actual, expected=expected)
 
     def test_posexplode_outer_with_lateral_join(self):
-        with self.tempView("t1", "t2"):
+        with self.temp_view("t1", "t2"):
             t1 = self.spark.sql("VALUES (0, 1), (1, 2) AS t1(c1, c2)")
             t1.createOrReplaceTempView("t1")
             t3 = self.spark.sql(
@@ -472,7 +472,7 @@ class TVFTestsMixin:
         assertDataFrameEqual(actual=actual, expected=expected)
 
     def test_stack_with_lateral_join(self):
-        with self.tempView("t1", "t3"):
+        with self.temp_view("t1", "t3"):
             t1 = self.spark.sql("VALUES (0, 1), (1, 2) AS t1(c1, c2)")
             t1.createOrReplaceTempView("t1")
             t3 = self.spark.sql(
@@ -562,7 +562,7 @@ class TVFTestsMixin:
         assertDataFrameEqual(actual=actual, expected=expected)
 
     def test_variant_explode_with_lateral_join(self):
-        with self.tempView("variant_table"):
+        with self.temp_view("variant_table"):
             variant_table = self.spark.sql(
                 """
                 SELECT id, parse_json(v) AS v FROM VALUES
@@ -621,7 +621,7 @@ class TVFTestsMixin:
         assertDataFrameEqual(actual=actual, expected=expected)
 
     def test_variant_explode_outer_with_lateral_join(self):
-        with self.tempView("variant_table"):
+        with self.temp_view("variant_table"):
             variant_table = self.spark.sql(
                 """
                 SELECT id, parse_json(v) AS v FROM VALUES
diff --git a/python/pyspark/sql/tests/test_types.py 
b/python/pyspark/sql/tests/test_types.py
index 5f5314a03c32..8d977d3692de 100644
--- a/python/pyspark/sql/tests/test_types.py
+++ b/python/pyspark/sql/tests/test_types.py
@@ -149,7 +149,7 @@ class TypesTestsMixin:
         self.assertEqual([], df.rdd.map(lambda r: r.l).first())
         self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
 
-        with self.tempView("test"):
+        with self.temp_view("test"):
             df.createOrReplaceTempView("test")
             result = self.spark.sql("SELECT l from test")
             self.assertEqual([], result.head()[0])
@@ -165,7 +165,7 @@ class TypesTestsMixin:
         self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
         self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
 
-        with self.tempView("test2"):
+        with self.temp_view("test2"):
             df2.createOrReplaceTempView("test2")
             result = self.spark.sql("SELECT l from test2")
             self.assertEqual([], result.head()[0])
@@ -612,7 +612,7 @@ class TypesTestsMixin:
         )
         self.assertEqual(r, results.first())
 
-        with self.tempView("table2"):
+        with self.temp_view("table2"):
             df.createOrReplaceTempView("table2")
             r = self.spark.sql(
                 "SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, "
@@ -627,7 +627,7 @@ class TypesTestsMixin:
         self.assertEqual(1, row.asDict()["l"][0].a)
         df = self.spark.createDataFrame([row])
 
-        with self.tempView("test"):
+        with self.temp_view("test"):
             df.createOrReplaceTempView("test")
             row = self.spark.sql("select l, d from test").head()
             self.assertEqual(1, row.asDict()["l"][0].a)
@@ -1112,7 +1112,7 @@ class TypesTestsMixin:
         field = [f for f in schema.fields if f.name == "point"][0]
         self.assertEqual(type(field.dataType), ExamplePointUDT)
 
-        with self.tempView("labeled_point"):
+        with self.temp_view("labeled_point"):
             df.createOrReplaceTempView("labeled_point")
             point = self.spark.sql("SELECT point FROM 
labeled_point").head().point
             self.assertEqual(point, ExamplePoint(1.0, 2.0))
@@ -1123,7 +1123,7 @@ class TypesTestsMixin:
         field = [f for f in schema.fields if f.name == "point"][0]
         self.assertEqual(type(field.dataType), PythonOnlyUDT)
 
-        with self.tempView("labeled_point"):
+        with self.temp_view("labeled_point"):
             df.createOrReplaceTempView("labeled_point")
             point = self.spark.sql("SELECT point FROM 
labeled_point").head().point
             self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
@@ -1135,7 +1135,7 @@ class TypesTestsMixin:
         field = [f for f in schema.fields if f.name == "point"][0]
         self.assertEqual(type(field.dataType), ExamplePointUDT)
 
-        with self.tempView("labeled_point"):
+        with self.temp_view("labeled_point"):
             df.createOrReplaceTempView("labeled_point")
             point = self.spark.sql("SELECT point FROM 
labeled_point").head().point
             self.assertEqual(point, ExamplePoint(1.0, 2.0))
@@ -1146,7 +1146,7 @@ class TypesTestsMixin:
         field = [f for f in schema.fields if f.name == "point"][0]
         self.assertEqual(type(field.dataType), PythonOnlyUDT)
 
-        with self.tempView("labeled_point"):
+        with self.temp_view("labeled_point"):
             df.createOrReplaceTempView("labeled_point")
             point = self.spark.sql("SELECT point FROM 
labeled_point").head().point
             self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
diff --git a/python/pyspark/sql/tests/test_udf.py 
b/python/pyspark/sql/tests/test_udf.py
index 9c5fa2ad1bba..0a5f9add5563 100644
--- a/python/pyspark/sql/tests/test_udf.py
+++ b/python/pyspark/sql/tests/test_udf.py
@@ -102,7 +102,7 @@ class BaseUDFTestsMixin(object):
             self.assertEqual(row[0], 4)
 
     def test_udf2(self):
-        with self.tempView("test"):
+        with self.temp_view("test"):
             self.spark.catalog.registerFunction("strlen", lambda string: 
len(string), IntegerType())
             self.spark.createDataFrame([("test",)], 
["a"]).createOrReplaceTempView("test")
             [res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) 
> 1").collect()
@@ -323,7 +323,7 @@ class BaseUDFTestsMixin(object):
             self.assertEqual(row[0], "bar")
 
     def test_udf_with_array_type(self):
-        with self.tempView("test"), self.temp_func("copylist", "maplen"):
+        with self.temp_view("test"), self.temp_func("copylist", "maplen"):
             self.spark.createDataFrame(
                 [
                     ([0, 1, 2], {"key": [0, 1, 2, 3, 4]}),
@@ -922,7 +922,7 @@ class BaseUDFTestsMixin(object):
     # SPARK-26293
     def test_udf_in_subquery(self):
         f = udf(lambda x: x, "long")
-        with self.tempView("v"):
+        with self.temp_view("v"):
             self.spark.range(1).filter(f("id") >= 0).createTempView("v")
             result = self.spark.sql(
                 "select i from values(0L) as data(i) where i in (select id 
from v)"
diff --git a/python/pyspark/sql/tests/test_udtf.py 
b/python/pyspark/sql/tests/test_udtf.py
index de57c8d0cf38..08e2c036612f 100644
--- a/python/pyspark/sql/tests/test_udtf.py
+++ b/python/pyspark/sql/tests/test_udtf.py
@@ -1400,7 +1400,7 @@ class BaseUDTFTestsMixin:
         func = self.udtf_for_table_argument()
         self.spark.udtf.register("test_udtf", func)
 
-        with self.tempView("v"):
+        with self.temp_view("v"):
             self.spark.sql("CREATE OR REPLACE TEMPORARY VIEW v as SELECT id 
FROM range(0, 8)")
             assertDataFrameEqual(
                 self.spark.sql("SELECT * FROM test_udtf(TABLE (v))"),
@@ -1416,7 +1416,7 @@ class BaseUDTFTestsMixin:
         func = udtf(TestUDTF, returnType="a: int")
         self.spark.udtf.register("test_udtf", func)
 
-        with self.tempView("v"):
+        with self.temp_view("v"):
             self.spark.sql("CREATE OR REPLACE TEMPORARY VIEW v as SELECT id 
FROM range(0, 8)")
             assertDataFrameEqual(
                 self.spark.sql("SELECT * FROM test_udtf(5, TABLE (v))"),
diff --git a/python/pyspark/sql/tests/test_unified_udf.py 
b/python/pyspark/sql/tests/test_unified_udf.py
index b6fd9be0e325..c0a82d00173a 100644
--- a/python/pyspark/sql/tests/test_unified_udf.py
+++ b/python/pyspark/sql/tests/test_unified_udf.py
@@ -177,7 +177,7 @@ class UnifiedUDFTestsMixin:
         result1 = df.withColumn("res", pd_win_max("v").over(w)).collect()
         self.assertEqual(result1, expected)
 
-        with self.tempView("pd_tbl"), self.temp_func("pd_win_max"):
+        with self.temp_view("pd_tbl"), self.temp_func("pd_win_max"):
             df.createOrReplaceTempView("pd_tbl")
             self.spark.udf.register("pd_win_max", pd_win_max)
 
@@ -355,7 +355,7 @@ class UnifiedUDFTestsMixin:
         result1 = df.withColumn("mean_v", pa_win_max("v").over(w)).collect()
         self.assertEqual(result1, expected)
 
-        with self.tempView("pa_tbl"), self.temp_func("pa_win_max"):
+        with self.temp_view("pa_tbl"), self.temp_func("pa_win_max"):
             df.createOrReplaceTempView("pa_tbl")
             self.spark.udf.register("pa_win_max", pa_win_max)
 
diff --git a/python/pyspark/testing/sqlutils.py 
b/python/pyspark/testing/sqlutils.py
index 927e4f4250c3..2431f19add3b 100644
--- a/python/pyspark/testing/sqlutils.py
+++ b/python/pyspark/testing/sqlutils.py
@@ -134,7 +134,7 @@ class SQLTestUtils:
                 self.spark.sql("DROP TABLE IF EXISTS %s" % t)
 
     @contextmanager
-    def tempView(self, *views):
+    def temp_view(self, *views):
         """
         A convenient context manager to test with some specific views. This 
drops the given views
         if it exists.


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]


Reply via email to