This is an automated email from the ASF dual-hosted git repository.

ruifengz pushed a commit to branch branch-3.5
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.5 by this push:
     new fb85a4c199a [SPARK-43611][SPARK-44602][SQL][PS][CONNECT][3.5] Make 
`ExtractWindowExpressions` & `WidenSetOperationTypes` retain the `PLAN_ID_TAG`
fb85a4c199a is described below

commit fb85a4c199a49f26af9f7cd1c9e450508d21d089
Author: Ruifeng Zheng <ruife...@apache.org>
AuthorDate: Tue Aug 1 13:43:26 2023 +0800

    [SPARK-43611][SPARK-44602][SQL][PS][CONNECT][3.5] Make 
`ExtractWindowExpressions` & `WidenSetOperationTypes` retain the `PLAN_ID_TAG`
    
    ### What changes were proposed in this pull request?
    
    Backport for https://github.com/apache/spark/pull/42086 and 
https://github.com/apache/spark/pull/42230
    
    ### Why are the changes needed?
    
    For functionality parity with non-Connect Spark.
    
    ### Does this PR introduce _any_ user-facing change?
    
    Yes, this enables a couple of pandas APIs on Spark Connect.
    
    ### How was this patch tested?
    
    Re-enabled the existing unit tests that were previously skipped.
    
    Closes #42252 from itholic/SPARK-43611-3.5.
    
    Lead-authored-by: Ruifeng Zheng <ruife...@apache.org>
    Co-authored-by: itholic <haejoon....@databricks.com>
    Signed-off-by: Ruifeng Zheng <ruife...@apache.org>
---
 .../connect/computation/test_parity_combine.py     |  11 +-
 .../computation/test_parity_missing_data.py        |  30 ------
 .../tests/connect/indexes/test_parity_base.py      |  12 ---
 .../tests/connect/series/test_parity_arg_ops.py    |   6 +-
 .../tests/connect/series/test_parity_compute.py    |  16 ---
 .../tests/connect/series/test_parity_cumulative.py |  25 +----
 .../tests/connect/series/test_parity_index.py      |   7 +-
 .../connect/series/test_parity_missing_data.py     |  35 +-----
 .../tests/connect/series/test_parity_stat.py       |  11 +-
 .../pandas/tests/connect/test_parity_ewm.py        |  12 +--
 .../pandas/tests/connect/test_parity_expanding.py  | 120 +--------------------
 .../pandas/tests/connect/test_parity_namespace.py  |  12 ---
 .../test_parity_ops_on_diff_frames_groupby.py      |  48 +--------
 ..._parity_ops_on_diff_frames_groupby_expanding.py |  42 +-------
 .../pandas/tests/connect/test_parity_rolling.py    | 120 +--------------------
 .../spark/sql/catalyst/analysis/Analyzer.scala     |  12 ++-
 .../spark/sql/catalyst/analysis/TypeCoercion.scala |  13 ++-
 17 files changed, 32 insertions(+), 500 deletions(-)

diff --git 
a/python/pyspark/pandas/tests/connect/computation/test_parity_combine.py 
b/python/pyspark/pandas/tests/connect/computation/test_parity_combine.py
index 175404ff750..af23600055e 100644
--- a/python/pyspark/pandas/tests/connect/computation/test_parity_combine.py
+++ b/python/pyspark/pandas/tests/connect/computation/test_parity_combine.py
@@ -16,22 +16,13 @@
 #
 import unittest
 
-from pyspark import pandas as ps
 from pyspark.pandas.tests.computation.test_combine import FrameCombineMixin
 from pyspark.testing.connectutils import ReusedConnectTestCase
 from pyspark.testing.pandasutils import PandasOnSparkTestUtils
 
 
 class FrameParityCombineTests(FrameCombineMixin, PandasOnSparkTestUtils, 
ReusedConnectTestCase):
-    @property
-    def psdf(self):
-        return ps.from_pandas(self.pdf)
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_append(self):
-        super().test_append()
+    pass
 
 
 if __name__ == "__main__":
diff --git 
a/python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py 
b/python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py
index a88c8692eca..d2ff09e5e8a 100644
--- 
a/python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py
+++ 
b/python/pyspark/pandas/tests/connect/computation/test_parity_missing_data.py
@@ -29,36 +29,6 @@ class FrameParityMissingDataTests(
     def psdf(self):
         return ps.from_pandas(self.pdf)
 
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_backfill(self):
-        super().test_backfill()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_bfill(self):
-        super().test_bfill()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_ffill(self):
-        super().test_ffill()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_fillna(self):
-        return super().test_fillna()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_pad(self):
-        super().test_pad()
-
 
 if __name__ == "__main__":
     from pyspark.pandas.tests.connect.computation.test_parity_missing_data 
import *  # noqa: F401
diff --git a/python/pyspark/pandas/tests/connect/indexes/test_parity_base.py 
b/python/pyspark/pandas/tests/connect/indexes/test_parity_base.py
index b1e185389f3..3cf4dc9b3d2 100644
--- a/python/pyspark/pandas/tests/connect/indexes/test_parity_base.py
+++ b/python/pyspark/pandas/tests/connect/indexes/test_parity_base.py
@@ -29,18 +29,6 @@ class IndexesParityTests(
     def psdf(self):
         return ps.from_pandas(self.pdf)
 
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_append(self):
-        super().test_append()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_monotonic(self):
-        super().test_monotonic()
-
     @unittest.skip("TODO(SPARK-43620): Support `Column` for 
SparkConnectColumn.__getitem__.")
     def test_factorize(self):
         super().test_factorize()
diff --git a/python/pyspark/pandas/tests/connect/series/test_parity_arg_ops.py 
b/python/pyspark/pandas/tests/connect/series/test_parity_arg_ops.py
index b3df55cb68e..bd17521dd84 100644
--- a/python/pyspark/pandas/tests/connect/series/test_parity_arg_ops.py
+++ b/python/pyspark/pandas/tests/connect/series/test_parity_arg_ops.py
@@ -22,11 +22,7 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils
 
 
 class SeriesParityArgOpsTests(SeriesArgOpsMixin, PandasOnSparkTestUtils, 
ReusedConnectTestCase):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_argsort(self):
-        super().test_argsort()
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/series/test_parity_compute.py 
b/python/pyspark/pandas/tests/connect/series/test_parity_compute.py
index 00e35b27e8f..f757d19ca69 100644
--- a/python/pyspark/pandas/tests/connect/series/test_parity_compute.py
+++ b/python/pyspark/pandas/tests/connect/series/test_parity_compute.py
@@ -22,22 +22,6 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils
 
 
 class SeriesParityComputeTests(SeriesComputeMixin, PandasOnSparkTestUtils, 
ReusedConnectTestCase):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_diff(self):
-        super().test_diff()
-
-    @unittest.skip("TODO(SPARK-43620): Support `Column` for 
SparkConnectColumn.__getitem__.")
-    def test_factorize(self):
-        super().test_factorize()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_shift(self):
-        super().test_shift()
-
     @unittest.skip(
         "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
     )
diff --git 
a/python/pyspark/pandas/tests/connect/series/test_parity_cumulative.py 
b/python/pyspark/pandas/tests/connect/series/test_parity_cumulative.py
index f7cd03e057a..c5c61e1f33b 100644
--- a/python/pyspark/pandas/tests/connect/series/test_parity_cumulative.py
+++ b/python/pyspark/pandas/tests/connect/series/test_parity_cumulative.py
@@ -24,29 +24,8 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils
 class SeriesParityCumulativeTests(
     SeriesCumulativeMixin, PandasOnSparkTestUtils, ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cummax(self):
-        super().test_cummax()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cummin(self):
-        super().test_cummin()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumprod(self):
-        super().test_cumprod()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumsum(self):
-        super().test_cumsum()
+
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/series/test_parity_index.py 
b/python/pyspark/pandas/tests/connect/series/test_parity_index.py
index 81da3e44d6d..2b92cce61fb 100644
--- a/python/pyspark/pandas/tests/connect/series/test_parity_index.py
+++ b/python/pyspark/pandas/tests/connect/series/test_parity_index.py
@@ -22,11 +22,8 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils
 
 
 class SeriesParityIndexTests(SeriesIndexMixin, PandasOnSparkTestUtils, 
ReusedConnectTestCase):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_reset_index_with_default_index_types(self):
-        super().test_reset_index_with_default_index_types()
+
+    pass
 
 
 if __name__ == "__main__":
diff --git 
a/python/pyspark/pandas/tests/connect/series/test_parity_missing_data.py 
b/python/pyspark/pandas/tests/connect/series/test_parity_missing_data.py
index e648173289c..a95b312bce6 100644
--- a/python/pyspark/pandas/tests/connect/series/test_parity_missing_data.py
+++ b/python/pyspark/pandas/tests/connect/series/test_parity_missing_data.py
@@ -24,41 +24,8 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils
 class SeriesParityMissingDataTests(
     SeriesMissingDataMixin, PandasOnSparkTestUtils, ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_backfill(self):
-        super().test_backfill()
 
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_bfill(self):
-        super().test_bfill()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_ffill(self):
-        super().test_ffill()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_fillna(self):
-        super().test_fillna()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_pad(self):
-        super().test_pad()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_replace(self):
-        super().test_replace()
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/series/test_parity_stat.py 
b/python/pyspark/pandas/tests/connect/series/test_parity_stat.py
index 17e83fa3b47..916e120f99d 100644
--- a/python/pyspark/pandas/tests/connect/series/test_parity_stat.py
+++ b/python/pyspark/pandas/tests/connect/series/test_parity_stat.py
@@ -22,15 +22,8 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils
 
 
 class SeriesParityStatTests(SeriesStatMixin, PandasOnSparkTestUtils, 
ReusedConnectTestCase):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_pct_change(self):
-        super().test_pct_change()
-
-    @unittest.skip("TODO(SPARK-43618): Fix pyspark.sq.column._unary_op to work 
with Spark Connect.")
-    def test_rank(self):
-        super().test_rank()
+
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/test_parity_ewm.py 
b/python/pyspark/pandas/tests/connect/test_parity_ewm.py
index e079f847296..74872820333 100644
--- a/python/pyspark/pandas/tests/connect/test_parity_ewm.py
+++ b/python/pyspark/pandas/tests/connect/test_parity_ewm.py
@@ -22,17 +22,7 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils, TestUtils
 
 
 class EWMParityTests(EWMTestsMixin, PandasOnSparkTestUtils, 
ReusedConnectTestCase, TestUtils):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_ewm_mean(self):
-        super().test_ewm_mean()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_ewm_func(self):
-        super().test_groupby_ewm_func()
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/test_parity_expanding.py 
b/python/pyspark/pandas/tests/connect/test_parity_expanding.py
index a6f2cf9bc3c..7f8b1a3cac2 100644
--- a/python/pyspark/pandas/tests/connect/test_parity_expanding.py
+++ b/python/pyspark/pandas/tests/connect/test_parity_expanding.py
@@ -24,125 +24,7 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils, TestUtils
 class ExpandingParityTests(
     ExpandingTestsMixin, PandasOnSparkTestUtils, TestUtils, 
ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_count(self):
-        super().test_expanding_count()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_kurt(self):
-        super().test_expanding_kurt()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_max(self):
-        super().test_expanding_max()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_mean(self):
-        super().test_expanding_mean()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_min(self):
-        super().test_expanding_min()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_quantile(self):
-        super().test_expanding_quantile()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_skew(self):
-        super().test_expanding_skew()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_std(self):
-        super().test_expanding_std()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_sum(self):
-        super().test_expanding_sum()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_expanding_var(self):
-        super().test_expanding_var()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_count(self):
-        super().test_groupby_expanding_count()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_kurt(self):
-        super().test_groupby_expanding_kurt()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_max(self):
-        super().test_groupby_expanding_max()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_mean(self):
-        super().test_groupby_expanding_mean()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_min(self):
-        super().test_groupby_expanding_min()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_quantile(self):
-        super().test_groupby_expanding_quantile()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_skew(self):
-        super().test_groupby_expanding_skew()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_std(self):
-        super().test_groupby_expanding_std()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_sum(self):
-        super().test_groupby_expanding_sum()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_var(self):
-        super().test_groupby_expanding_var()
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/test_parity_namespace.py 
b/python/pyspark/pandas/tests/connect/test_parity_namespace.py
index 72f638ca23c..db7f62fdbd5 100644
--- a/python/pyspark/pandas/tests/connect/test_parity_namespace.py
+++ b/python/pyspark/pandas/tests/connect/test_parity_namespace.py
@@ -22,18 +22,6 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils
 
 
 class NamespaceParityTests(NamespaceTestsMixin, PandasOnSparkTestUtils, 
ReusedConnectTestCase):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_concat_index_axis(self):
-        super().test_concat_index_axis()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_concat_multiindex_sort(self):
-        super().test_concat_multiindex_sort()
-
     @unittest.skip("TODO(SPARK-43655): Enable 
NamespaceParityTests.test_get_index_map.")
     def test_get_index_map(self):
         super().test_get_index_map()
diff --git 
a/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby.py 
b/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby.py
index 5d6b6a80b9b..685ec5c45c5 100644
--- 
a/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby.py
+++ 
b/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby.py
@@ -24,53 +24,7 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils
 class OpsOnDiffFramesGroupByParityTests(
     OpsOnDiffFramesGroupByTestsMixin, PandasOnSparkTestUtils, 
ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumcount(self):
-        super().test_cumcount()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cummax(self):
-        super().test_cummax()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cummin(self):
-        super().test_cummin()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumprod(self):
-        super().test_cumprod()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_cumsum(self):
-        super().test_cumsum()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_diff(self):
-        super().test_diff()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_fillna(self):
-        super().test_fillna()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_shift(self):
-        super().test_shift()
+    pass
 
 
 if __name__ == "__main__":
diff --git 
a/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_expanding.py
 
b/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_expanding.py
index 90fa36f3b98..c373268cdb2 100644
--- 
a/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_expanding.py
+++ 
b/python/pyspark/pandas/tests/connect/test_parity_ops_on_diff_frames_groupby_expanding.py
@@ -29,47 +29,7 @@ class OpsOnDiffFramesGroupByExpandingParityTests(
     TestUtils,
     ReusedConnectTestCase,
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_count(self):
-        super().test_groupby_expanding_count()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_min(self):
-        super().test_groupby_expanding_min()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_max(self):
-        super().test_groupby_expanding_max()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_mean(self):
-        super().test_groupby_expanding_mean()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_sum(self):
-        super().test_groupby_expanding_sum()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_std(self):
-        super().test_groupby_expanding_std()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_expanding_var(self):
-        super().test_groupby_expanding_var()
+    pass
 
 
 if __name__ == "__main__":
diff --git a/python/pyspark/pandas/tests/connect/test_parity_rolling.py 
b/python/pyspark/pandas/tests/connect/test_parity_rolling.py
index 712c1a10df9..8318bed24f0 100644
--- a/python/pyspark/pandas/tests/connect/test_parity_rolling.py
+++ b/python/pyspark/pandas/tests/connect/test_parity_rolling.py
@@ -24,125 +24,7 @@ from pyspark.testing.pandasutils import 
PandasOnSparkTestUtils, TestUtils
 class RollingParityTests(
     RollingTestsMixin, PandasOnSparkTestUtils, TestUtils, ReusedConnectTestCase
 ):
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_count(self):
-        super().test_groupby_rolling_count()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_kurt(self):
-        super().test_groupby_rolling_kurt()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_max(self):
-        super().test_groupby_rolling_max()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_mean(self):
-        super().test_groupby_rolling_mean()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_min(self):
-        super().test_groupby_rolling_min()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_quantile(self):
-        super().test_groupby_rolling_quantile()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_skew(self):
-        super().test_groupby_rolling_skew()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_std(self):
-        super().test_groupby_rolling_std()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_sum(self):
-        super().test_groupby_rolling_sum()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_groupby_rolling_var(self):
-        super().test_groupby_rolling_var()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_count(self):
-        super().test_rolling_count()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_kurt(self):
-        super().test_rolling_kurt()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_max(self):
-        super().test_rolling_max()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_mean(self):
-        super().test_rolling_mean()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_min(self):
-        super().test_rolling_min()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_quantile(self):
-        super().test_rolling_quantile()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_skew(self):
-        super().test_rolling_skew()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_std(self):
-        super().test_rolling_std()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_sum(self):
-        super().test_rolling_sum()
-
-    @unittest.skip(
-        "TODO(SPARK-43611): Fix unexpected `AnalysisException` from Spark 
Connect client."
-    )
-    def test_rolling_var(self):
-        super().test_rolling_var()
+    pass
 
 
 if __name__ == "__main__":
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index ed037f3fd98..30c6e4b4bc0 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -3116,7 +3116,9 @@ class Analyzer(override val catalogManager: 
CatalogManager) extends RuleExecutor
 
         // Finally, generate output columns according to the original 
projectList.
         val finalProjectList = aggregateExprs.map(_.toAttribute)
-        Project(finalProjectList, withWindow)
+        val newProject = Project(finalProjectList, withWindow)
+        newProject.copyTagsFrom(f)
+        newProject
 
       case p: LogicalPlan if !p.childrenResolved => p
 
@@ -3134,7 +3136,9 @@ class Analyzer(override val catalogManager: 
CatalogManager) extends RuleExecutor
 
         // Finally, generate output columns according to the original 
projectList.
         val finalProjectList = aggregateExprs.map(_.toAttribute)
-        Project(finalProjectList, withWindow)
+        val newProject = Project(finalProjectList, withWindow)
+        newProject.copyTagsFrom(a)
+        newProject
 
       // We only extract Window Expressions after all expressions of the 
Project
       // have been resolved, and lateral column aliases are properly handled 
first.
@@ -3151,7 +3155,9 @@ class Analyzer(override val catalogManager: 
CatalogManager) extends RuleExecutor
 
         // Finally, generate output columns according to the original 
projectList.
         val finalProjectList = projectList.map(_.toAttribute)
-        Project(finalProjectList, withWindow)
+        val newProject = Project(finalProjectList, withWindow)
+        newProject.copyTagsFrom(p)
+        newProject
     }
   }
 
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala
index bf9e461744e..190e72a8e66 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala
@@ -265,8 +265,10 @@ abstract class TypeCoercionBase {
             s -> Nil
           } else {
             assert(newChildren.length == 2)
+            val newExcept = Except(newChildren.head, newChildren.last, isAll)
+            newExcept.copyTagsFrom(s)
             val attrMapping = left.output.zip(newChildren.head.output)
-            Except(newChildren.head, newChildren.last, isAll) -> attrMapping
+            newExcept -> attrMapping
           }
 
         case s @ Intersect(left, right, isAll) if s.childrenResolved &&
@@ -276,19 +278,22 @@ abstract class TypeCoercionBase {
             s -> Nil
           } else {
             assert(newChildren.length == 2)
+            val newIntersect = Intersect(newChildren.head, newChildren.last, 
isAll)
+            newIntersect.copyTagsFrom(s)
             val attrMapping = left.output.zip(newChildren.head.output)
-            Intersect(newChildren.head, newChildren.last, isAll) -> attrMapping
+            newIntersect -> attrMapping
           }
 
         case s: Union if s.childrenResolved && !s.byName &&
           s.children.forall(_.output.length == s.children.head.output.length) 
&& !s.resolved =>
           val newChildren: Seq[LogicalPlan] = 
buildNewChildrenWithWiderTypes(s.children)
-
           if (newChildren.isEmpty) {
             s -> Nil
           } else {
             val attrMapping = 
s.children.head.output.zip(newChildren.head.output)
-            s.copy(children = newChildren) -> attrMapping
+            val newUnion = s.copy(children = newChildren)
+            newUnion.copyTagsFrom(s)
+            newUnion -> attrMapping
           }
       }
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to