zhengruifeng commented on code in PR #54084:
URL: https://github.com/apache/spark/pull/54084#discussion_r2757619260


##########
python/pyspark/testing/goldenutils.py:
##########
@@ -0,0 +1,497 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from typing import Any, Callable, Iterable, Optional, TYPE_CHECKING
+import concurrent.futures
+import inspect
+import os
+import re
+import time
+
+from pyspark.testing.utils import have_pandas, have_numpy
+
+if have_pandas:
+    import pandas as pd
+if have_numpy:
+    import numpy as np
+
+if TYPE_CHECKING:
+    from pyspark.sql.types import DataType
+
+
+class GoldenFileTestMixin:
+    """
+    Mixin class providing utilities for golden file based testing.
+
+    Golden files are CSV files that store expected test results. This mixin 
provides:
+    - Timezone setup/teardown for deterministic results
+    - Golden file read/write with SPARK_GENERATE_GOLDEN_FILES env var support
+    - Result string cleaning utilities
+
+    To regenerate golden files, set SPARK_GENERATE_GOLDEN_FILES=1 before 
running tests.
+    """
+
+    _tz_prev: Optional[str] = None
+
+    @classmethod
+    def setUpClass(cls) -> None:
+        """Setup test class with timezone configuration."""
+        super().setUpClass()
+        cls.setup_timezone()
+
+    @classmethod
+    def tearDownClass(cls) -> None:
+        """Teardown test class and restore timezone."""
+        cls.teardown_timezone()
+        super().tearDownClass()
+
+    @classmethod
+    def setup_timezone(cls, tz: str = "America/Los_Angeles") -> None:
+        """
+        Setup timezone for deterministic test results.
+        Synchronizes timezone between Python and Java.
+        """
+        cls._tz_prev = os.environ.get("TZ", None)
+        os.environ["TZ"] = tz
+        time.tzset()
+
+        cls.sc.environment["TZ"] = tz
+        cls.spark.conf.set("spark.sql.session.timeZone", tz)
+
+    @classmethod
+    def teardown_timezone(cls) -> None:
+        """Restore original timezone."""
+        if "TZ" in os.environ:
+            del os.environ["TZ"]
+        if cls._tz_prev is not None:
+            os.environ["TZ"] = cls._tz_prev
+        time.tzset()
+
+    @staticmethod
+    def is_generating_golden() -> bool:
+        """Check if we are generating golden files (vs testing against 
them)."""
+        return os.environ.get("SPARK_GENERATE_GOLDEN_FILES", "0") == "1"
+
+    @staticmethod
+    def load_golden_csv(golden_csv: str, use_index: bool = True) -> 
"pd.DataFrame":
+        """
+        Load golden file from CSV.
+
+        Parameters
+        ----------
+        golden_csv : str
+            Path to the golden CSV file.
+        use_index : bool
+            If True, use first column as index (for matrix format).
+            If False, don't use index (for row list format).
+
+        Returns
+        -------
+        pd.DataFrame
+            The loaded golden data with string dtype.
+        """
+        return pd.read_csv(
+            golden_csv,
+            sep="\t",
+            index_col=0 if use_index else None,
+            dtype="str",
+            na_filter=False,
+            engine="python",
+        )
+
+    @staticmethod
+    def save_golden(df: "pd.DataFrame", golden_csv: str, golden_md: 
Optional[str] = None) -> None:
+        """
+        Save DataFrame as golden file (CSV and optionally Markdown).
+
+        Parameters
+        ----------
+        df : pd.DataFrame
+            The DataFrame to save.
+        golden_csv : str
+            Path to save the CSV file.
+        golden_md : str, optional
+            Path to save the Markdown file. Requires tabulate package.
+        """
+        df.to_csv(golden_csv, sep="\t", header=True, index=True)
+
+        if golden_md is not None:
+            try:
+                df.to_markdown(golden_md, index=True, tablefmt="github")
+            except Exception as e:
+                import warnings
+
+                warnings.warn(
+                    f"Failed to write markdown file {golden_md}: {e}. "
+                    "Install 'tabulate' package to generate markdown files."
+                )
+
+    @staticmethod
+    def repr_spark_type(spark_type: "DataType") -> str:
+        """Convert Spark type to string representation."""
+        return spark_type.simpleString()
+
+    @classmethod
+    def repr_value(
+        cls,
+        value: Any,
+        max_len: int = 32,
+        type_override: Optional[str] = None,
+    ) -> str:
+        """
+        Convert Python value to string representation for golden file.
+
+        Format: "value_str@type_info"
+        - For numpy.ndarray: includes dtype, e.g., "[1 2]@ndarray[int64]"
+        - For pandas.DataFrame: includes schema, e.g., "{...}@DataFrame[_1 
int64]"
+        - For list: includes element types, e.g., "[1, 2]@List[int]"
+        - For other types: uses type name, e.g., "True@bool"
+
+        Java object hash codes are normalized (e.g., @69420149 -> @<hash>)
+        for deterministic test results.
+
+        Parameters
+        ----------
+        value : Any
+            The Python value to represent.
+        max_len : int, default 32
+            Maximum length for the value string portion.
+        type_override : str, optional
+            If provided, use this as the type string instead of auto-detecting.
+
+        Returns
+        -------
+        str
+            String representation in format "value@type".
+        """
+        # Get value string representation
+        if have_pandas and isinstance(value, pd.DataFrame):
+            v_str = value.to_json()
+        elif have_pandas and isinstance(value, pd.Series):
+            v_str = str(value.tolist())
+        else:
+            v_str = str(value)
+
+        # Get type string
+        type_str = type_override if type_override is not None else 
cls.repr_type(value)
+
+        # Clean up: replace newlines, normalize Java hash codes, then truncate
+        v_str = v_str.replace("\n", " ")
+        v_str = re.sub(r"@[a-fA-F0-9]+", "@<hash>", v_str)
+        v_str = v_str[:max_len]
+        return f"{v_str}@{type_str}"
+
+    @classmethod
+    def repr_type(cls, value: Any) -> str:
+        """
+        Get the type representation string for a value (recursively for 
containers).
+
+        Parameters
+        ----------
+        value : Any
+            The value to get type representation for.
+
+        Returns
+        -------
+        str
+            Type string, e.g., "int", "list[int | NoneType]", "DataFrame[col1 
int64]".
+        """
+        return cls._repr_element_type(value)
+
+    @classmethod
+    def _repr_element_type(cls, elem: Any) -> str:
+        """
+        Recursively get the type representation for an element.
+
+        For containers (list, dict, tuple, DataFrame), inspects nested element 
types.
+        """
+        if elem is None:
+            return "NoneType"
+        elif have_pandas and isinstance(elem, pd.DataFrame):

Review Comment:
   I think we don't need to check `have_pandas` here — it should always be true in this context.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to