techaddict commented on code in PR #39347:
URL: https://github.com/apache/spark/pull/39347#discussion_r1060253620


##########
python/pyspark/sql/connect/functions.py:
##########
@@ -2317,3 +2318,141 @@ def unwrap_udt(col: "ColumnOrName") -> Column:
 
 
 unwrap_udt.__doc__ = pysparkfuncs.unwrap_udt.__doc__
+
+
+def _test() -> None:
+    import os
+    import sys
+    import doctest
+    from pyspark import SparkContext, SparkConf
+    from pyspark.sql import SparkSession as PySparkSession
+    from pyspark.testing.connectutils import should_test_connect, connect_requirement_message
+
+    os.chdir(os.environ["SPARK_HOME"])
+
+    if should_test_connect:
+        import pyspark.sql.connect.functions
+
+        globs = pyspark.sql.connect.functions.__dict__.copy()
+        # Workaround to create a regular Spark session for the doctests
+        sc = SparkContext("local[4]", "sql.connect.functions tests", conf=SparkConf())
+        globs["_spark"] = PySparkSession(
+            sc, options={"spark.app.name": "sql.connect.functions tests"}
+        )
+
+        # TODO(SPARK-41833): fix collect() output
+        del pyspark.sql.connect.functions.array.__doc__
+        del pyspark.sql.connect.functions.array_distinct.__doc__
+        del pyspark.sql.connect.functions.array_except.__doc__
+        del pyspark.sql.connect.functions.array_intersect.__doc__
+        del pyspark.sql.connect.functions.array_remove.__doc__
+        del pyspark.sql.connect.functions.array_repeat.__doc__
+        del pyspark.sql.connect.functions.array_sort.__doc__
+        del pyspark.sql.connect.functions.array_union.__doc__
+        del pyspark.sql.connect.functions.collect_list.__doc__
+        del pyspark.sql.connect.functions.collect_set.__doc__
+        del pyspark.sql.connect.functions.concat.__doc__
+        del pyspark.sql.connect.functions.create_map.__doc__
+        del pyspark.sql.connect.functions.date_trunc.__doc__
+        del pyspark.sql.connect.functions.from_utc_timestamp.__doc__
+        del pyspark.sql.connect.functions.from_csv.__doc__
+        del pyspark.sql.connect.functions.from_json.__doc__
+        del pyspark.sql.connect.functions.isnull.__doc__
+        del pyspark.sql.connect.functions.reverse.__doc__
+        del pyspark.sql.connect.functions.sequence.__doc__
+        del pyspark.sql.connect.functions.slice.__doc__
+        del pyspark.sql.connect.functions.sort_array.__doc__
+        del pyspark.sql.connect.functions.split.__doc__
+        del pyspark.sql.connect.functions.struct.__doc__
+        del pyspark.sql.connect.functions.to_timestamp.__doc__
+        del pyspark.sql.connect.functions.to_utc_timestamp.__doc__
+        del pyspark.sql.connect.functions.unhex.__doc__
+
+        # TODO(SPARK-41825): Dataframe.show formatting int as double
+        del pyspark.sql.connect.functions.coalesce.__doc__
+        del pyspark.sql.connect.functions.sum_distinct.__doc__
+
+        # TODO(SPARK-41834): implement Dataframe.conf
+        del pyspark.sql.connect.functions.from_unixtime.__doc__
+        del pyspark.sql.connect.functions.timestamp_seconds.__doc__
+        del pyspark.sql.connect.functions.unix_timestamp.__doc__
+
+        # TODO(SPARK-41757): Fix String representation for Column class
+        del pyspark.sql.connect.functions.col.__doc__
+
+        # TODO: support data type: Timestamp(NANOSECOND, null)

Review Comment:
   Added the JIRA.
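
   For anyone skimming the hunk above: the `del ...__doc__` lines work because `doctest.testmod` discovers examples through each object's docstring, so dropping a function's docstring skips its doctests entirely. Below is a minimal, self-contained sketch of that skip-and-run pattern (illustrative names only; the actual tail of this hunk, where `doctest.testmod` is invoked, is truncated at the review anchor):

   ```python
   import doctest
   import sys


   def works(x: int) -> int:
       """Double a number.

       >>> works(2)
       4
       """
       return x * 2


   def known_broken(x: int) -> int:
       """A function whose doctest output is known to be wrong.

       >>> known_broken(2)
       5
       """
       return x * 2


   def _test() -> None:
       globs = globals().copy()

       # Deleting __doc__ leaves the function without a docstring, so the
       # doctest finder collects no examples for it and the broken test
       # is skipped rather than failing the run.
       del known_broken.__doc__

       (failure_count, test_count) = doctest.testmod(
           globs=globs, optionflags=doctest.ELLIPSIS
       )
       if failure_count:
           sys.exit(-1)


   if __name__ == "__main__":
       _test()
   ```

   This mirrors the TODO-grouped deletions in the diff: each group can be re-enabled later simply by removing its `del` lines once the linked JIRA is resolved.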


