HyukjinKwon commented on a change in pull request #34931:
URL: https://github.com/apache/spark/pull/34931#discussion_r773016622



##########
File path: python/pyspark/pandas/frame.py
##########
@@ -8828,22 +8846,154 @@ def describe(self, percentiles: Optional[List[float]] = None) -> "DataFrame":
         else:
             percentiles = [0.25, 0.5, 0.75]
 
-        formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
-        stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]
+        if len(exprs_numeric) == 0:
+            if len(exprs_non_numeric) == 0:
+                raise ValueError("Cannot describe a DataFrame without columns")
 
-        sdf = self._internal.spark_frame.select(*exprs).summary(*stats)
-        sdf = sdf.replace("stddev", "std", subset=["summary"])
+            # Handling non-numeric type columns
+            # We will retrieve the `count`, `unique`, `top` and `freq`.
+            sdf = self._internal.spark_frame.select(*exprs_non_numeric)
 
-        internal = InternalFrame(
-            spark_frame=sdf,
-            index_spark_columns=[scol_for(sdf, "summary")],
-            column_labels=column_labels,
-            data_spark_columns=[
-                scol_for(sdf, self._internal.spark_column_name_for(label))
-                for label in column_labels
-            ],
-        )
-        return DataFrame(internal).astype("float64")
+            # Get `count` & `unique` for each column
+            counts, uniques = map(lambda x: x[1:], sdf.summary("count", "count_distinct").take(2))
+
+            # Get `top` & `freq` for each column
+            tops = []
+            freqs = []
+            for column in exprs_non_numeric:
+                top, freq = sdf.groupby(column).count().sort("count", ascending=False).first()
+                tops.append(str(top))
+                freqs.append(str(freq))
+
+            stats = [counts, uniques, tops, freqs]
+            stats_names = ["count", "unique", "top", "freq"]
+
+            result: DataFrame = DataFrame(
+                data=stats,
+                index=stats_names,
+                columns=column_names,
+            )
+        elif any(map(lambda bool_and_type: bool_and_type[0], is_timestamp_types)):
+            # Handling numeric & timestamp type columns
+            # If the DataFrame has a timestamp type column, we cannot use `summary`,
+            # so we should manually calculate the stats for each column.
+            column_names = list(map(lambda x: x[0], column_labels))
+            column_length = len(column_labels)
+
+            # If the DataFrame has only timestamp columns, we don't need to compute `std`.
+            is_all_timestamp_types = all(

Review comment:
       Can we pull this condition up into the parent if-else branch instead of handling both cases together here?
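
       For illustration, a rough sketch of the kind of restructuring being suggested — hoisting the timestamp checks into the top-level branching so each branch handles exactly one case. This is not code from the PR; it reuses `exprs_numeric`, `exprs_non_numeric`, and `is_timestamp_types` from the diff above and assumes `is_timestamp_types` is a list of `(bool, type)` pairs, as the lambda in the diff suggests:

       ```python
       # Hypothetical restructuring sketch, not the PR's actual code.
       # Decide the all-timestamp / mixed cases up front in the if/elif chain
       # instead of inside the numeric-and-timestamp branch.
       has_timestamp = any(is_ts for is_ts, _ in is_timestamp_types)
       all_timestamp = all(is_ts for is_ts, _ in is_timestamp_types)

       if not exprs_numeric and not exprs_non_numeric:
           raise ValueError("Cannot describe a DataFrame without columns")
       elif not exprs_numeric:
           # Non-numeric columns only: compute count / unique / top / freq.
           ...
       elif all_timestamp:
           # Timestamp columns only: per-column stats, no std needed.
           ...
       elif has_timestamp:
           # Mixed numeric and timestamp columns: manual per-column stats.
           ...
       else:
           # Numeric columns only: the existing summary()-based path.
           ...
       ```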




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


