HyukjinKwon commented on a change in pull request #34931:
URL: https://github.com/apache/spark/pull/34931#discussion_r772222285



##########
File path: python/pyspark/pandas/frame.py
##########
@@ -8828,22 +8843,135 @@ def describe(self, percentiles: Optional[List[float]] = None) -> "DataFrame":
         else:
             percentiles = [0.25, 0.5, 0.75]
 
-        formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
-        stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]
+        if len(exprs_numeric) == 0:
+            if len(exprs_non_numeric) == 0:
+                raise ValueError("Cannot describe a DataFrame without columns")
 
-        sdf = self._internal.spark_frame.select(*exprs).summary(*stats)
-        sdf = sdf.replace("stddev", "std", subset=["summary"])
+            # Handling non-numeric type columns
+            # We will retrieve the `count`, `unique`, `top` and `freq`.
+            sdf = self._internal.spark_frame.select(*exprs_non_numeric)
 
-        internal = InternalFrame(
-            spark_frame=sdf,
-            index_spark_columns=[scol_for(sdf, "summary")],
-            column_labels=column_labels,
-            data_spark_columns=[
-                scol_for(sdf, self._internal.spark_column_name_for(label))
-                for label in column_labels
-            ],
-        )
-        return DataFrame(internal).astype("float64")
+            # Get `count` & `unique` for each column
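+            # `summary` returns one Row per requested stat whose first field is
+            # the stat name, so `x[1:]` keeps only the per-column values.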
+            counts, uniques = map(lambda x: x[1:], sdf.summary("count", "count_distinct").take(2))
+
+            # Get `top` & `freq` for each column
+            tops = []
+            freqs = []
+            for column in exprs_non_numeric:
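+                # The first row of a groupBy().count() sorted by descending
+                # count holds the most frequent value (top) and its frequency.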
+                top, freq = sdf.groupby(column).count().sort("count", ascending=False).first()
+                tops.append(str(top))
+                freqs.append(str(freq))
+
+            stats = [counts, uniques, tops, freqs]
+            stats_names = ["count", "unique", "top", "freq"]
+
+            result: DataFrame = DataFrame(
+                data=stats,
+                index=stats_names,
+                columns=column_names,
+            )
+        elif any(is_timestamp_types):
+            # Handling numeric & timestamp type columns
+            # If the DataFrame has a timestamp type column, we cannot use `summary`,
+            # so we have to calculate the stats for each column manually.
+            column_names = list(map(lambda x: x[0], column_labels))
+            column_length = len(column_labels)
+
+            # If the DataFrame has only timestamp columns, we don't need to
+            # compute `std`.
+            is_all_timestamp_types = all(is_timestamp_types)
+
+            # Apply stat functions for each column.
+            count_exprs = map(F.count, column_names)
+            min_exprs = map(F.min, column_names)
+            perc_expr_list = [
+                map(F.percentile_approx, column_names, [percentile] * column_length)
+                for percentile in percentiles
+            ]
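+            # Flatten percentile-major: all columns for the first percentile,
+            # then all columns for the second, and so on.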
+            perc_exprs: List[Column] = []
+            for perc_expr in perc_expr_list:
+                perc_exprs += perc_expr
+            max_exprs = map(F.max, column_names)
+            if is_all_timestamp_types:
+                mean_exprs = list(map(lambda x: F.mean(x).astype(TimestampType()), column_names))
+                exprs = [*count_exprs, *mean_exprs, *min_exprs, *perc_exprs, *max_exprs]
+            else:
+                mean_exprs = []
+                std_exprs = []
+                for column_name, is_timestamp_type in zip(column_names, is_timestamp_types):
+                    if is_timestamp_type:
+                        std_exprs.append(
+                            F.lit(str(pd.NaT)).alias("stddev_samp({})".format(column_name))
+                        )
+                        mean_exprs.append(F.mean(column_name).astype(TimestampType()))
+                    else:
+                        std_exprs.append(F.stddev(column_name))
+                        mean_exprs.append(F.mean(column_name))
+                exprs = [*count_exprs, *mean_exprs, *min_exprs, *perc_exprs, *max_exprs, *std_exprs]
+
+            # Select stats for all columns at once.
+            sdf = self._internal.spark_frame.select(exprs)
+            stat_values = sdf.first()
+
+            counts = []
+            means = []
+            mins = []
+            percs: List[List] = [[] for _ in range(len(percentiles))]
+            maxs = []
+            if not is_all_timestamp_types:
+                stds = []
+            for i, is_timestamp_type in zip(range(column_length), is_timestamp_types):
+                if is_timestamp_type:
+                    counts.append(str(stat_values[i]))
+                    means.append(str(stat_values[i + column_length]))
+                    mins.append(str(stat_values[i + column_length * 2]))
+                    for j in range(len(percentiles)):
+                        percs[j].append(str(stat_values[i + column_length * (3 + j)]))

Review comment:
       Mind adding some comments here? It's difficult to follow what these `i`, `j`, `3`, `2`, `1` mean.
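
       For example, comments along these lines would help (this is just my
       reading of the layout, so please double-check the offsets):

       ```python
       # `stat_values` is a single flat Row, laid out stat-major over the columns:
       #   [count_0 .. count_{n-1}, mean_0 .., min_0 .., perc(p_0)_0 .., ..., max_0 ..]
       # where n == column_length, so column i's k-th stat sits at
       # stat_values[i + column_length * k], e.g.:
       mins.append(str(stat_values[i + column_length * 2]))            # k = 2: min
       percs[j].append(str(stat_values[i + column_length * (3 + j)]))  # k = 3 + j: j-th percentile
       ```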




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


