HyukjinKwon commented on a change in pull request #34931:
URL: https://github.com/apache/spark/pull/34931#discussion_r773022383
##########
File path: python/pyspark/pandas/frame.py
##########
@@ -8828,22 +8846,154 @@ def describe(self, percentiles: Optional[List[float]]
= None) -> "DataFrame":
else:
percentiles = [0.25, 0.5, 0.75]
- formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
- stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]
+ if len(exprs_numeric) == 0:
+ if len(exprs_non_numeric) == 0:
+ raise ValueError("Cannot describe a DataFrame without columns")
- sdf = self._internal.spark_frame.select(*exprs).summary(*stats)
- sdf = sdf.replace("stddev", "std", subset=["summary"])
+ # Handling non-numeric type columns
+ # We will retrieve the `count`, `unique`, `top` and `freq`.
+ sdf = self._internal.spark_frame.select(*exprs_non_numeric)
- internal = InternalFrame(
- spark_frame=sdf,
- index_spark_columns=[scol_for(sdf, "summary")],
- column_labels=column_labels,
- data_spark_columns=[
- scol_for(sdf, self._internal.spark_column_name_for(label))
- for label in column_labels
- ],
- )
- return DataFrame(internal).astype("float64")
+ # Get `count` & `unique` for each column
+ counts, uniques = map(lambda x: x[1:], sdf.summary("count",
"count_distinct").take(2))
+
+ # Get `top` & `freq` for each column
+ tops = []
+ freqs = []
+ for column in exprs_non_numeric:
+ top, freq = sdf.groupby(column).count().sort("count",
ascending=False).first()
+ tops.append(str(top))
+ freqs.append(str(freq))
+
+ stats = [counts, uniques, tops, freqs]
+ stats_names = ["count", "unique", "top", "freq"]
+
+ result: DataFrame = DataFrame(
+ data=stats,
+ index=stats_names,
+ columns=column_names,
+ )
+ elif any(map(lambda bool_and_type: bool_and_type[0],
is_timestamp_types)):
+ # Handling numeric & timestamp type columns
+ # If DataFrame has timestamp type column, we cannot use `summary`
+ # so should manually calculate the stats for each column.
+ column_names = list(map(lambda x: x[0], column_labels))
+ column_length = len(column_labels)
+
+ # If DataFrame has only timestamp column, we don't need to compute
`std`.
+ is_all_timestamp_types = all(
+ map(lambda bool_and_type: bool_and_type[0], is_timestamp_types)
+ )
+
+ # Apply stat functions for each column.
+ count_exprs = map(F.count, column_names)
+ min_exprs = map(F.min, column_names)
+ perc_expr_list = [
+ map(F.percentile_approx, column_names, [percentile] *
column_length)
+ for percentile in percentiles
+ ]
+ perc_exprs: List[Column] = []
+ for perc_expr in perc_expr_list:
+ perc_exprs += perc_expr
+ max_exprs = map(F.max, column_names)
+ if is_all_timestamp_types:
+ mean_exprs = []
+ for column_name, is_timestamp_type in zip(column_names,
is_timestamp_types):
+
mean_exprs.append(F.mean(column_name).astype(is_timestamp_type[1]))
+ exprs = [*count_exprs, *mean_exprs, *min_exprs, *perc_exprs,
*max_exprs]
+ else:
+ mean_exprs = []
+ std_exprs = []
+ for column_name, is_timestamp_type in zip(column_names,
is_timestamp_types):
+ if is_timestamp_type[0]:
+ std_exprs.append(
+
F.lit(str(pd.NaT)).alias("stddev_samp({})".format(column_name))
+ )
+
mean_exprs.append(F.mean(column_name).astype(is_timestamp_type[1]))
+ else:
+ std_exprs.append(F.stddev(column_name))
+ mean_exprs.append(F.mean(column_name))
+ exprs = [*count_exprs, *mean_exprs, *min_exprs, *perc_exprs,
*max_exprs, *std_exprs]
+
+ # Select stats for all columns at once.
+ sdf = self._internal.spark_frame.select(exprs)
+ stat_values = sdf.first()
+
+ counts = []
+ means = []
+ mins = []
+ # `percentiles` is variable length according to user input.
+ # Therefore, it cannot be assigned to a fixed variable such as
+ # `percs1`, `percs2`, `percs3`.
+ # So we will create a list of lists equal to the length of
percentiles,
+ # and store each percentile value in its own list.
+ percs: List[List] = [[] for _ in range(len(percentiles))]
+ maxs = []
+ if not is_all_timestamp_types:
Review comment:
I think handling `is_all_timestamp_types` together here makes this code
convoluted. Let's extract this case out.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]