HyukjinKwon commented on a change in pull request #33625:
URL: https://github.com/apache/spark/pull/33625#discussion_r686425241
##########
File path: python/pyspark/pandas/frame.py
##########
@@ -3459,6 +3458,109 @@ def mask(
cond_inversed = cond._apply_series_op(lambda psser: ~psser)
return self.where(cond_inversed, other)
+    # TODO: Support axis as 1 or 'columns'
+    def mode(self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True) -> "DataFrame":
+ """
+ Get the mode(s) of each element along the selected axis.
+
+ The mode of a set of values is the value that appears most often.
+ It can be multiple values.
+
+ Notes
+ -----
+ The current implementation of mode requires joins multiple times
+ (columns count - 1 times when axis is 0 or 'index'), which is
potentially expensive.
+
+ The order of multiple modes (within each column when axis is 0 or
'index') is undetermined.
+
+ Parameters
+ ----------
+ axis : {0 or 'index', 1 or 'columns'}, default 0
+ The axis to iterate over while searching for the mode:
+ * 0 or 'index' : get mode of each column
+ * 1 or 'columns' : get mode of each row.
+
+ numeric_only : bool, default False
+ If True, only apply to numeric columns.
+
+ dropna : bool, default True
+ Don't consider counts of NaN/NaT.
+
+ Returns
+ -------
+ DataFrame
+ The modes of each column or row.
+
+ See Also
+ --------
+ Series.mode : Return the highest frequency value in a Series.
+ Series.value_counts : Return the counts of values in a Series.
+
+ Examples
+ --------
+ >>> psdf = ps.DataFrame(
+ ... [("bird", 2, 2), ("mammal", 4, np.nan), ("arthropod", 8, 0),
("bird", 2, np.nan)],
+ ... index=("falcon", "horse", "spider", "ostrich"),
+ ... columns=("species", "legs", "wings"),
+ ... )
+ >>> psdf
+ species legs wings
+ falcon bird 2 2.0
+ horse mammal 4 NaN
+ spider arthropod 8 0.0
+ ostrich bird 2 NaN
+
+ >>> psdf.mode() # doctest: +SKIP
+ species legs wings
+ 0 bird 2.0 0.0
+ 1 None NaN 2.0
+
+ >>> psdf.mode(dropna=False)
+ species legs wings
+ 0 bird 2 NaN
+
+ >>> psdf.mode(numeric_only=True) # doctest: +SKIP
+ legs wings
+ 0 2.0 0.0
+ 1 NaN 2.0
+ """
+        axis = validate_axis(axis)
+        if axis == 1:
+            raise NotImplementedError("Mode currently only works when axis is 0 or 'index'.")
+
+        data = self if not numeric_only else self._get_numeric_data()
+
+        new_scol = verify_temp_column_name(data._internal.spark_frame, "__row_index__")
+
+        def scol_mode(col: Column) -> SparkDataFrame:
+            if dropna:
+                sdf_dropna = data._internal.spark_frame.select(col).dropna()
+            else:
+                sdf_dropna = data._internal.spark_frame
+            count_df = sdf_dropna.groupBy(col).count()
Review comment:
One workaround might be to use `mapInPandas` and do the manual aggregation for all columns, e.g.:
```python
from collections import defaultdict
import pandas as pd

def aggregate(iterator):
    # Count value occurrences per column across all batches in this partition.
    counts = defaultdict(lambda: defaultdict(int))
    for pdf in iterator:
        for name, ser in pdf.items():
            for value, count in ser.value_counts().items():
                counts[name][str(value)] += int(count)  # stringify keys for the map schema
    # mapInPandas must yield pandas DataFrames matching the schema:
    # emit one row per partition holding the per-column value counts.
    yield pd.DataFrame([{"counts": {name: dict(vc) for name, vc in counts.items()}}])

result = df.mapInPandas(aggregate, schema="counts map<string, map<string, bigint>>")
# final aggregate on result
```
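That final aggregate would merge the per-partition maps on the driver; a minimal sketch, assuming the `counts` column name from the snippet above (`final` and `modes` are just illustrative names):
```python
from collections import Counter, defaultdict

# Merge per-partition counts, then keep the most frequent value(s) per column.
final = defaultdict(Counter)
for row in result.collect():
    for name, vc in row["counts"].items():
        final[name].update(vc)
modes = {name: counter.most_common(1) for name, counter in final.items()}
```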
Alternatively, we could implement it for PySpark-only use (e.g., in `PythonSQLUtils`) by leveraging `RDD.treeAggregate`.
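For a sense of that direction, a rough Python-side sketch (illustrative only; the actual `PythonSQLUtils` approach would live on the JVM side, and None/NaN handling is glossed over here):
```python
from collections import Counter

def seq_op(acc, row):
    # Fold one Row into the running per-column counters.
    for name, value in row.asDict().items():
        acc.setdefault(name, Counter())[value] += 1
    return acc

def comb_op(left, right):
    # Merge per-column counters coming from different partitions.
    for name, counter in right.items():
        left.setdefault(name, Counter()).update(counter)
    return left

counts = df.rdd.treeAggregate({}, seq_op, comb_op)
modes = {name: counter.most_common(1) for name, counter in counts.items()}
```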