dgd-contributor commented on a change in pull request #33858:
URL: https://github.com/apache/spark/pull/33858#discussion_r711607257
##########
File path: python/pyspark/pandas/series.py
##########
@@ -4463,6 +4466,161 @@ def replace(
return self._with_new_scol(current) # TODO: dtype?
+ def combine(
+ self,
+ other: "Series",
+ func: Callable,
+ fill_value: Optional[Any] = None,
+ ) -> "Series":
+ """
+ Combine the Series with a Series or scalar according to `func`.
+
+ Combine the Series and `other` using `func` to perform elementwise
+ selection for combined Series.
+ `fill_value` is assumed when a value is missing at some index
+ from one of the two objects being combined.
+
+ .. versionadded:: 3.3.0
+
+ .. note:: this API executes the function once to infer the type which is
+ potentially expensive, for instance, when the dataset is created after
+ aggregations or sorting.
+
+ To avoid this, specify return type in ``func``, for instance, as below:
+
+ >>> def foo(x, y) -> np.int32:
+ ... return x * y
+
+ pandas-on-Spark uses return type hint and does not try to infer the type.
+
+ Parameters
+ ----------
+ other : Series or scalar
+ The value(s) to be combined with the `Series`.
+ func : function
+ Function that takes two scalars as inputs and returns an element.
+ Note that type hint for return type is required.
+ fill_value : scalar, optional
+ The value to assume when an index is missing from
+ one Series or the other. The default specifies to use the
+ appropriate NaN value for the underlying dtype of the Series.
+
+ Returns
+ -------
+ Series
+ The result of combining the Series with the other object.
+
+ See Also
+ --------
+ Series.combine_first : Combine Series values, choosing the calling
+ Series' values first.
+
+ Examples
+ --------
+ Consider 2 Datasets ``s1`` and ``s2`` containing
+ highest clocked speeds of different birds.
+
+ >>> from pyspark.pandas.config import set_option, reset_option
+ >>> set_option("compute.ops_on_diff_frames", True)
+ >>> s1 = ps.Series({'falcon': 330.0, 'eagle': 160.0})
+ >>> s1
+ falcon 330.0
+ eagle 160.0
+ dtype: float64
+ >>> s2 = ps.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
+ >>> s2
+ falcon 345.0
+ eagle 200.0
+ duck 30.0
+ dtype: float64
+
+ Now, to combine the two datasets and view the highest speeds
+ of the birds across the two datasets
+
+ >>> s1.combine(s2, max)
+ duck NaN
+ eagle 200.0
+ falcon 345.0
+ dtype: float64
+
+ In the previous example, the resulting value for duck is missing
+ because the maximum of a NaN and a float is a NaN.
+ To work around this, we set ``fill_value=0``,
+ so the maximum value returned is the value from one of the two datasets.
+
+ >>> s1.combine(s2, max, fill_value=0)
+ duck 30.0
+ eagle 200.0
+ falcon 345.0
+ dtype: float64
+ >>> reset_option("compute.ops_on_diff_frames")
+ """
+ if not isinstance(other, Series) and not np.isscalar(other):
+ raise TypeError("unsupported type: %s" % type(other))
+
+ assert callable(func), "argument func must be a callable function."
+
+ if np.isscalar(other):
+ tmp_other_col = verify_temp_column_name(self._internal.spark_frame, "__tmp_other_col__")
+ combined = self.to_frame()
+ combined[tmp_other_col] = other
+ combined = DataFrame(combined._internal.resolved_copy)
+ elif same_anchor(self, other):
+ combined = self._psdf[self._column_label, other._column_label]
+ elif fill_value is None:
+ combined = combine_frames(self.to_frame(), other.to_frame())
+ else:
+ combined = self._combine_frame_with_fill_value(other, fill_value=fill_value)
+
+ try:
+ sig_return = infer_return_type(func)
+ if isinstance(sig_return, UnknownType):
+ raise TypeError()
+ return_type = sig_return.spark_type
+ except TypeError:
+ limit = ps.get_option("compute.shortcut_limit")
+ pdf = combined.head(limit + 1)._to_internal_pandas()
+ combined_pser = pdf.iloc[:, 0].combine(pdf.iloc[:, 1], func, fill_value=fill_value)
+ return_type = as_spark_type(combined_pser.dtype)
+
+ @pandas_udf(returnType=return_type) # type: ignore
+ def wrapped_func(x: pd.Series, y: pd.Series) -> pd.Series:
+ return x.combine(y, func)
+
+ scol = wrapped_func(*combined._internal.data_spark_columns)
+ combined_sdf = combined._internal.spark_frame.select(
+ *combined._internal.index_spark_columns,
+ scol.alias(self._internal.spark_column_name_for(self.spark.column)),
+ NATURAL_ORDER_COLUMN_NAME
+ )
+ internal = InternalFrame(
+ spark_frame=combined_sdf,
+ index_spark_columns=combined._internal.index_spark_columns,
+ )
Review comment:
updated, please take another look when you have time.
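
For reference, a minimal usage sketch of the proposed API, based only on the
docstring above (not on the PR's own test code): it calls ``Series.combine``
with a return type hint on ``func`` so pandas-on-Spark can skip the potentially
expensive type-inference step, and passes ``fill_value`` to handle the index
that is missing from one of the two Series. The function name ``fastest`` is
made up for illustration, and it assumes a running Spark session with this
change applied.

    import numpy as np
    import pyspark.pandas as ps
    from pyspark.pandas.config import set_option, reset_option

    # combining Series backed by different DataFrames requires this option
    set_option("compute.ops_on_diff_frames", True)

    s1 = ps.Series({'falcon': 330.0, 'eagle': 160.0})
    s2 = ps.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})

    # the return type hint lets combine() resolve the result dtype directly
    # instead of sampling rows to infer it
    def fastest(x, y) -> np.float64:
        return max(x, y)

    s1.combine(s2, fastest, fill_value=0)
    # expected result, per the docstring example above:
    # duck       30.0
    # eagle     200.0
    # falcon    345.0
    # dtype: float64

    reset_option("compute.ops_on_diff_frames")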