mengxr commented on a change in pull request #24643:
[SPARK-26412][PySpark][SQL][WIP] Allow Pandas UDF to take an iterator of
pd.Series or an iterator of tuple of pd.Series
URL: https://github.com/apache/spark/pull/24643#discussion_r291393738
##########
File path: python/pyspark/worker.py
##########
@@ -86,21 +86,29 @@ def wrap_udf(f, return_type):
     return lambda *a: f(*a)
 
 
-def wrap_scalar_pandas_udf(f, return_type):
+def verify_scalar_pandas_udf_result_length(result, length):
+    if len(result) != length:
+        raise RuntimeError("Result vector from pandas_udf was not the required length: "
+                           "expected %d, got %d" % (length, len(result)))
+    return result
+
+
+def wrap_scalar_pandas_udf(f, return_type, eval_type):
     arrow_return_type = to_arrow_type(return_type)
 
-    def verify_result_length(*a):
-        result = f(*a)
+    def verify_result_type(result):
         if not hasattr(result, "__len__"):
             pd_type = "Pandas.DataFrame" if type(return_type) == StructType else "Pandas.Series"
             raise TypeError("Return type of the user-defined function should be "
                             "{}, but is {}".format(pd_type, type(result)))
-        if len(result) != len(a[0]):
-            raise RuntimeError("Result vector from pandas_udf was not the required length: "
-                               "expected %d, got %d" % (len(a[0]), len(result)))
         return result
 
-    return lambda *a: (verify_result_length(*a), arrow_return_type)
+    if eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF:
+        return lambda *a: (verify_scalar_pandas_udf_result_length(
+            verify_result_type(f(*a)), len(a[0])), arrow_return_type)
+    else:
+        return lambda *iterator: map(lambda res: (res, arrow_return_type),
+                                     f(*iterator))
Review comment:
Add an inline comment noting that the result length verification is done at the end of a partition.
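
For context, here is a minimal user-side sketch of the iterator flavor this diff wires up, and of the kind of end-of-partition check the comment refers to. It assumes the SCALAR_ITER pandas UDF type that SPARK-26412 ultimately introduced; the helper check_partition_output_length below is illustrative, not the PR's actual code.

    from pyspark.sql.functions import pandas_udf, PandasUDFType

    # Iterator-of-Series UDF: the function sees an iterator over all input
    # batches of a partition and yields output batches. Output batch
    # boundaries need not line up with input batch boundaries, so the
    # per-batch length check used for SQL_SCALAR_PANDAS_UDF cannot apply.
    @pandas_udf("long", PandasUDFType.SCALAR_ITER)
    def plus_one(batch_iter):
        for batch in batch_iter:
            yield batch + 1

    # Illustrative end-of-partition check: only the *total* number of
    # output rows can be compared with the total number of input rows,
    # and only once the output iterator is exhausted.
    def check_partition_output_length(out_batches, num_input_rows):
        num_output_rows = 0
        for batch in out_batches:
            num_output_rows += len(batch)
            yield batch
        if num_output_rows != num_input_rows:
            raise RuntimeError(
                "Output of a pandas iterator UDF must have the same number "
                "of rows as its input: input had %d rows, output had %d."
                % (num_input_rows, num_output_rows))

Because only totals are comparable, the length check has to live outside wrap_scalar_pandas_udf for the iterator eval type, which is what the requested inline comment would point out.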