Github user HyukjinKwon commented on a diff in the pull request:
https://github.com/apache/spark/pull/18732#discussion_r142586820
--- Diff: python/pyspark/worker.py ---
@@ -74,17 +74,35 @@ def wrap_udf(f, return_type):
 def wrap_pandas_udf(f, return_type):
-    arrow_return_type = toArrowType(return_type)
-
-    def verify_result_length(*a):
-        result = f(*a)
-        if not hasattr(result, "__len__"):
-            raise TypeError("Return type of pandas_udf should be a Pandas.Series")
-        if len(result) != len(a[0]):
-            raise RuntimeError("Result vector from pandas_udf was not the required length: "
-                               "expected %d, got %d" % (len(a[0]), len(result)))
-        return result
-    return lambda *a: (verify_result_length(*a), arrow_return_type)
+    if isinstance(return_type, StructType):
+        arrow_return_types = [to_arrow_type(field.dataType) for field in return_type]
+
+        def fn(*a):
+            import pandas as pd
+            out = f(*a)
+            assert isinstance(out, pd.DataFrame), \
+                'Return value from the user function is not a pandas.DataFrame.'
--- End diff --
little nit: `the user function` -> `the user-defined function`
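
For readers following along, a minimal, self-contained sketch of the check this branch adds, already using the suggested wording. `verify_struct_result` is a hypothetical helper name for illustration only, not the actual worker code:

```python
import pandas as pd
from pyspark.sql.types import StructType


def verify_struct_result(out, return_type):
    # Sketch of the check performed when the declared return type is a
    # StructType: the value produced by the user-defined function must be
    # a pandas.DataFrame. (Hypothetical helper, not the real worker code.)
    if isinstance(return_type, StructType) and not isinstance(out, pd.DataFrame):
        raise AssertionError(
            "Return value from the user-defined function is not a pandas.DataFrame.")
    return out
```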
---