Github user BryanCutler commented on a diff in the pull request:
https://github.com/apache/spark/pull/19325#discussion_r140834239
--- Diff: python/pyspark/sql/functions.py ---
@@ -2183,14 +2183,29 @@ def pandas_udf(f=None, returnType=StringType()):
:param f: python function if used as a standalone function
:param returnType: a :class:`pyspark.sql.types.DataType` object
- # TODO: doctest
+ >>> from pyspark.sql.types import IntegerType, StringType
+ >>> slen = pandas_udf(lambda s: s.str.len(), IntegerType())
+ >>> @pandas_udf(returnType=StringType())
+ ... def to_upper(s):
+ ... return s.str.upper()
+ ...
+ >>> @pandas_udf(returnType="integer")
+ ... def add_one(x):
+ ... return x + 1
+ ...
+ >>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
+ >>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
+ +----------+--------------+------------+
+ |slen(name)|to_upper(name)|add_one(age)|
+ +----------+--------------+------------+
+ | 8| JOHN DOE| 22|
+ +----------+--------------+------------+
"""
+ wrapped_udf = _create_udf(f, returnType=returnType, vectorized=True)
import inspect
- # If function "f" does not define the optional kwargs, then wrap with a kwargs placeholder
- if inspect.getargspec(f).keywords is None:
- return _create_udf(lambda *a, **kwargs: f(*a), returnType=returnType, vectorized=True)
- else:
- return _create_udf(f, returnType=returnType, vectorized=True)
+ if not inspect.getargspec(wrapped_udf.func).args:
--- End diff ---
Yeah, it probably would be a good idea to be explicit here since it's not
obvious what type `getargspec` returns
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]