Github user BryanCutler commented on a diff in the pull request:

    https://github.com/apache/spark/pull/19325#discussion_r140834562
  
    --- Diff: python/pyspark/sql/functions.py ---
    @@ -2183,14 +2183,29 @@ def pandas_udf(f=None, returnType=StringType()):
         :param f: python function if used as a standalone function
         :param returnType: a :class:`pyspark.sql.types.DataType` object
     
    -    # TODO: doctest
    +    >>> from pyspark.sql.types import IntegerType, StringType
    +    >>> slen = pandas_udf(lambda s: s.str.len(), IntegerType())
    +    >>> @pandas_udf(returnType=StringType())
    +    ... def to_upper(s):
    +    ...     return s.str.upper()
    +    ...
    +    >>> @pandas_udf(returnType="integer")
    +    ... def add_one(x):
    +    ...     return x + 1
    +    ...
    +    >>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
    +    >>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
    +    +----------+--------------+------------+
    +    |slen(name)|to_upper(name)|add_one(age)|
    +    +----------+--------------+------------+
    +    |         8|      JOHN DOE|          22|
    +    +----------+--------------+------------+
         """
    +    wrapped_udf = _create_udf(f, returnType=returnType, vectorized=True)
         import inspect
    -    # If function "f" does not define the optional kwargs, then wrap with a kwargs placeholder
    -    if inspect.getargspec(f).keywords is None:
    -        return _create_udf(lambda *a, **kwargs: f(*a), returnType=returnType, vectorized=True)
    -    else:
    -        return _create_udf(f, returnType=returnType, vectorized=True)
    +    if not inspect.getargspec(wrapped_udf.func).args:
    --- End diff --
    
    I'll look into this some more


---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to