Github user ueshin commented on a diff in the pull request:

    https://github.com/apache/spark/pull/20217#discussion_r160873545
  
    --- Diff: python/pyspark/sql/context.py ---
    @@ -203,18 +203,46 @@ def registerFunction(self, name, f, 
returnType=StringType()):
             >>> _ = sqlContext.udf.register("stringLengthInt", lambda x: 
len(x), IntegerType())
             >>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
             [Row(stringLengthInt(test)=4)]
    +        """
    +        return self.sparkSession.catalog.registerFunction(name, f, 
returnType)
    +
    +    @ignore_unicode_prefix
    +    @since(2.3)
    +    def registerUDF(self, name, f):
    +        """Registers a :class:`UserDefinedFunction`. The registered UDF 
can be used in SQL
    +        statements.
    +
    +        :param name: name of the UDF
    +        :param f: a wrapped/native UserDefinedFunction. The UDF can be 
either row-at-a-time or
    +                  scalar vectorized. Grouped vectorized UDFs are not 
supported.
    +        :return: a wrapped :class:`UserDefinedFunction`
    +
    +        >>> from pyspark.sql.types import IntegerType
    +        >>> from pyspark.sql.functions import udf
    +        >>> slen = udf(lambda s: len(s), IntegerType())
    +        >>> _ = sqlContext.udf.registerUDF("slen", slen)
    +        >>> sqlContext.sql("SELECT slen('test')").collect()
    +        [Row(slen(test)=4)]
     
             >>> import random
             >>> from pyspark.sql.functions import udf
    -        >>> from pyspark.sql.types import IntegerType, StringType
    +        >>> from pyspark.sql.types import IntegerType
             >>> random_udf = udf(lambda: random.randint(0, 100), 
IntegerType()).asNondeterministic()
    -        >>> newRandom_udf = sqlContext.registerFunction("random_udf", 
random_udf, StringType())
    +        >>> newRandom_udf = sqlContext.registerUDF("random_udf", 
random_udf)
             >>> sqlContext.sql("SELECT random_udf()").collect()  # doctest: 
+SKIP
    -        [Row(random_udf()=u'82')]
    +        [Row(random_udf()=82)]
             >>> sqlContext.range(1).select(newRandom_udf()).collect()  # 
doctest: +SKIP
    -        [Row(random_udf()=u'62')]
    +        [Row(random_udf()=62)]
    +
    +        >>> from pyspark.sql.functions import pandas_udf, PandasUDFType
    +        >>> @pandas_udf("integer", PandasUDFType.SCALAR)  # doctest: +SKIP
    +        ... def add_one(x):
    +        ...     return x + 1
    +        ...
    +        >>> _ = sqlContext.udf.registerUDF("add_one", add_one)  # doctest: 
+SKIP
    +        >>> sqlContext.sql("SELECT add_one(id) FROM range(10)").collect()  
# doctest: +SKIP
    --- End diff --
    
    ditto.


---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org

Reply via email to