Github user HyukjinKwon commented on a diff in the pull request:
https://github.com/apache/spark/pull/20288#discussion_r162230241
--- Diff: python/pyspark/sql/context.py ---
@@ -172,113 +173,29 @@ def range(self, start, end=None, step=1, numPartitions=None):
         """
         return self.sparkSession.range(start, end, step, numPartitions)
 
-    @ignore_unicode_prefix
     @since(1.2)
     def registerFunction(self, name, f, returnType=None):
-        """Registers a Python function (including lambda function) or a :class:`UserDefinedFunction`
-        as a UDF. The registered UDF can be used in SQL statements.
-
-        :func:`spark.udf.register` is an alias for :func:`sqlContext.registerFunction`.
-
-        In addition to a name and the function itself, `returnType` can be optionally specified.
-        1) When f is a Python function, `returnType` defaults to a string. The produced object must
-        match the specified type. 2) When f is a :class:`UserDefinedFunction`, Spark uses the return
-        type of the given UDF as the return type of the registered UDF. The input parameter
-        `returnType` is None by default. If given by users, the value must be None.
-
-        :param name: name of the UDF in SQL statements.
-        :param f: a Python function, or a wrapped/native UserDefinedFunction. The UDF can be either
-            row-at-a-time or vectorized.
-        :param returnType: the return type of the registered UDF.
-        :return: a wrapped/native :class:`UserDefinedFunction`
-
-        >>> strlen = sqlContext.registerFunction("stringLengthString", lambda x: len(x))
-        >>> sqlContext.sql("SELECT stringLengthString('test')").collect()
-        [Row(stringLengthString(test)=u'4')]
-
-        >>> sqlContext.sql("SELECT 'foo' AS text").select(strlen("text")).collect()
-        [Row(stringLengthString(text)=u'3')]
-
-        >>> from pyspark.sql.types import IntegerType
-        >>> _ = sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
-        >>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
-        [Row(stringLengthInt(test)=4)]
-
-        >>> from pyspark.sql.types import IntegerType
-        >>> _ = sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
-        >>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
-        [Row(stringLengthInt(test)=4)]
-
-        >>> from pyspark.sql.types import IntegerType
-        >>> from pyspark.sql.functions import udf
-        >>> slen = udf(lambda s: len(s), IntegerType())
-        >>> _ = sqlContext.udf.register("slen", slen)
-        >>> sqlContext.sql("SELECT slen('test')").collect()
-        [Row(slen(test)=4)]
-
-        >>> import random
-        >>> from pyspark.sql.functions import udf
-        >>> from pyspark.sql.types import IntegerType
-        >>> random_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic()
-        >>> new_random_udf = sqlContext.registerFunction("random_udf", random_udf)
-        >>> sqlContext.sql("SELECT random_udf()").collect()  # doctest: +SKIP
-        [Row(random_udf()=82)]
-        >>> sqlContext.range(1).select(new_random_udf()).collect()  # doctest: +SKIP
-        [Row(<lambda>()=26)]
-
-        >>> from pyspark.sql.functions import pandas_udf, PandasUDFType
-        >>> @pandas_udf("integer", PandasUDFType.SCALAR)  # doctest: +SKIP
-        ... def add_one(x):
-        ...     return x + 1
-        ...
-        >>> _ = sqlContext.udf.register("add_one", add_one)  # doctest: +SKIP
-        >>> sqlContext.sql("SELECT add_one(id) FROM range(3)").collect()  # doctest: +SKIP
-        [Row(add_one(id)=1), Row(add_one(id)=2), Row(add_one(id)=3)]
+        """An alias for :func:`spark.udf.register`.
+        See :meth:`pyspark.sql.UDFRegistration.register`.
+
+        .. note:: Deprecated in 2.3.0. Use :func:`spark.udf.register` instead.
--- End diff --
It shows the doc as below:

[screenshot of the rendered registerFunction documentation]
I checked the link; `pyspark.sql.UDFRegistration.register` is correct.
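For reference, a minimal sketch of the call path this deprecation points users at, assuming a local PySpark 2.3+ session; the session setup and the `strlen` name are illustrative only, not part of this diff:

    from pyspark.sql import SparkSession
    from pyspark.sql.types import IntegerType

    # Illustrative local session; any existing 2.3+ SparkSession works the same way.
    spark = SparkSession.builder.master("local[1]").appName("udf-register-demo").getOrCreate()

    # Preferred API after this change; see pyspark.sql.UDFRegistration.register.
    spark.udf.register("strlen", lambda s: len(s), IntegerType())
    spark.sql("SELECT strlen('test')").collect()
    # [Row(strlen(test)=4)]

    spark.stop()

The deprecated `sqlContext.registerFunction` keeps working in 2.3 as an alias for `spark.udf.register`, so new code should call the latter directly.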