Github user gatorsmile commented on a diff in the pull request:
https://github.com/apache/spark/pull/20171#discussion_r160086985
--- Diff: python/pyspark/sql/catalog.py ---
@@ -265,12 +267,23 @@ def registerFunction(self, name, f, returnType=StringType()):
        [Row(random_udf()=u'82')]
        >>> spark.range(1).select(newRandom_udf()).collect()  # doctest: +SKIP
        [Row(random_udf()=u'62')]
+
+       >>> import random
+       >>> from pyspark.sql.types import IntegerType
+       >>> from pyspark.sql.functions import pandas_udf
+       >>> random_pandas_udf = pandas_udf(
+       ...     lambda x: random.randint(0, 100) + x, IntegerType())
+       ...     .asNondeterministic()  # doctest: +SKIP
+       >>> _ = spark.catalog.registerFunction(
+       ...     "random_pandas_udf", random_pandas_udf, IntegerType())  # doctest: +SKIP
+       >>> spark.sql("SELECT random_pandas_udf(2)").collect()  # doctest: +SKIP
+       [Row(random_pandas_udf(2)=84)]
        """
        # This is to check whether the input function is a wrapped/native UserDefinedFunction
        if hasattr(f, 'asNondeterministic'):
            udf = UserDefinedFunction(f.func, returnType=returnType, name=name,
-                                     evalType=PythonEvalType.SQL_BATCHED_UDF,
+                                     evalType=f.evalType,
--- End diff --
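
As a quick illustration of what `evalType=f.evalType` preserves: a wrapped pandas UDF already carries its own eval type, so hardcoding `SQL_BATCHED_UDF` would silently register it as a row-at-a-time UDF. The sketch below is not from the PR; it assumes a Spark 2.3-era build where the constant is named `SQL_PANDAS_SCALAR_UDF` as in this thread (released versions rename it `SQL_SCALAR_PANDAS_UDF`) and that pandas/pyarrow are installed.

```python
# Minimal sketch, assuming Spark 2.3-era APIs; not code from the PR itself.
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, udf
from pyspark.sql.types import LongType

plus_one_pandas = pandas_udf(lambda s: s + 1, LongType())   # vectorized (pandas) UDF
plus_one_batched = udf(lambda x: x + 1, LongType())         # row-at-a-time UDF

# The wrapper exposes the eval type it was created with, which is what the
# changed line forwards via evalType=f.evalType instead of hardcoding it.
assert plus_one_pandas.evalType == PythonEvalType.SQL_PANDAS_SCALAR_UDF
assert plus_one_batched.evalType == PythonEvalType.SQL_BATCHED_UDF
```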
> when it's not a PythonEvalType.SQL_BATCHED_UDF
->
> when it's neither a `PythonEvalType.SQL_BATCHED_UDF` nor a `PythonEvalType.SQL_PANDAS_SCALAR_UDF`, right?
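
To make that follow-up concrete, here is a hypothetical sketch of the stricter check being discussed. The helper name `_check_supported_eval_type` is invented for illustration, and the constant names follow this thread's usage rather than any final committed code.

```python
# Hypothetical sketch only; registerFunction's actual validation may differ.
from pyspark.rdd import PythonEvalType

_SUPPORTED_EVAL_TYPES = (
    PythonEvalType.SQL_BATCHED_UDF,
    PythonEvalType.SQL_PANDAS_SCALAR_UDF,
)


def _check_supported_eval_type(f):
    """Raise if a wrapped UDF's evalType is not one registerFunction can handle."""
    eval_type = getattr(f, 'evalType', PythonEvalType.SQL_BATCHED_UDF)
    if eval_type not in _SUPPORTED_EVAL_TYPES:
        raise ValueError(
            "Invalid f: expected SQL_BATCHED_UDF or SQL_PANDAS_SCALAR_UDF, "
            "got evalType=%s" % eval_type)
```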