mengxr commented on a change in pull request #24643: 
[SPARK-26412][PySpark][SQL][WIP] Allow Pandas UDF to take an iterator of 
pd.Series or an iterator of tuple of pd.Series
URL: https://github.com/apache/spark/pull/24643#discussion_r291397336
 
 

 ##########
 File path: python/pyspark/worker.py
 ##########
 @@ -255,13 +269,54 @@ def read_udfs(pickleSer, infile, eval_type):
 
         # Scalar Pandas UDF handles struct type arguments as pandas DataFrames 
instead of
         # pandas Series. See SPARK-27240.
-        df_for_struct = eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF
+        df_for_struct = (eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF or
+                         eval_type == 
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF)
         ser = ArrowStreamPandasUDFSerializer(timezone, safecheck, 
assign_cols_by_name,
                                              df_for_struct)
     else:
         ser = BatchedSerializer(PickleSerializer(), 100)
 
     num_udfs = read_int(infile)
+
+    if eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF:
+        assert num_udfs == 1, "One SQL_SCALAR_PANDAS_ITER_UDF expected here."
+
+        arg_offsets, udf = read_single_udf(
+            pickleSer, infile, eval_type, runner_conf, udf_index=i)
+
+        def func(_, iterator):
+            num_input_rows = [0]
+
+            def map_batch(batch):
+                udf_args = [batch[offset] for offset in arg_offsets]
+                num_input_rows[0] += len(udf_args[0])
+                if len(udf_args) == 1:
+                    return udf_args[0]
+                else:
+                    return tuple(udf_args)
+
+            iterator = map(map_batch, iterator)
+            result_iter = udf(iterator)
+
+            num_output_rows = 0
+            for result_batch, result_type in result_iter:
+                num_output_rows += len(result_batch)
+                assert num_output_rows <= num_input_rows[0], \
 +                    "Pandas iterator UDF generates more rows than read rows."
+                yield (result_batch, result_type)
+            try:
+                iterator.__next__()
+                raise Exception("SQL_SCALAR_PANDAS_ITER_UDF should exhaust the 
input iterator.")
 
 Review comment:
   ~~~python
   try:
     iterator.__next__()
   except StopIteration:
     pass
   else:
     raise RuntimeError("SQL_SCALAR_PANDAS_ITER_UDF should exhaust the input 
iterator.")
   ~~~
     

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to