HyukjinKwon commented on code in PR #41316:
URL: https://github.com/apache/spark/pull/41316#discussion_r1224110612


##########
python/pyspark/worker.py:
##########
@@ -456,6 +456,86 @@ def assign_cols_by_name(runner_conf):
     )
 
 
+# Read and process a serialized user-defined table function (UDTF) from a socket.
+# It expects the UDTF to be in a specific format and performs various checks to
+# ensure the UDTF is valid. This function also prepares a mapper function for
+# applying the UDTF logic to input rows.
+def read_udtf(pickleSer, infile, eval_type):
+    num_udtfs = read_int(infile)
+    if num_udtfs != 1:
+        raise PySparkValueError(f"Unexpected number of UDTFs. Expected 1 but got {num_udtfs}.")
+
+    # See `PythonUDFRunner.writeUDFs`.
+    num_arg = read_int(infile)
+    arg_offsets = [read_int(infile) for _ in range(num_arg)]
+    num_chained_funcs = read_int(infile)
+    if num_chained_funcs != 1:
+        raise PySparkValueError(
+            f"Unexpected number of chained UDTFs. Expected 1 but got 
{num_chained_funcs}."
+        )
+
+    handler, return_type = read_command(pickleSer, infile)
+    if not isinstance(handler, type):
+        raise PySparkRuntimeError(
+            f"Invalid UDTF handler type. Expected a class (type 'type'), but "
+            f"got an instance of {type(handler).__name__}."
+        )
+
+    # Instantiate the UDTF class.
+    try:
+        udtf = handler()
+    except Exception as e:
+        raise PySparkRuntimeError(
+            f"User defined table function encountered an error in "
+            f"the '__init__' method: {str(e)}"
+        )
+
+    # Validate the UDTF.
+    if not hasattr(udtf, "eval"):
+        raise PySparkRuntimeError(
+            "Failed to execute the user defined table function because it has 
not "
+            "implemented the 'eval' method. Please add the 'eval' method and 
try "
+            "the query again."
+        )
+
+    # Wrap the UDTF and convert the results.
+    def wrap_udtf(f, return_type):
+        if return_type.needConversion():
+            toInternal = return_type.toInternal
+            return lambda *a: map(toInternal, f(*a))
+        else:
+            return lambda *a: f(*a)
+
+    eval = wrap_udtf(getattr(udtf, "eval"), return_type)
+
+    if hasattr(udtf, "terminate"):
+        terminate = wrap_udtf(getattr(udtf, "terminate"), return_type)
+    else:
+        terminate = None
+
+    def mapper(a):
+        results = tuple(eval(*[a[o] for o in arg_offsets]))

Review Comment:
   They will be batched in `BatchedSerializer`, so you don't have to fetch and construct them on the Python worker side. Otherwise, it could easily cause an out-of-memory error.
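
   A minimal sketch of the concern (the helper names below are illustrative, not Spark internals): materializing the UDTF output eagerly with `tuple(...)` holds every produced row in the worker at once, while returning a lazy iterator lets a batching serializer such as `BatchedSerializer` pull rows in bounded-size chunks.

   ```python
   from itertools import islice

   def huge_udtf_output():
       # Stands in for a UDTF `eval` that yields many rows per input row.
       for i in range(10_000_000):
           yield (i,)

   def eager_mapper(row):
       # Materializes every output row up front; peak memory grows with
       # the total number of rows the UDTF produces.
       return tuple(huge_udtf_output())

   def lazy_mapper(row):
       # Yields rows one at a time; memory stays proportional to a batch.
       yield from huge_udtf_output()

   def serialize_in_batches(it, batch_size=100):
       # Simplified analogue of a batching serializer: consume the
       # iterator in fixed-size slices instead of all at once.
       it = iter(it)
       while batch := list(islice(it, batch_size)):
           yield batch

   # Only batch_size rows are ever resident at once on the lazy path.
   first_batch = next(serialize_in_batches(lazy_mapper(None)))
   print(len(first_batch))  # 100
   ```

   With the eager path, the `tuple(...)` call must finish before the first batch can be written out, so peak memory scales with the full UDTF output rather than with one batch.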



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

