ueshin commented on a change in pull request #30242:
URL: https://github.com/apache/spark/pull/30242#discussion_r535582797



##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/python/EvalPythonExec.scala
##########
@@ -137,3 +139,53 @@ trait EvalPythonExec extends UnaryExecNode {
     }
   }
 }
+
+/**
+ * A TaskContext aware iterator.
+ *
+ * As the Python evaluation consumes the parent iterator in a separate thread,
+ * it could consume more data from the parent even after the task ends and the parent is closed.
+ * Thus, we should use ContextAwareIterator to stop consuming after the task ends.
+ */
+class ContextAwareIterator[IN](iter: Iterator[IN], context: TaskContext) extends Iterator[IN] {
+
+  private val thread = new AtomicReference[Thread]()
+
+  if (iter.hasNext) {
+    val failed = new AtomicBoolean(false)
+
+    context.addTaskFailureListener { (_, _) =>
+      failed.set(true)
+    }
+
+    context.addTaskCompletionListener[Unit] { _ =>
+      var thread = this.thread.get()
+
+      // Wait for a while since the writer thread might not have started consuming the iterator yet.
+      while (thread == null && !failed.get()) {
+        // Use `context.wait()` instead of `Thread.sleep()` here since the task completion listener
+        // works under `synchronized(context)`. We might need to consider improving this in the future.
+        // It's a bad idea to hold an implicit lock when calling a user's listener because it's
+        // pretty easy to cause a surprising deadlock.

Review comment:
       I see. Let me change the strategy here.

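       For reference, one simpler direction (a sketch only; the class name, signature, and method checks here are assumptions, not necessarily the shape this PR will end up with) is to avoid any waiting inside the completion listener and instead have the iterator itself re-check the task state on every `hasNext` call, so the separate writer thread stops pulling from the parent iterator once the task ends:

       ```scala
       import org.apache.spark.TaskContext

       /**
        * Sketch of a TaskContext-aware iterator that stops consuming once the task
        * has completed or been interrupted, without registering any listeners.
        * (Illustrative only; not the merged code.)
        */
       class ContextAwareIterator[+T](context: TaskContext, delegate: Iterator[T])
         extends Iterator[T] {

         // Re-check the task state before every element so the writer thread
         // cannot keep consuming the parent iterator after the task ends.
         override def hasNext: Boolean =
           !context.isCompleted() && !context.isInterrupted() && delegate.hasNext

         override def next(): T = delegate.next()
       }
       ```

       Since nothing blocks under `synchronized(context)` in a listener, the deadlock concern discussed above would not apply to this variant.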



----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
