This is an automated email from the ASF dual-hosted git repository.

wenchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 5864e8e  [SPARK-25158][SQL] Executor accidentally exits because ScriptTransformationWriterThread throws Exception.
5864e8e is described below

commit 5864e8e47496c3a841b97632e5137de87a91efea
Author: yangjie01 <[email protected]>
AuthorDate: Tue Feb 12 12:16:33 2019 +0800

    [SPARK-25158][SQL] Executor accidentally exits because ScriptTransformationWriterThread throws Exception.
    
    ## What changes were proposed in this pull request?
    
    When a Spark SQL job uses the transform feature (`ScriptTransformationExec`) with `spark.speculation = true`, the job sometimes fails and many executors show up as dead in the Executors tab. Analyzing the logs and code, we found the following:
    
    `ScriptTransformationExec` starts a new thread (`ScriptTransformationWriterThread`). When speculation is on, this thread is very likely to throw a `TaskKilledException` (from the `iter.map.foreach` part). That exception is caught by the `SparkUncaughtExceptionHandler` registered during executor startup, which then calls `System.exit(SparkExitCode.UNCAUGHT_EXCEPTION)` and shuts down the executor. This is unexpected.
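    
    As a minimal, self-contained sketch (not Spark's actual handler code) of why the rethrow was fatal: an exception that escapes a spawned thread's `run()` is routed to the JVM-wide default uncaught exception handler, and a handler in the style of `SparkUncaughtExceptionHandler` then terminates the whole process.
    
    ```scala
    object UncaughtExitDemo {
      def main(args: Array[String]): Unit = {
        Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler {
          override def uncaughtException(t: Thread, e: Throwable): Unit = {
            // SparkUncaughtExceptionHandler reacts roughly like this; its
            // System.exit call is what took down the whole Executor JVM.
            System.err.println(s"Uncaught exception in ${t.getName}: $e")
            // System.exit(...)  // SparkExitCode.UNCAUGHT_EXCEPTION in Spark; left out here
          }
        })
        val writer = new Thread(new Runnable {
          // Stand-in for the writer thread dying, e.g. with a TaskKilledException.
          override def run(): Unit = throw new RuntimeException("simulated task kill")
        })
        writer.start()
        writer.join()
      }
    }
    ```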
    
    We should not kill the executor just because `ScriptTransformationWriterThread` fails. Logging the error (not only `TaskKilledException`) instead of rethrowing it is enough: the exception is already passed to `ScriptTransformationExec` and handled by `TaskRunner`.
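    
    A hedged sketch of the hand-off this relies on (the names below are illustrative, not the exact members of `ScriptTransformationExec`): the writer thread records the failure in a shared field instead of rethrowing, and the reading side surfaces it on the task thread.
    
    ```scala
    class WriterThread(feed: () => Unit) extends Thread("Thread-ScriptTransformation-Feed") {
      @volatile private var _exception: Throwable = null
      def exception: Option[Throwable] = Option(_exception)
    
      override def run(): Unit = {
        try {
          feed() // write input rows to the child process
        } catch {
          case t: Throwable =>
            // Record and log instead of rethrowing: a rethrow here would become an
            // *uncaught* exception and reach SparkUncaughtExceptionHandler.
            _exception = t
        }
      }
    }
    
    // On the task thread, the reader checks the field so that only this task
    // fails and TaskRunner handles it:
    //   writerThread.exception.foreach(e => throw e)
    ```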
    
    ## How was this patch tested?
    
    Register a `TestUncaughtExceptionHandler` for the test cases in `ScriptTransformationSuite`, then assert that no uncaught exception was handled.
    
    Before this patch "script transformation should not swallow errors from 
upstream operators (no serde)" and "script transformation should not swallow 
errors from upstream operators (with serde)"  throwing 
`IllegalArgumentException` and handle by `TestUncaughtExceptionHandler` .
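    
    In miniature, the test approach looks like this (assuming the `TestUncaughtExceptionHandler` added by this patch):
    
    ```scala
    val saved = Thread.getDefaultUncaughtExceptionHandler
    val handler = new TestUncaughtExceptionHandler
    Thread.setDefaultUncaughtExceptionHandler(handler)
    try {
      // ... run a query whose writer thread throws ...
      assert(handler.exception.isEmpty) // nothing escaped to the default handler
    } finally {
      Thread.setDefaultUncaughtExceptionHandler(saved)
    }
    ```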
    
    Closes #22149 from LuciferYang/fix-transformation-task-kill.
    
    Authored-by: yangjie01 <[email protected]>
    Signed-off-by: Wenchen Fan <[email protected]>
---
 .../hive/execution/ScriptTransformationExec.scala  |  5 +++-
 .../spark/sql/hive/execution/SQLQuerySuite.scala   | 33 +++++++++++++++++++++-
 .../hive/execution/ScriptTransformationSuite.scala | 32 ++++++++++++++++++++-
 .../execution/TestUncaughtExceptionHandler.scala   | 31 ++++++++++++++++++++
 4 files changed, 98 insertions(+), 3 deletions(-)

diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformationExec.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformationExec.scala
index 7b35a5f..905cb52 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformationExec.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformationExec.scala
@@ -308,12 +308,15 @@ private class ScriptTransformationWriterThread(
       }
       threwException = false
     } catch {
+      // SPARK-25158: the exception should not be rethrown here, otherwise it will be captured by
+      // SparkUncaughtExceptionHandler and the Executor will exit because of this uncaught exception.
+      // Passing the exception to `ScriptTransformationExec` is enough.
       case t: Throwable =>
        // An error occurred while writing input, so kill the child process. According to the
         // Javadoc this call will not throw an exception:
         _exception = t
         proc.destroy()
-        throw t
+        logError("Thread-ScriptTransformation-Feed exited because of: ", t)
     } finally {
       try {
         Utils.tryLogNonFatalError(outputStream.close())
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index d506edc..ce7661a 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -26,7 +26,7 @@ import java.util.{Locale, Set}
 import com.google.common.io.Files
 import org.apache.hadoop.fs.{FileSystem, Path}
 
-import org.apache.spark.TestUtils
+import org.apache.spark.{SparkException, TestUtils}
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.analysis.{EliminateSubqueryAliases, FunctionRegistry}
@@ -2348,4 +2348,35 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
     }
   }
 
+  test("SPARK-25158: " +
+    "Executor accidentally exit because ScriptTransformationWriterThread throw 
Exception") {
+    withTempView("test") {
+      val defaultUncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler
+      try {
+        val uncaughtExceptionHandler = new TestUncaughtExceptionHandler
+        Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler)
+
+        // Use a bad udf to generate failed inputs.
+        val badUDF = org.apache.spark.sql.functions.udf((x: Int) => {
+          if (x < 1) x
+          else throw new RuntimeException("Failed to produce data.")
+        })
+        spark
+          .range(5)
+          .select(badUDF('id).as("a"))
+          .createOrReplaceTempView("test")
+        val scriptFilePath = getTestResourcePath("data")
+        val e = intercept[SparkException] {
+          sql(
+            s"""FROM test SELECT TRANSFORM(a)
+               |USING 'python $scriptFilePath/scripts/test_transform.py "\t"'
+             """.stripMargin).collect()
+        }
+        assert(e.getMessage.contains("Failed to produce data."))
+        assert(uncaughtExceptionHandler.exception.isEmpty)
+      } finally {
+        Thread.setDefaultUncaughtExceptionHandler(defaultUncaughtExceptionHandler)
+      }
+    }
+  }
 }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ScriptTransformationSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ScriptTransformationSuite.scala
index 5f73b71..ed3b376 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ScriptTransformationSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ScriptTransformationSuite.scala
@@ -18,6 +18,7 @@
 package org.apache.spark.sql.hive.execution
 
 import org.apache.hadoop.hive.serde2.`lazy`.LazySimpleSerDe
+import org.scalatest.BeforeAndAfterEach
 import org.scalatest.exceptions.TestFailedException
 
 import org.apache.spark.{SparkException, TaskContext, TestUtils}
@@ -29,7 +30,8 @@ import org.apache.spark.sql.execution.{SparkPlan, SparkPlanTest, UnaryExecNode}
 import org.apache.spark.sql.hive.test.TestHiveSingleton
 import org.apache.spark.sql.types.StringType
 
-class ScriptTransformationSuite extends SparkPlanTest with TestHiveSingleton {
+class ScriptTransformationSuite extends SparkPlanTest with TestHiveSingleton with
+  BeforeAndAfterEach {
   import spark.implicits._
 
   private val noSerdeIOSchema = HiveScriptIOSchema(
@@ -49,6 +51,26 @@ class ScriptTransformationSuite extends SparkPlanTest with TestHiveSingleton {
     outputSerdeClass = Some(classOf[LazySimpleSerDe].getCanonicalName)
   )
 
+  private var defaultUncaughtExceptionHandler: Thread.UncaughtExceptionHandler = _
+
+  private val uncaughtExceptionHandler = new TestUncaughtExceptionHandler
+
+  protected override def beforeAll(): Unit = {
+    super.beforeAll()
+    defaultUncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler
+    Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler)
+  }
+
+  protected override def afterAll(): Unit = {
+    super.afterAll()
+    Thread.setDefaultUncaughtExceptionHandler(defaultUncaughtExceptionHandler)
+  }
+
+  override protected def afterEach(): Unit = {
+    super.afterEach()
+    uncaughtExceptionHandler.cleanStatus()
+  }
+
   test("cat without SerDe") {
     assume(TestUtils.testCommandAvailable("/bin/bash"))
 
@@ -63,6 +85,7 @@ class ScriptTransformationSuite extends SparkPlanTest with TestHiveSingleton {
         ioschema = noSerdeIOSchema
       ),
       rowsDf.collect())
+    assert(uncaughtExceptionHandler.exception.isEmpty)
   }
 
   test("cat with LazySimpleSerDe") {
@@ -79,6 +102,7 @@ class ScriptTransformationSuite extends SparkPlanTest with TestHiveSingleton {
         ioschema = serdeIOSchema
       ),
       rowsDf.collect())
+    assert(uncaughtExceptionHandler.exception.isEmpty)
   }
 
   test("script transformation should not swallow errors from upstream 
operators (no serde)") {
@@ -98,6 +122,8 @@ class ScriptTransformationSuite extends SparkPlanTest with TestHiveSingleton {
         rowsDf.collect())
     }
     assert(e.getMessage().contains("intentional exception"))
+    // Before SPARK-25158, uncaughtExceptionHandler would catch the IllegalArgumentException
+    assert(uncaughtExceptionHandler.exception.isEmpty)
   }
 
   test("script transformation should not swallow errors from upstream 
operators (with serde)") {
@@ -117,6 +143,8 @@ class ScriptTransformationSuite extends SparkPlanTest with TestHiveSingleton {
         rowsDf.collect())
     }
     assert(e.getMessage().contains("intentional exception"))
+    // Before SPARK-25158, uncaughtExceptionHandler would catch the IllegalArgumentException
+    assert(uncaughtExceptionHandler.exception.isEmpty)
   }
 
   test("SPARK-14400 script transformation should fail for bad script command") 
{
@@ -135,6 +163,7 @@ class ScriptTransformationSuite extends SparkPlanTest with TestHiveSingleton {
       SparkPlanTest.executePlan(plan, hiveContext)
     }
     assert(e.getMessage.contains("Subprocess exited with status"))
+    assert(uncaughtExceptionHandler.exception.isEmpty)
   }
 
   test("SPARK-24339 verify the result after pruning the unused columns") {
@@ -154,6 +183,7 @@ class ScriptTransformationSuite extends SparkPlanTest with TestHiveSingleton {
         ioschema = serdeIOSchema
       ),
       rowsDf.select("name").collect())
+    assert(uncaughtExceptionHandler.exception.isEmpty)
   }
 }
 
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/TestUncaughtExceptionHandler.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/TestUncaughtExceptionHandler.scala
new file mode 100644
index 0000000..681eb4e
--- /dev/null
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/TestUncaughtExceptionHandler.scala
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.hive.execution
+
+class TestUncaughtExceptionHandler extends Thread.UncaughtExceptionHandler {
+
+  @volatile private var _exception: Throwable = _
+
+  def exception: Option[Throwable] = Option(_exception)
+
+  def cleanStatus(): Unit = _exception = null
+
+  override def uncaughtException(t: Thread, e: Throwable): Unit = {
+    _exception = e
+  }
+}

