Repository: spark
Updated Branches:
  refs/heads/branch-2.0 a32531a72 -> 7d87fc964


[SPARK-16748][SQL] SparkExceptions during planning should not be wrapped in 
TreeNodeException

## What changes were proposed in this pull request?
We do not want SparkExceptions from job failures in the planning phase to 
create a TreeNodeException. Hence, do not wrap SparkException in TreeNodeException.

## How was this patch tested?
New unit test

Author: Tathagata Das <[email protected]>

Closes #14395 from tdas/SPARK-16748.

(cherry picked from commit bbc247548ac6faeca15afc05c266cee37ef13416)
Signed-off-by: Yin Huai <[email protected]>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/7d87fc96
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/7d87fc96
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/7d87fc96

Branch: refs/heads/branch-2.0
Commit: 7d87fc9649b141a1888b89363a8e311690d0fb56
Parents: a32531a
Author: Tathagata Das <[email protected]>
Authored: Fri Jul 29 19:59:35 2016 -0700
Committer: Yin Huai <[email protected]>
Committed: Fri Jul 29 20:00:06 2016 -0700

----------------------------------------------------------------------
 .../org/apache/spark/sql/catalyst/errors/package.scala    |  8 +++++++-
 .../test/scala/org/apache/spark/sql/SQLQuerySuite.scala   | 10 +++++++++-
 2 files changed, 16 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/7d87fc96/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
index 0420b4b..0d45f37 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
@@ -17,7 +17,10 @@
 
 package org.apache.spark.sql.catalyst
 
+import scala.util.control.NonFatal
+
 import org.apache.spark.sql.catalyst.trees.TreeNode
+import org.apache.spark.SparkException
 
 /**
  * Functions for attaching and retrieving trees that are associated with 
errors.
@@ -47,7 +50,10 @@ package object errors {
    */
   def attachTree[TreeType <: TreeNode[_], A](tree: TreeType, msg: String = 
"")(f: => A): A = {
     try f catch {
-      case e: Exception => throw new TreeNodeException(tree, msg, e)
+      // SPARK-16748: We do not want SparkExceptions from job failures in the 
planning phase
+      // to create TreeNodeException. Hence, wrap exception only if it is not 
SparkException.
+      case NonFatal(e) if !e.isInstanceOf[SparkException] =>
+        throw new TreeNodeException(tree, msg, e)
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/7d87fc96/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index d965901..b4614e6 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql
 import java.math.MathContext
 import java.sql.Timestamp
 
-import org.apache.spark.AccumulatorSuite
+import org.apache.spark.{AccumulatorSuite, SparkException}
 import org.apache.spark.sql.catalyst.analysis.UnresolvedException
 import org.apache.spark.sql.catalyst.expressions.SortOrder
 import org.apache.spark.sql.catalyst.plans.logical.Aggregate
@@ -1339,6 +1339,14 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
     checkAggregation("SELECT key + 1 + 1, COUNT(*) FROM testData GROUP BY key 
+ 1", false)
   }
 
+  testQuietly(
+    "SPARK-16748: SparkExceptions during planning should not wrapped in 
TreeNodeException") {
+    intercept[SparkException] {
+      val df = spark.range(0, 5).map(x => (1 / 
x).toString).toDF("a").orderBy("a")
+      df.queryExecution.toRdd // force physical planning, but not execution of 
the plan
+    }
+  }
+
   test("Test to check we can use Long.MinValue") {
     checkAnswer(
       sql(s"SELECT ${Long.MinValue} FROM testData ORDER BY key LIMIT 1"), 
Row(Long.MinValue)


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to