Repository: spark
Updated Branches:
  refs/heads/master 2182e4322 -> bbc247548


[SPARK-16748][SQL] SparkExceptions during planning should not be wrapped in TreeNodeException

## What changes were proposed in this pull request?
We do not want SparkExceptions from job failures in the planning phase to be wrapped in a 
TreeNodeException. Hence, do not wrap SparkException in TreeNodeException.
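
For context, here is a minimal sketch (not part of this patch) of the caller-visible effect, assuming a SparkSession named spark is in scope; the DataFrame expression is taken from the new unit test. A job that fails while the sort is being planned now surfaces as a plain SparkException rather than a TreeNodeException.

import org.apache.spark.SparkException

val df = spark.range(0, 5).map(x => (1 / x).toString).toDF("a").orderBy("a")
try {
  // The sort's range partitioning samples the data, so physical planning
  // itself runs a job that hits the division by zero for x = 0.
  df.queryExecution.toRdd
} catch {
  // Before this patch the error arrived as TreeNodeException with the
  // SparkException buried in getCause; now it propagates directly.
  case e: SparkException => println(s"planning failed: ${e.getMessage}")
}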

## How was this patch tested?
New unit test

Author: Tathagata Das <tathagata.das1...@gmail.com>

Closes #14395 from tdas/SPARK-16748.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/bbc24754
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/bbc24754
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/bbc24754

Branch: refs/heads/master
Commit: bbc247548ac6faeca15afc05c266cee37ef13416
Parents: 2182e43
Author: Tathagata Das <tathagata.das1...@gmail.com>
Authored: Fri Jul 29 19:59:35 2016 -0700
Committer: Yin Huai <yh...@databricks.com>
Committed: Fri Jul 29 19:59:35 2016 -0700

----------------------------------------------------------------------
 .../org/apache/spark/sql/catalyst/errors/package.scala    |  8 +++++++-
 .../test/scala/org/apache/spark/sql/SQLQuerySuite.scala   | 10 +++++++++-
 2 files changed, 16 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/bbc24754/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
index 0420b4b..0d45f37 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
@@ -17,7 +17,10 @@
 
 package org.apache.spark.sql.catalyst
 
+import scala.util.control.NonFatal
+
 import org.apache.spark.sql.catalyst.trees.TreeNode
+import org.apache.spark.SparkException
 
 /**
  * Functions for attaching and retrieving trees that are associated with errors.
@@ -47,7 +50,10 @@ package object errors {
    */
   def attachTree[TreeType <: TreeNode[_], A](tree: TreeType, msg: String = "")(f: => A): A = {
     try f catch {
-      case e: Exception => throw new TreeNodeException(tree, msg, e)
+      // SPARK-16748: We do not want SparkExceptions from job failures in the planning phase
+      // to create TreeNodeException. Hence, wrap exception only if it is not SparkException.
+      case NonFatal(e) if !e.isInstanceOf[SparkException] =>
+        throw new TreeNodeException(tree, msg, e)
     }
   }
 }
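
For reference, a rough standalone sketch (not part of this commit) of how the updated attachTree behaves; the Literal node is just a convenient TreeNode to attach the error to, and scala.util.Try is used to capture whatever gets thrown.

import scala.util.Try
import org.apache.spark.SparkException
import org.apache.spark.sql.catalyst.errors.{attachTree, TreeNodeException}
import org.apache.spark.sql.catalyst.expressions.Literal

val node = Literal(1)

// An arbitrary non-fatal failure is still wrapped, so the offending tree travels with the error.
val wrapped = Try(attachTree(node, "planning") { throw new IllegalStateException("boom") })
assert(wrapped.failed.get.isInstanceOf[TreeNodeException[_]])

// A SparkException, e.g. from a job run while planning, is now rethrown unchanged.
val passedThrough = Try(attachTree(node, "planning") { throw new SparkException("job failed") })
assert(passedThrough.failed.get.isInstanceOf[SparkException])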

http://git-wip-us.apache.org/repos/asf/spark/blob/bbc24754/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index d89bda1..6e485a8 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql
 import java.math.MathContext
 import java.sql.Timestamp
 
-import org.apache.spark.AccumulatorSuite
+import org.apache.spark.{AccumulatorSuite, SparkException}
 import org.apache.spark.sql.catalyst.analysis.UnresolvedException
 import org.apache.spark.sql.catalyst.expressions.SortOrder
 import org.apache.spark.sql.catalyst.plans.logical.Aggregate
@@ -1339,6 +1339,14 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
     checkAggregation("SELECT key + 1 + 1, COUNT(*) FROM testData GROUP BY key + 1", false)
   }
 
+  testQuietly(
+    "SPARK-16748: SparkExceptions during planning should not be wrapped in TreeNodeException") {
+    intercept[SparkException] {
+      val df = spark.range(0, 5).map(x => (1 / x).toString).toDF("a").orderBy("a")
+      df.queryExecution.toRdd // force physical planning, but not execution of the plan
+    }
+  }
+
   test("Test to check we can use Long.MinValue") {
     checkAnswer(
       sql(s"SELECT ${Long.MinValue} FROM testData ORDER BY key LIMIT 1"), Row(Long.MinValue)
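
For comparison, before this patch the same failure surfaced wrapped, so a test would have had to unwrap it roughly as sketched below (illustrative only, written against the pre-fix attachTree and assuming the ScalaTest intercept helper available in this suite):

import org.apache.spark.SparkException
import org.apache.spark.sql.catalyst.errors.TreeNodeException

val e = intercept[TreeNodeException[_]] {
  spark.range(0, 5).map(x => (1 / x).toString).toDF("a").orderBy("a").queryExecution.toRdd
}
assert(e.getCause.isInstanceOf[SparkException])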


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
