Repository: spark
Updated Branches:
  refs/heads/branch-1.2 36e15b48e -> f6ee80b18


[SPARK-5846] Correctly set job description and pool for SQL jobs

This is #4630, modified for the 1.2 branch, since it presumably makes sense 
to fix this issue in that branch as well (again, unless I missed something 
obvious here...)

Author: Kay Ousterhout <kayousterh...@gmail.com>

Closes #4631 from kayousterhout/SPARK-5846_1.2.1 and squashes the following 
commits:

ffe8ff2 [Kay Ousterhout] [SPARK-5846] Correctly set job description and pool 
for SQL jobs
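
For context: the two calls this patch moves are thread-local SparkContext
settings. setJobGroup attaches a group id and a human-readable description
to every job the current thread subsequently submits, and the
"spark.scheduler.pool" local property routes those jobs to a fair-scheduler
pool. In the old code both calls ran after result = hiveContext.sql(statement),
so jobs triggered while evaluating the statement missed the description and
pool; moving them to the top of run() makes them apply to every job the
statement launches. A minimal, self-contained sketch of the two calls (the
local-mode setup and the "fairPool" name are illustrative assumptions, not
part of the patch):

  import org.apache.spark.{SparkConf, SparkContext}

  object JobGroupSketch {
    def main(args: Array[String]): Unit = {
      // Illustrative local-mode context, not part of the patch.
      val sc = new SparkContext(
        new SparkConf().setMaster("local[2]").setAppName("job-group-sketch"))

      // Both settings are per-thread and must be in place BEFORE any job is
      // submitted on this thread -- which is exactly what the patch fixes.
      sc.setJobGroup("group-42", "SELECT count(*) FROM t") // job description in the web UI
      sc.setLocalProperty("spark.scheduler.pool", "fairPool") // hypothetical pool name

      // Every job this thread now launches carries the group id, the
      // description, and the pool set above.
      val n = sc.parallelize(1 to 1000).count()
      println(s"count = $n")

      sc.stop()
    }
  }

A side benefit of the group id: all of a statement's jobs can then be
cancelled as a unit with sc.cancelJobGroup("group-42").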


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/f6ee80b1
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/f6ee80b1
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/f6ee80b1

Branch: refs/heads/branch-1.2
Commit: f6ee80b1885cb3822c52a4aa92ea0115c991e43f
Parents: 36e15b4
Author: Kay Ousterhout <kayousterh...@gmail.com>
Authored: Thu Feb 19 10:03:56 2015 +0800
Committer: Cheng Lian <l...@databricks.com>
Committed: Thu Feb 19 10:03:56 2015 +0800

----------------------------------------------------------------------
 .../org/apache/spark/sql/hive/thriftserver/Shim12.scala   | 10 +++++-----
 .../org/apache/spark/sql/hive/thriftserver/Shim13.scala   | 10 +++++-----
 2 files changed, 10 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/f6ee80b1/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala b/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
index 9258ad0..26ebc3b 100644
--- a/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
+++ b/sql/hive-thriftserver/v0.12.0/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim12.scala
@@ -186,6 +186,11 @@ private[hive] class SparkExecuteStatementOperation(
   def run(): Unit = {
     logInfo(s"Running query '$statement'")
     setState(OperationState.RUNNING)
+    val groupId = round(random * 1000000).toString
+    hiveContext.sparkContext.setJobGroup(groupId, statement)
+    sessionToActivePool.get(parentSession).foreach { pool =>
+      hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
+    }
     try {
       result = hiveContext.sql(statement)
       logDebug(result.queryExecution.toString())
@@ -196,11 +201,6 @@ private[hive] class SparkExecuteStatementOperation(
         case _ =>
       }
 
-      val groupId = round(random * 1000000).toString
-      hiveContext.sparkContext.setJobGroup(groupId, statement)
-      sessionToActivePool.get(parentSession).foreach { pool =>
-        hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
-      }
       iter = {
         val useIncrementalCollect =
           hiveContext.getConf("spark.sql.thriftServer.incrementalCollect", "false").toBoolean

http://git-wip-us.apache.org/repos/asf/spark/blob/f6ee80b1/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
index 17f1ad3..5519db1 100644
--- a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
+++ b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
@@ -157,6 +157,11 @@ private[hive] class SparkExecuteStatementOperation(
   def run(): Unit = {
     logInfo(s"Running query '$statement'")
     setState(OperationState.RUNNING)
+    val groupId = round(random * 1000000).toString
+    hiveContext.sparkContext.setJobGroup(groupId, statement)
+    sessionToActivePool.get(parentSession).foreach { pool =>
+      hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
+    }
     try {
       result = hiveContext.sql(statement)
       logDebug(result.queryExecution.toString())
@@ -167,11 +172,6 @@ private[hive] class SparkExecuteStatementOperation(
         case _ =>
       }
 
-      val groupId = round(random * 1000000).toString
-      hiveContext.sparkContext.setJobGroup(groupId, statement)
-      sessionToActivePool.get(parentSession).foreach { pool =>
-        hiveContext.sparkContext.setLocalProperty("spark.scheduler.pool", pool)
-      }
       iter = {
         val useIncrementalCollect =
           hiveContext.getConf("spark.sql.thriftServer.incrementalCollect", "false").toBoolean

