Repository: spark
Updated Branches:
  refs/heads/master 4bc3bb29a -> adc830329


[SPARK-1470][SPARK-1842] Use the scala-logging wrapper instead of the direct slf4j API

Author: GuoQiang Li <[email protected]>

Closes #1369 from witgo/SPARK-1470_new and squashes the following commits:

66a1641 [GuoQiang Li] IncompatibleResultTypeProblem
73a89ba [GuoQiang Li] Use the scala-logging wrapper instead of the direct slf4j API.
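
The net effect of the change: org.apache.spark.Logging keeps its by-name
message parameters but now delegates to scala-logging's macro-based Logger,
so the explicit is<Level>Enabled guards in the trait become redundant. A
minimal before/after sketch of a call site (illustrative only, not part of
the commit):

    // Before: guard by hand so the message string is only built when needed.
    if (log.isDebugEnabled) log.debug(s"processing $n items")

    // After: scala-logging's debug() is a macro that inserts an equivalent
    // guard at the call site, so the interpolation cost is still avoided.
    log.debug(s"processing $n items")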


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/adc83032
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/adc83032
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/adc83032

Branch: refs/heads/master
Commit: adc8303294e26efb4ed15e5f5ba1062f7988625d
Parents: 4bc3bb2
Author: GuoQiang Li <[email protected]>
Authored: Fri Aug 1 23:55:11 2014 -0700
Committer: Patrick Wendell <[email protected]>
Committed: Fri Aug 1 23:55:11 2014 -0700

----------------------------------------------------------------------
 core/pom.xml                                    |  4 +
 .../main/scala/org/apache/spark/Logging.scala   | 39 +++++----
 .../org/apache/spark/util/SignalLogger.scala    |  2 +-
 mllib/pom.xml                                   |  4 +
 pom.xml                                         |  5 ++
 project/MimaExcludes.scala                      | 91 +++++++++++++++++++-
 sql/catalyst/pom.xml                            |  5 --
 .../spark/sql/catalyst/analysis/Analyzer.scala  |  4 +-
 .../catalyst/analysis/HiveTypeCoercion.scala    |  8 +-
 .../catalyst/expressions/BoundAttribute.scala   |  2 +-
 .../expressions/codegen/GenerateOrdering.scala  |  4 +-
 .../org/apache/spark/sql/catalyst/package.scala |  1 -
 .../sql/catalyst/planning/QueryPlanner.scala    |  2 +-
 .../spark/sql/catalyst/planning/patterns.scala  |  6 +-
 .../apache/spark/sql/catalyst/rules/Rule.scala  |  2 +-
 .../spark/sql/catalyst/rules/RuleExecutor.scala | 12 +--
 .../spark/sql/catalyst/trees/package.scala      |  8 +-
 .../scala/org/apache/spark/sql/SQLContext.scala |  2 +-
 .../compression/CompressibleColumnBuilder.scala |  5 +-
 .../apache/spark/sql/execution/Exchange.scala   |  2 +-
 .../org/apache/spark/sql/json/JsonRDD.scala     |  2 +-
 .../scala/org/apache/spark/sql/package.scala    |  2 -
 .../spark/sql/columnar/ColumnTypeSuite.scala    |  4 +-
 .../hive/thriftserver/HiveThriftServer2.scala   | 12 +--
 .../hive/thriftserver/SparkSQLCLIDriver.scala   |  2 +-
 .../sql/hive/thriftserver/SparkSQLDriver.scala  |  6 +-
 .../sql/hive/thriftserver/SparkSQLEnv.scala     |  6 +-
 .../server/SparkSQLOperationManager.scala       | 13 +--
 .../thriftserver/HiveThriftServer2Suite.scala   |  2 +-
 .../org/apache/spark/sql/hive/HiveContext.scala |  2 +-
 .../spark/sql/hive/HiveMetastoreCatalog.scala   |  3 +-
 .../org/apache/spark/sql/hive/TestHive.scala    | 10 +--
 .../org/apache/spark/sql/hive/hiveUdfs.scala    |  4 +-
 .../sql/hive/execution/HiveComparisonTest.scala | 22 ++---
 .../sql/hive/execution/HiveQueryFileTest.scala  |  2 +-
 35 files changed, 203 insertions(+), 97 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/core/pom.xml
----------------------------------------------------------------------
diff --git a/core/pom.xml b/core/pom.xml
index 7c60cf1..47766ae 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -99,6 +99,10 @@
       <artifactId>jcl-over-slf4j</artifactId>
     </dependency>
     <dependency>
+      <groupId>com.typesafe.scala-logging</groupId>
+      <artifactId>scala-logging-slf4j_${scala.binary.version}</artifactId>
+    </dependency>
+    <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
     </dependency>
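
For reference, the equivalent sbt coordinate (version 2.1.2, as pinned in
the root pom's dependencyManagement further below) would be roughly:

    // sbt sketch; artifact and version taken from the poms in this commit.
    libraryDependencies +=
      "com.typesafe.scala-logging" %% "scala-logging-slf4j" % "2.1.2"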

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/core/src/main/scala/org/apache/spark/Logging.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/Logging.scala b/core/src/main/scala/org/apache/spark/Logging.scala
index 807ef3e..6e61c00 100644
--- a/core/src/main/scala/org/apache/spark/Logging.scala
+++ b/core/src/main/scala/org/apache/spark/Logging.scala
@@ -18,8 +18,9 @@
 package org.apache.spark
 
 import org.apache.log4j.{LogManager, PropertyConfigurator}
-import org.slf4j.{Logger, LoggerFactory}
+import org.slf4j.LoggerFactory
 import org.slf4j.impl.StaticLoggerBinder
+import com.typesafe.scalalogging.slf4j.Logger
 
 import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.util.Utils
@@ -39,61 +40,69 @@ trait Logging {
   // be serialized and used on another machine
   @transient private var log_ : Logger = null
 
+  // Method to get the logger name for this object
+  protected def logName = {
+    var className = this.getClass.getName
+    // Ignore trailing $'s in the class names for Scala objects
+    if (className.endsWith("$")) {
+      className = className.substring(0, className.length - 1)
+    }
+    className
+  }
+
   // Method to get or create the logger for this object
   protected def log: Logger = {
     if (log_ == null) {
       initializeIfNecessary()
-      var className = this.getClass.getName
-      // Ignore trailing $'s in the class names for Scala objects
-      log_ = LoggerFactory.getLogger(className.stripSuffix("$"))
+      log_ = Logger(LoggerFactory.getLogger(logName))
     }
     log_
   }
 
   // Log methods that take only a String
   protected def logInfo(msg: => String) {
-    if (log.isInfoEnabled) log.info(msg)
+    log.info(msg)
   }
 
   protected def logDebug(msg: => String) {
-    if (log.isDebugEnabled) log.debug(msg)
+    log.debug(msg)
   }
 
   protected def logTrace(msg: => String) {
-    if (log.isTraceEnabled) log.trace(msg)
+    log.trace(msg)
   }
 
   protected def logWarning(msg: => String) {
-    if (log.isWarnEnabled) log.warn(msg)
+    log.warn(msg)
   }
 
   protected def logError(msg: => String) {
-    if (log.isErrorEnabled) log.error(msg)
+    log.error(msg)
   }
 
   // Log methods that take Throwables (Exceptions/Errors) too
   protected def logInfo(msg: => String, throwable: Throwable) {
-    if (log.isInfoEnabled) log.info(msg, throwable)
+    log.info(msg, throwable)
   }
 
   protected def logDebug(msg: => String, throwable: Throwable) {
-    if (log.isDebugEnabled) log.debug(msg, throwable)
+    log.debug(msg, throwable)
   }
 
   protected def logTrace(msg: => String, throwable: Throwable) {
-    if (log.isTraceEnabled) log.trace(msg, throwable)
+    log.trace(msg, throwable)
   }
 
   protected def logWarning(msg: => String, throwable: Throwable) {
-    if (log.isWarnEnabled) log.warn(msg, throwable)
+    log.warn(msg, throwable)
   }
 
   protected def logError(msg: => String, throwable: Throwable) {
-    if (log.isErrorEnabled) log.error(msg, throwable)
+    log.error(msg, throwable)
   }
 
   protected def isTraceEnabled(): Boolean = {
-    log.isTraceEnabled
+    log.underlying.isTraceEnabled
   }
 
   private def initializeIfNecessary() {
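
With scala-logging performing the level check and the trait keeping its
by-name msg: => String parameters, callers pay no string-construction cost
for suppressed levels. A minimal usage sketch (the Worker class is
hypothetical, not part of the diff):

    import org.apache.spark.Logging

    class Worker extends Logging {
      def process(items: Seq[Int]): Unit = {
        // The interpolation below is evaluated only when DEBUG is enabled:
        // msg is passed by name, and scala-logging's macro guards the call.
        logDebug(s"processing ${items.size} items")
      }
    }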

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/core/src/main/scala/org/apache/spark/util/SignalLogger.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/util/SignalLogger.scala b/core/src/main/scala/org/apache/spark/util/SignalLogger.scala
index f77488e..e84a6b9 100644
--- a/core/src/main/scala/org/apache/spark/util/SignalLogger.scala
+++ b/core/src/main/scala/org/apache/spark/util/SignalLogger.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.util
 
 import org.apache.commons.lang3.SystemUtils
-import org.slf4j.Logger
+import com.typesafe.scalalogging.slf4j.Logger
 import sun.misc.{Signal, SignalHandler}
 
 /**

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/mllib/pom.xml
----------------------------------------------------------------------
diff --git a/mllib/pom.xml b/mllib/pom.xml
index 9a33bd1..3007681 100644
--- a/mllib/pom.xml
+++ b/mllib/pom.xml
@@ -59,6 +59,10 @@
       <artifactId>breeze_${scala.binary.version}</artifactId>
       <version>0.7</version>
       <exclusions>
+        <exclusion>
+          <groupId>com.typesafe</groupId>
+          <artifactId>scalalogging-slf4j_${scala.binary.version}</artifactId>
+        </exclusion>  
        <!-- This is included as a compile-scoped dependency by jtransforms, which is
             a dependency of breeze. -->
         <exclusion>

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index ae97bf0..9d62cea 100644
--- a/pom.xml
+++ b/pom.xml
@@ -280,6 +280,11 @@
         <version>${slf4j.version}</version>
       </dependency>
       <dependency>
+        <groupId>com.typesafe.scala-logging</groupId>
+        <artifactId>scala-logging-slf4j_${scala.binary.version}</artifactId>
+        <version>2.1.2</version>
+      </dependency>
+      <dependency>
         <groupId>org.slf4j</groupId>
         <artifactId>jul-to-slf4j</artifactId>
         <version>${slf4j.version}</version>

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/project/MimaExcludes.scala
----------------------------------------------------------------------
diff --git a/project/MimaExcludes.scala b/project/MimaExcludes.scala
index 537ca0d..a0cee1d 100644
--- a/project/MimaExcludes.scala
+++ b/project/MimaExcludes.scala
@@ -103,14 +103,101 @@ object MimaExcludes {
             ProblemFilters.exclude[IncompatibleMethTypeProblem](
               "org.apache.spark.mllib.tree.impurity.Variance.calculate")
           ) ++
-          Seq ( // Package-private classes removed in SPARK-2341
+          Seq( // Package-private classes removed in SPARK-2341
             ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.mllib.util.BinaryLabelParser"),
             ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.mllib.util.BinaryLabelParser$"),
             ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.mllib.util.LabelParser"),
             ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.mllib.util.LabelParser$"),
             ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.mllib.util.MulticlassLabelParser"),
             ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.mllib.util.MulticlassLabelParser$")
-          )
+          ) ++
+           Seq(
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.bagel.Bagel.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.streaming.StreamingContext.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.streaming.dstream.DStream.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.mllib.recommendation.ALS.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.mllib.clustering.KMeans.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.mllib.classification.NaiveBayes.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.streaming.kafka.KafkaReceiver.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.SparkContext.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.rdd.PairRDDFunctions.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.rdd.OrderedRDDFunctions.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.rdd.SequenceFileRDDFunctions.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.rdd.DoubleRDDFunctions.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.streaming.twitter.TwitterReceiver.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.streaming.zeromq.ZeroMQReceiver.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.streaming.flume.FlumeReceiver.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.rdd.RDD.log"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.SparkConf.log"),
+
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.SparkConf.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.bagel.Bagel.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.streaming.StreamingContext.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.streaming.dstream.DStream.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.mllib.recommendation.ALS.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.mllib.clustering.KMeans.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.mllib.classification.NaiveBayes.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.streaming.twitter.TwitterReceiver.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.streaming.zeromq.ZeroMQReceiver.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.SparkContext.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.rdd.RDD.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.rdd.SequenceFileRDDFunctions.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.rdd.OrderedRDDFunctions.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.rdd.PairRDDFunctions.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.streaming.kafka.KafkaReceiver.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.rdd.DoubleRDDFunctions.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.streaming.flume.FlumeReceiver.org$apache$spark$Logging$$log__="),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.streaming.kafka.KafkaReceiver.org$apache$spark$Logging$$log_"),
+             ProblemFilters.exclude[IncompatibleMethTypeProblem]
+               ("org.apache.spark.streaming.twitter.TwitterReceiver.org$apache$spark$Logging$$log_"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.streaming.twitter.TwitterReceiver.org$apache$spark$Logging$$log_"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.streaming.zeromq.ZeroMQReceiver.org$apache$spark$Logging$$log_"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.bagel.Bagel.org$apache$spark$Logging$$log_"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.bagel.Bagel.org$apache$spark$Logging$$log_"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.streaming.flume.FlumeReceiver.org$apache$spark$Logging$$log_"),
+             ProblemFilters.exclude[IncompatibleResultTypeProblem]
+               ("org.apache.spark.streaming.kafka.KafkaReceiver.org$apache$spark$Logging$$log_")
+           )
         case v if v.startsWith("1.0") =>
           Seq(
             MimaBuild.excludeSparkPackage("api.java"),

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/catalyst/pom.xml
----------------------------------------------------------------------
diff --git a/sql/catalyst/pom.xml b/sql/catalyst/pom.xml
index 54fa96b..58d44e7 100644
--- a/sql/catalyst/pom.xml
+++ b/sql/catalyst/pom.xml
@@ -55,11 +55,6 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
-      <groupId>com.typesafe</groupId>
-      <artifactId>scalalogging-slf4j_${scala.binary.version}</artifactId>
-      <version>1.0.1</version>
-    </dependency>
-    <dependency>
       <groupId>org.scalatest</groupId>
       <artifactId>scalatest_${scala.binary.version}</artifactId>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 74c0104..2b36582 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -109,12 +109,12 @@ class Analyzer(catalog: Catalog, registry: FunctionRegistry, caseSensitive: Bool
   object ResolveReferences extends Rule[LogicalPlan] {
     def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
       case q: LogicalPlan if q.childrenResolved =>
-        logger.trace(s"Attempting to resolve ${q.simpleString}")
+        log.trace(s"Attempting to resolve ${q.simpleString}")
         q transformExpressions {
           case u @ UnresolvedAttribute(name) =>
            // Leave unchanged if resolution fails.  Hopefully will be resolved next round.
             val result = q.resolve(name).getOrElse(u)
-            logger.debug(s"Resolving $u to $result")
+            log.debug(s"Resolving $u to $result")
             result
         }
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
index 47c7ad0..eafbb70 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
@@ -75,7 +75,7 @@ trait HiveTypeCoercion {
             // Leave the same if the dataTypes match.
             case Some(newType) if a.dataType == newType.dataType => a
             case Some(newType) =>
-              logger.debug(s"Promoting $a to $newType in ${q.simpleString}}")
+              log.debug(s"Promoting $a to $newType in ${q.simpleString}}")
               newType
           }
       }
@@ -154,7 +154,7 @@ trait HiveTypeCoercion {
             (Alias(Cast(l, StringType), l.name)(), r)
 
           case (l, r) if l.dataType != r.dataType =>
-            logger.debug(s"Resolving mismatched union input ${l.dataType}, 
${r.dataType}")
+            log.debug(s"Resolving mismatched union input ${l.dataType}, 
${r.dataType}")
             findTightestCommonType(l.dataType, r.dataType).map { widestType =>
               val newLeft =
                 if (l.dataType == widestType) l else Alias(Cast(l, widestType), l.name)()
@@ -170,7 +170,7 @@ trait HiveTypeCoercion {
 
         val newLeft =
           if (castedLeft.map(_.dataType) != left.output.map(_.dataType)) {
-            logger.debug(s"Widening numeric types in union $castedLeft 
${left.output}")
+            log.debug(s"Widening numeric types in union $castedLeft 
${left.output}")
             Project(castedLeft, left)
           } else {
             left
@@ -178,7 +178,7 @@ trait HiveTypeCoercion {
 
         val newRight =
           if (castedRight.map(_.dataType) != right.output.map(_.dataType)) {
-            logger.debug(s"Widening numeric types in union $castedRight 
${right.output}")
+            log.debug(s"Widening numeric types in union $castedRight 
${right.output}")
             Project(castedRight, right)
           } else {
             right

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/BoundAttribute.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/BoundAttribute.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/BoundAttribute.scala
index f38f995..0913f15 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/BoundAttribute.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/BoundAttribute.scala
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.catalyst.expressions
 
-import org.apache.spark.sql.catalyst.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.errors.attachTree
 import org.apache.spark.sql.catalyst.types._
 import org.apache.spark.sql.catalyst.trees

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateOrdering.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateOrdering.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateOrdering.scala
index 4211998..e2552d4 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateOrdering.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateOrdering.scala
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.catalyst.expressions.codegen
 
-import com.typesafe.scalalogging.slf4j.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.types.{StringType, NumericType}
 
@@ -92,7 +92,7 @@ object GenerateOrdering extends CodeGenerator[Seq[SortOrder], Ordering[Row]] wit
       }
       new $orderingName()
       """
-    logger.debug(s"Generated Ordering: $code")
+    log.debug(s"Generated Ordering: $code")
     toolBox.eval(code).asInstanceOf[Ordering[Row]]
   }
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/package.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/package.scala
index ca96429..bdd07bb 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/package.scala
@@ -25,5 +25,4 @@ package object catalyst {
    */
   protected[catalyst] object ScalaReflectionLock
 
-  protected[catalyst] type Logging = com.typesafe.scalalogging.slf4j.Logging
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/QueryPlanner.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/QueryPlanner.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/QueryPlanner.scala
index 781ba48..5839c9f 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/QueryPlanner.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/QueryPlanner.scala
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.catalyst.planning
 
-import org.apache.spark.sql.catalyst.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.catalyst.trees.TreeNode
 

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
index bc763a4..06c5ffe 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.catalyst.planning
 import scala.annotation.tailrec
 
 import org.apache.spark.sql.catalyst.expressions._
-import org.apache.spark.sql.catalyst.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.plans._
 import org.apache.spark.sql.catalyst.plans.logical._
 
@@ -184,7 +184,7 @@ object ExtractEquiJoinKeys extends Logging with PredicateHelper {
 
   def unapply(plan: LogicalPlan): Option[ReturnType] = plan match {
     case join @ Join(left, right, joinType, condition) =>
-      logger.debug(s"Considering join on: $condition")
+      log.debug(s"Considering join on: $condition")
      // Find equi-join predicates that can be evaluated before the join, and thus can be used
       // as join keys.
       val (joinPredicates, otherPredicates) = 
@@ -202,7 +202,7 @@ object ExtractEquiJoinKeys extends Logging with PredicateHelper {
       val rightKeys = joinKeys.map(_._2)
 
       if (joinKeys.nonEmpty) {
-        logger.debug(s"leftKeys:${leftKeys} | rightKeys:${rightKeys}")
+        log.debug(s"leftKeys:${leftKeys} | rightKeys:${rightKeys}")
        Some((joinType, leftKeys, rightKeys, otherPredicates.reduceOption(And), left, right))
       } else {
         None

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/rules/Rule.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/rules/Rule.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/rules/Rule.scala
index f8960b3..03414b2 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/rules/Rule.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/rules/Rule.scala
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.catalyst.rules
 
-import org.apache.spark.sql.catalyst.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.trees.TreeNode
 
 abstract class Rule[TreeType <: TreeNode[_]] extends Logging {

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/rules/RuleExecutor.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/rules/RuleExecutor.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/rules/RuleExecutor.scala
index 6aa407c..20bf8ee 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/rules/RuleExecutor.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/rules/RuleExecutor.scala
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.catalyst.rules
 
-import org.apache.spark.sql.catalyst.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.trees.TreeNode
 import org.apache.spark.sql.catalyst.util.sideBySide
 
@@ -60,7 +60,7 @@ abstract class RuleExecutor[TreeType <: TreeNode[_]] extends Logging {
           case (plan, rule) =>
             val result = rule(plan)
             if (!result.fastEquals(plan)) {
-              logger.trace(
+              log.trace(
                 s"""
                   |=== Applying Rule ${rule.ruleName} ===
                  |${sideBySide(plan.treeString, result.treeString).mkString("\n")}
@@ -73,26 +73,26 @@ abstract class RuleExecutor[TreeType <: TreeNode[_]] extends Logging {
         if (iteration > batch.strategy.maxIterations) {
           // Only log if this is a rule that is supposed to run more than once.
           if (iteration != 2) {
-            logger.info(s"Max iterations (${iteration - 1}) reached for batch 
${batch.name}")
+            log.info(s"Max iterations (${iteration - 1}) reached for batch 
${batch.name}")
           }
           continue = false
         }
 
         if (curPlan.fastEquals(lastPlan)) {
-          logger.trace(s"Fixed point reached for batch ${batch.name} after 
$iteration iterations.")
+          log.trace(s"Fixed point reached for batch ${batch.name} after 
$iteration iterations.")
           continue = false
         }
         lastPlan = curPlan
       }
 
       if (!batchStartPlan.fastEquals(curPlan)) {
-        logger.debug(
+        log.debug(
           s"""
           |=== Result of Batch ${batch.name} ===
           |${sideBySide(plan.treeString, curPlan.treeString).mkString("\n")}
         """.stripMargin)
       } else {
-        logger.trace(s"Batch ${batch.name} has no effect.")
+        log.trace(s"Batch ${batch.name} has no effect.")
       }
     }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/package.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/package.scala
index 9a28d03..d725a92 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/package.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.sql.catalyst
 
+import org.apache.spark.Logging
+
 /**
 * A library for easily manipulating trees of operators.  Operators that extend TreeNode are
  * granted the following interface:
@@ -31,8 +33,8 @@ package org.apache.spark.sql.catalyst
  *   <li>debugging support - pretty printing, easy splicing of trees, etc.</li>
  * </ul>
  */
-package object trees {
+package object trees extends Logging {
   // Since we want tree nodes to be lightweight, we create one logger for all treenode instances.
-    protected val logger =
-      com.typesafe.scalalogging.slf4j.Logger(org.slf4j.LoggerFactory.getLogger("catalyst.trees"))
+  protected override def logName = "catalyst.trees"
+
 }
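
The logName hook added to Logging.scala above is what lets this package
object keep its historical "catalyst.trees" logger name. Any other class
mixing in the trait can do the same; a sketch with a hypothetical object
and logger name:

    import org.apache.spark.Logging

    object TreeUtils extends Logging {
      // Route messages to a fixed logger instead of the class-derived name.
      override protected def logName = "catalyst.treeutils"
      def ping(): Unit = logInfo("tree utilities ready")
    }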

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index dad7107..00dd34a 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -36,7 +36,7 @@ import org.apache.spark.sql.execution._
 import org.apache.spark.sql.execution.SparkStrategies
 import org.apache.spark.sql.json._
 import org.apache.spark.sql.parquet.ParquetRelation
-import org.apache.spark.SparkContext
+import org.apache.spark.{Logging, SparkContext}
 
 /**
  * :: AlphaComponent ::

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/core/src/main/scala/org/apache/spark/sql/columnar/compression/CompressibleColumnBuilder.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/columnar/compression/CompressibleColumnBuilder.scala b/sql/core/src/main/scala/org/apache/spark/sql/columnar/compression/CompressibleColumnBuilder.scala
index 4c6675c..828a889 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/columnar/compression/CompressibleColumnBuilder.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/columnar/compression/CompressibleColumnBuilder.scala
@@ -19,7 +19,8 @@ package org.apache.spark.sql.columnar.compression
 
 import java.nio.{ByteBuffer, ByteOrder}
 
-import org.apache.spark.sql.{Logging, Row}
+import org.apache.spark.Logging
+import org.apache.spark.sql.Row
 import org.apache.spark.sql.catalyst.types.NativeType
 import org.apache.spark.sql.columnar.{ColumnBuilder, NativeColumnBuilder}
 
@@ -101,7 +102,7 @@ private[sql] trait CompressibleColumnBuilder[T <: NativeType]
 
     copyColumnHeader(rawBuffer, compressedBuffer)
 
-    logger.info(s"Compressor for [$columnName]: $encoder, ratio: 
${encoder.compressionRatio}")
+    log.info(s"Compressor for [$columnName]: $encoder, ratio: 
${encoder.compressionRatio}")
     encoder.compress(rawBuffer, compressedBuffer, columnType)
   }
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
index 30712f0..0c3d537 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
@@ -101,7 +101,7 @@ private[sql] case class AddExchange(sqlContext: SQLContext) extends Rule[SparkPl
         !operator.requiredChildDistribution.zip(operator.children).map {
           case (required, child) =>
             val valid = child.outputPartitioning.satisfies(required)
-            logger.debug(
+            log.debug(
               s"${if (valid) "Valid" else "Invalid"} distribution," +
                 s"required: $required current: ${child.outputPartitioning}")
             valid

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/core/src/main/scala/org/apache/spark/sql/json/JsonRDD.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/json/JsonRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/json/JsonRDD.scala
index 70db1eb..a3d2a1c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/json/JsonRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/json/JsonRDD.scala
@@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.analysis.HiveTypeCoercion
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.types._
 import org.apache.spark.sql.catalyst.ScalaReflection
-import org.apache.spark.sql.Logging
+import org.apache.spark.Logging
 
 private[sql] object JsonRDD extends Logging {
 

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/core/src/main/scala/org/apache/spark/sql/package.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/package.scala b/sql/core/src/main/scala/org/apache/spark/sql/package.scala
index 0995a4e..f513eae 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/package.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/package.scala
@@ -32,8 +32,6 @@ import org.apache.spark.annotation.DeveloperApi
  */
 package object sql {
 
-  protected[sql] type Logging = com.typesafe.scalalogging.slf4j.Logging
-
   /**
    * :: DeveloperApi ::
    *

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnTypeSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnTypeSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnTypeSuite.scala
index 8293422..a165531 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnTypeSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnTypeSuite.scala
@@ -22,7 +22,7 @@ import java.sql.Timestamp
 
 import org.scalatest.FunSuite
 
-import org.apache.spark.sql.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.types._
 import org.apache.spark.sql.columnar.ColumnarTestUtils._
 import org.apache.spark.sql.execution.SparkSqlSerializer
@@ -166,7 +166,7 @@ class ColumnTypeSuite extends FunSuite with Logging {
 
       buffer.rewind()
       seq.foreach { expected =>
-        logger.info("buffer = " + buffer + ", expected = " + expected)
+        log.info("buffer = " + buffer + ", expected = " + expected)
         val extracted = columnType.extract(buffer)
         assert(
           expected === extracted,

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2.scala
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2.scala
index ddbc2a7..5959ba3 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2.scala
@@ -25,7 +25,7 @@ import org.apache.hadoop.hive.ql.session.SessionState
 import org.apache.hive.service.cli.thrift.ThriftBinaryCLIService
 import org.apache.hive.service.server.{HiveServer2, ServerOptionsProcessor}
 
-import org.apache.spark.sql.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.hive.HiveContext
 import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._
 
@@ -40,7 +40,7 @@ private[hive] object HiveThriftServer2 extends Logging {
     val optionsProcessor = new ServerOptionsProcessor("HiveThriftServer2")
 
     if (!optionsProcessor.process(args)) {
-      logger.warn("Error starting HiveThriftServer2 with given arguments")
+      log.warn("Error starting HiveThriftServer2 with given arguments")
       System.exit(-1)
     }
 
@@ -49,12 +49,12 @@ private[hive] object HiveThriftServer2 extends Logging {
     // Set all properties specified via command line.
     val hiveConf: HiveConf = ss.getConf
     hiveConf.getAllProperties.toSeq.sortBy(_._1).foreach { case (k, v) =>
-      logger.debug(s"HiveConf var: $k=$v")
+      log.debug(s"HiveConf var: $k=$v")
     }
 
     SessionState.start(ss)
 
-    logger.info("Starting SparkContext")
+    log.info("Starting SparkContext")
     SparkSQLEnv.init()
     SessionState.start(ss)
 
@@ -70,10 +70,10 @@ private[hive] object HiveThriftServer2 extends Logging {
       val server = new HiveThriftServer2(SparkSQLEnv.hiveContext)
       server.init(hiveConf)
       server.start()
-      logger.info("HiveThriftServer2 started")
+      log.info("HiveThriftServer2 started")
     } catch {
       case e: Exception =>
-        logger.error("Error starting HiveThriftServer2", e)
+        log.error("Error starting HiveThriftServer2", e)
         System.exit(-1)
     }
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
index cb17d7c..4d0c506 100755
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
@@ -37,7 +37,7 @@ import org.apache.hadoop.hive.ql.session.SessionState
 import org.apache.hadoop.hive.shims.ShimLoader
 import org.apache.thrift.transport.TSocket
 
-import org.apache.spark.sql.Logging
+import org.apache.spark.Logging
 
 private[hive] object SparkSQLCLIDriver {
   private var prompt = "spark-sql"

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala
index a56b19a..2767239 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.metastore.api.{FieldSchema, Schema}
 import org.apache.hadoop.hive.ql.Driver
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse
 
-import org.apache.spark.sql.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.hive.{HiveContext, HiveMetastoreTypes}
 
 private[hive] class SparkSQLDriver(val context: HiveContext = SparkSQLEnv.hiveContext)
@@ -40,7 +40,7 @@ private[hive] class SparkSQLDriver(val context: HiveContext = SparkSQLEnv.hiveCo
 
   private def getResultSetSchema(query: context.QueryExecution): Schema = {
     val analyzed = query.analyzed
-    logger.debug(s"Result Schema: ${analyzed.output}")
+    log.debug(s"Result Schema: ${analyzed.output}")
     if (analyzed.output.size == 0) {
       new Schema(new FieldSchema("Response code", "string", "") :: Nil, null)
     } else {
@@ -61,7 +61,7 @@ private[hive] class SparkSQLDriver(val context: HiveContext = SparkSQLEnv.hiveCo
       new CommandProcessorResponse(0)
     } catch {
       case cause: Throwable =>
-        logger.error(s"Failed in [$command]", cause)
+        log.error(s"Failed in [$command]", cause)
        new CommandProcessorResponse(-3, ExceptionUtils.getFullStackTrace(cause), null)
     }
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnv.scala
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnv.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnv.scala
index 451c3bd..dfc93b1 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnv.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnv.scala
@@ -20,13 +20,13 @@ package org.apache.spark.sql.hive.thriftserver
 import org.apache.hadoop.hive.ql.session.SessionState
 
 import org.apache.spark.scheduler.{SplitInfo, StatsReportListener}
-import org.apache.spark.sql.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.hive.HiveContext
 import org.apache.spark.{SparkConf, SparkContext}
 
 /** A singleton object for the master program. The slaves should not access this. */
 private[hive] object SparkSQLEnv extends Logging {
-  logger.debug("Initializing SparkSQLEnv")
+  log.debug("Initializing SparkSQLEnv")
 
   var hiveContext: HiveContext = _
   var sparkContext: SparkContext = _
@@ -47,7 +47,7 @@ private[hive] object SparkSQLEnv extends Logging {
 
   /** Cleans up and shuts down the Spark SQL environments. */
   def stop() {
-    logger.debug("Shutting down Spark SQL Environment")
+    log.debug("Shutting down Spark SQL Environment")
     // Stop the SparkContext
     if (SparkSQLEnv.sparkContext != null) {
       sparkContext.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/server/SparkSQLOperationManager.scala
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/server/SparkSQLOperationManager.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/server/SparkSQLOperationManager.scala
index a4e1f3e..2c6e24e 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/server/SparkSQLOperationManager.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/server/SparkSQLOperationManager.scala
@@ -30,10 +30,11 @@ import org.apache.hive.service.cli._
 import org.apache.hive.service.cli.operation.{ExecuteStatementOperation, Operation, OperationManager}
 import org.apache.hive.service.cli.session.HiveSession
 
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.types._
 import org.apache.spark.sql.hive.thriftserver.ReflectionUtils
 import org.apache.spark.sql.hive.{HiveContext, HiveMetastoreTypes}
-import org.apache.spark.sql.{Logging, SchemaRDD, Row => SparkRow}
+import org.apache.spark.sql.{SchemaRDD, Row => SparkRow}
 
 /**
 * Executes queries using Spark SQL, and maintains a list of handles to active queries.
@@ -55,7 +56,7 @@ class SparkSQLOperationManager(hiveContext: HiveContext) extends OperationManage
 
       def close(): Unit = {
         // RDDs will be cleaned automatically upon garbage collection.
-        logger.debug("CLOSING")
+        log.debug("CLOSING")
       }
 
       def getNextRowSet(order: FetchOrientation, maxRowsL: Long): RowSet = {
@@ -112,7 +113,7 @@ class SparkSQLOperationManager(hiveContext: HiveContext) extends OperationManage
       }
 
       def getResultSetSchema: TableSchema = {
-        logger.warn(s"Result Schema: ${result.queryExecution.analyzed.output}")
+        log.warn(s"Result Schema: ${result.queryExecution.analyzed.output}")
         if (result.queryExecution.analyzed.output.size == 0) {
           new TableSchema(new FieldSchema("Result", "string", "") :: Nil)
         } else {
@@ -124,11 +125,11 @@ class SparkSQLOperationManager(hiveContext: HiveContext) extends OperationManage
       }
 
       def run(): Unit = {
-        logger.info(s"Running query '$statement'")
+        log.info(s"Running query '$statement'")
         setState(OperationState.RUNNING)
         try {
           result = hiveContext.hql(statement)
-          logger.debug(result.queryExecution.toString())
+          log.debug(result.queryExecution.toString())
           val groupId = round(random * 1000000).toString
           hiveContext.sparkContext.setJobGroup(groupId, statement)
           iter = result.queryExecution.toRdd.toLocalIterator
@@ -138,7 +139,7 @@ class SparkSQLOperationManager(hiveContext: HiveContext) extends OperationManage
          // Actually do need to catch Throwable as some failures don't inherit from Exception and
           // HiveServer will silently swallow them.
           case e: Throwable =>
-            logger.error("Error executing query:",e)
+            log.error("Error executing query:",e)
             throw new HiveSQLException(e.toString)
         }
         setState(OperationState.FINISHED)

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suite.scala
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suite.scala
index fe3403b..b7b7c99 100644
--- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suite.scala
+++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suite.scala
@@ -27,7 +27,7 @@ import java.sql.{Connection, DriverManager, Statement}
 
 import org.scalatest.{BeforeAndAfterAll, FunSuite}
 
-import org.apache.spark.sql.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.util.getTempFilePath
 
 /**

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
index 7e3b872..1f31d35 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -207,7 +207,7 @@ class HiveContext(sc: SparkContext) extends SQLContext(sc) {
       }
     } catch {
       case e: Exception =>
-        logger.error(
+        log.error(
           s"""
             |======================
             |HIVE FAILURE OUTPUT

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index fa4e784..df36044 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -28,7 +28,8 @@ import org.apache.hadoop.hive.ql.plan.TableDesc
 import org.apache.hadoop.hive.serde2.Deserializer
 
 import org.apache.spark.annotation.DeveloperApi
-import org.apache.spark.sql.{SQLContext, Logging}
+import org.apache.spark.Logging
+import org.apache.spark.sql.SQLContext
 import org.apache.spark.sql.catalyst.analysis.{EliminateAnalysisOperators, Catalog}
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.plans.logical

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala
index c50e8c4..7376fb5 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala
@@ -148,7 +148,7 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
         describedTables ++
        logical.collect { case UnresolvedRelation(databaseName, name, _) => name }
       val referencedTestTables = referencedTables.filter(testTables.contains)
-      logger.debug(s"Query references test tables: 
${referencedTestTables.mkString(", ")}")
+      log.debug(s"Query references test tables: 
${referencedTestTables.mkString(", ")}")
       referencedTestTables.foreach(loadTestTable)
       // Proceed with analysis.
       analyzer(logical)
@@ -273,7 +273,7 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
     if (!(loadedTables contains name)) {
      // Marks the table as loaded first to prevent infinite mutually recursive table loading.
       loadedTables += name
-      logger.info(s"Loading test table $name")
+      log.info(s"Loading test table $name")
       val createCmds =
         testTables.get(name).map(_.commands).getOrElse(sys.error(s"Unknown 
test table $name"))
       createCmds.foreach(_())
@@ -312,7 +312,7 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
 
       loadedTables.clear()
       catalog.client.getAllTables("default").foreach { t =>
-        logger.debug(s"Deleting table $t")
+        log.debug(s"Deleting table $t")
         val table = catalog.client.getTable("default", t)
 
         catalog.client.getIndexes("default", t, 255).foreach { index =>
@@ -325,7 +325,7 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
       }
 
       catalog.client.getAllDatabases.filterNot(_ == "default").foreach { db =>
-        logger.debug(s"Dropping Database: $db")
+        log.debug(s"Dropping Database: $db")
         catalog.client.dropDatabase(db, true, false, true)
       }
 
@@ -347,7 +347,7 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
       loadTestTable("srcpart")
     } catch {
       case e: Exception =>
-        logger.error(s"FATAL ERROR: Failed to reset TestDB state. $e")
+        log.error(s"FATAL ERROR: Failed to reset TestDB state. $e")
        // At this point there is really no reason to continue, but the test framework traps exits.
        // So instead we just pause forever so that at least the developer can see where things
        // started to go wrong.

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
index 7582b47..4d8eaa1 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
 import org.apache.hadoop.hive.ql.exec.{FunctionInfo, FunctionRegistry}
 import org.apache.hadoop.hive.ql.udf.{UDFType => HiveUDFType}
 import org.apache.hadoop.hive.ql.udf.generic._
 
-import org.apache.spark.sql.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.analysis
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.types._
@@ -119,7 +119,7 @@ private[hive] case class HiveSimpleUdf(functionClassName: String, children: Seq[
       sys.error(s"No matching wrapper found, options: 
${argClass.getConstructors.toSeq}."))
 
     (a: Any) => {
-      logger.debug(
+      log.debug(
         s"Wrapping $a of type ${if (a == null) "null" else a.getClass.getName} 
using $constructor.")
       // We must make sure that primitives get boxed java style.
       if (a == null) {

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
index 6c8fe4b..52cb1cf 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
@@ -21,7 +21,7 @@ import java.io._
 
 import org.scalatest.{BeforeAndAfterAll, FunSuite, GivenWhenThen}
 
-import org.apache.spark.sql.Logging
+import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.planning.PhysicalOperation
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.plans.logical.{NativeCommand => LogicalNativeCommand}
@@ -197,7 +197,7 @@ abstract class HiveComparisonTest
    // If test sharding is enabled, skip tests that are not in the correct shard.
     shardInfo.foreach {
      case (shardId, numShards) if testCaseName.hashCode % numShards != shardId => return
-      case (shardId, _) => logger.debug(s"Shard $shardId includes test '$testCaseName'")
+      case (shardId, _) => log.debug(s"Shard $shardId includes test '$testCaseName'")
     }
 
     // Skip tests found in directories specified by user.
@@ -213,13 +213,13 @@ abstract class HiveComparisonTest
         .map(new File(_, testCaseName))
         .filter(_.exists)
     if (runOnlyDirectories.nonEmpty && runIndicators.isEmpty) {
-      logger.debug(
+      log.debug(
         s"Skipping test '$testCaseName' not found in 
${runOnlyDirectories.map(_.getCanonicalPath)}")
       return
     }
 
     test(testCaseName) {
-      logger.debug(s"=== HIVE TEST: $testCaseName ===")
+      log.debug(s"=== HIVE TEST: $testCaseName ===")
 
       // Clear old output for this testcase.
      outputDirectories.map(new File(_, testCaseName)).filter(_.exists()).foreach(_.delete())
@@ -235,7 +235,7 @@ abstract class HiveComparisonTest
           .filterNot(_ contains "hive.outerjoin.supports.filters")
 
       if (allQueries != queryList)
-        logger.warn(s"Simplifications made on unsupported operations for test 
$testCaseName")
+        log.warn(s"Simplifications made on unsupported operations for test 
$testCaseName")
 
       lazy val consoleTestCase = {
         val quotes = "\"\"\""
@@ -257,11 +257,11 @@ abstract class HiveComparisonTest
         }
 
         val hiveCachedResults = hiveCacheFiles.flatMap { cachedAnswerFile =>
-          logger.debug(s"Looking for cached answer file $cachedAnswerFile.")
+          log.debug(s"Looking for cached answer file $cachedAnswerFile.")
           if (cachedAnswerFile.exists) {
             Some(fileToString(cachedAnswerFile))
           } else {
-            logger.debug(s"File $cachedAnswerFile not found")
+            log.debug(s"File $cachedAnswerFile not found")
             None
           }
         }.map {
@@ -272,7 +272,7 @@ abstract class HiveComparisonTest
 
         val hiveResults: Seq[Seq[String]] =
           if (hiveCachedResults.size == queryList.size) {
-            logger.info(s"Using answer cache for test: $testCaseName")
+            log.info(s"Using answer cache for test: $testCaseName")
             hiveCachedResults
           } else {
 
@@ -287,7 +287,7 @@ abstract class HiveComparisonTest
                   if (installHooksCommand.findAllMatchIn(queryString).nonEmpty)
                     sys.error("hive exec hooks not supported for tests.")
 
-                  logger.warn(s"Running query ${i+1}/${queryList.size} with 
hive.")
+                  log.warn(s"Running query ${i+1}/${queryList.size} with 
hive.")
                  // Analyze the query with catalyst to ensure test tables are loaded.
                  val answer = hiveQuery.analyzed match {
                    case _: ExplainCommand => Nil // No need to execute EXPLAIN queries as we don't check the output.
@@ -351,7 +351,7 @@ abstract class HiveComparisonTest
              val resultComparison = sideBySide(hivePrintOut, catalystPrintOut).mkString("\n")
 
               if (recomputeCache) {
-                logger.warn(s"Clearing cache files for failed test 
$testCaseName")
+                log.warn(s"Clearing cache files for failed test $testCaseName")
                 hiveCacheFiles.foreach(_.delete())
               }
 
@@ -380,7 +380,7 @@ abstract class HiveComparisonTest
               TestHive.runSqlHive("SELECT key FROM src")
             } catch {
               case e: Exception =>
-                logger.error(s"FATAL ERROR: Canary query threw $e This implies 
that the testing environment has likely been corrupted.")
+                log.error(s"FATAL ERROR: Canary query threw $e This implies 
that the testing environment has likely been corrupted.")
                 // The testing setup traps exits so wait here for a long time 
so the developer can see when things started
                 // to go wrong.
                 Thread.sleep(1000000)

http://git-wip-us.apache.org/repos/asf/spark/blob/adc83032/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala
index 50ab71a..9ca5575 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala
@@ -53,7 +53,7 @@ abstract class HiveQueryFileTest extends HiveComparisonTest {
   testCases.sorted.foreach {
     case (testCaseName, testCaseFile) =>
      if (blackList.map(_.r.pattern.matcher(testCaseName).matches()).reduceLeft(_||_)) {
-        logger.debug(s"Blacklisted test skipped $testCaseName")
+        log.debug(s"Blacklisted test skipped $testCaseName")
      } else if (realWhiteList.map(_.r.pattern.matcher(testCaseName).matches()).reduceLeft(_||_) || runAll) {
         // Build a test case and submit it to scala test framework...
         val queriesString = fileToString(testCaseFile)

