http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/InnerJoinSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/InnerJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/InnerJoinSuite.scala
index ddff7ce..cc649b9 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/InnerJoinSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/InnerJoinSuite.scala
@@ -17,97 +17,19 @@
 
 package org.apache.spark.sql.execution.joins
 
+import org.apache.spark.sql.{DataFrame, execution, Row, SQLConf}
+import org.apache.spark.sql.catalyst.expressions.Expression
 import org.apache.spark.sql.catalyst.planning.ExtractEquiJoinKeys
 import org.apache.spark.sql.catalyst.plans.Inner
 import org.apache.spark.sql.catalyst.plans.logical.Join
-import org.apache.spark.sql.test.SQLTestUtils
-import org.apache.spark.sql.types.{IntegerType, StringType, StructType}
-import org.apache.spark.sql.{SQLConf, execution, Row, DataFrame}
-import org.apache.spark.sql.catalyst.expressions.Expression
 import org.apache.spark.sql.execution._
+import org.apache.spark.sql.test.SharedSQLContext
+import org.apache.spark.sql.types.{IntegerType, StringType, StructType}
 
-class InnerJoinSuite extends SparkPlanTest with SQLTestUtils {
-
-  private def testInnerJoin(
-      testName: String,
-      leftRows: DataFrame,
-      rightRows: DataFrame,
-      condition: Expression,
-      expectedAnswer: Seq[Product]): Unit = {
-    val join = Join(leftRows.logicalPlan, rightRows.logicalPlan, Inner, Some(condition))
-    ExtractEquiJoinKeys.unapply(join).foreach {
-      case (joinType, leftKeys, rightKeys, boundCondition, leftChild, rightChild) =>
-
-        def makeBroadcastHashJoin(left: SparkPlan, right: SparkPlan, side: BuildSide) = {
-          val broadcastHashJoin =
-            execution.joins.BroadcastHashJoin(leftKeys, rightKeys, side, left, right)
-          boundCondition.map(Filter(_, broadcastHashJoin)).getOrElse(broadcastHashJoin)
-        }
-
-        def makeShuffledHashJoin(left: SparkPlan, right: SparkPlan, side: BuildSide) = {
-          val shuffledHashJoin =
-            execution.joins.ShuffledHashJoin(leftKeys, rightKeys, side, left, right)
-          val filteredJoin =
-            boundCondition.map(Filter(_, shuffledHashJoin)).getOrElse(shuffledHashJoin)
-          EnsureRequirements(sqlContext).apply(filteredJoin)
-        }
-
-        def makeSortMergeJoin(left: SparkPlan, right: SparkPlan) = {
-          val sortMergeJoin =
-            execution.joins.SortMergeJoin(leftKeys, rightKeys, left, right)
-          val filteredJoin = boundCondition.map(Filter(_, sortMergeJoin)).getOrElse(sortMergeJoin)
-          EnsureRequirements(sqlContext).apply(filteredJoin)
-        }
-
-        test(s"$testName using BroadcastHashJoin (build=left)") {
-          withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
-            checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
-              makeBroadcastHashJoin(left, right, joins.BuildLeft),
-              expectedAnswer.map(Row.fromTuple),
-              sortAnswers = true)
-          }
-        }
-
-        test(s"$testName using BroadcastHashJoin (build=right)") {
-          withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
-            checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
-              makeBroadcastHashJoin(left, right, joins.BuildRight),
-              expectedAnswer.map(Row.fromTuple),
-              sortAnswers = true)
-          }
-        }
-
-        test(s"$testName using ShuffledHashJoin (build=left)") {
-          withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
-            checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
-              makeShuffledHashJoin(left, right, joins.BuildLeft),
-              expectedAnswer.map(Row.fromTuple),
-              sortAnswers = true)
-          }
-        }
-
-        test(s"$testName using ShuffledHashJoin (build=right)") {
-          withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
-            checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
-              makeShuffledHashJoin(left, right, joins.BuildRight),
-              expectedAnswer.map(Row.fromTuple),
-              sortAnswers = true)
-          }
-        }
+class InnerJoinSuite extends SparkPlanTest with SharedSQLContext {
 
-        test(s"$testName using SortMergeJoin") {
-          withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
-            checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
-              makeSortMergeJoin(left, right),
-              expectedAnswer.map(Row.fromTuple),
-              sortAnswers = true)
-          }
-        }
-    }
-  }
-
-  {
-    val upperCaseData = sqlContext.createDataFrame(sqlContext.sparkContext.parallelize(Seq(
+  private lazy val myUpperCaseData = ctx.createDataFrame(
+    ctx.sparkContext.parallelize(Seq(
       Row(1, "A"),
       Row(2, "B"),
       Row(3, "C"),
@@ -117,7 +39,8 @@ class InnerJoinSuite extends SparkPlanTest with SQLTestUtils {
       Row(null, "G")
     )), new StructType().add("N", IntegerType).add("L", StringType))
 
-    val lowerCaseData = sqlContext.createDataFrame(sqlContext.sparkContext.parallelize(Seq(
+  private lazy val myLowerCaseData = ctx.createDataFrame(
+    ctx.sparkContext.parallelize(Seq(
       Row(1, "a"),
       Row(2, "b"),
       Row(3, "c"),
@@ -125,21 +48,7 @@ class InnerJoinSuite extends SparkPlanTest with SQLTestUtils {
       Row(null, "e")
     )), new StructType().add("n", IntegerType).add("l", StringType))
 
-    testInnerJoin(
-      "inner join, one match per row",
-      upperCaseData,
-      lowerCaseData,
-      (upperCaseData.col("N") === lowerCaseData.col("n")).expr,
-      Seq(
-        (1, "A", 1, "a"),
-        (2, "B", 2, "b"),
-        (3, "C", 3, "c"),
-        (4, "D", 4, "d")
-      )
-    )
-  }
-
-  private val testData2 = Seq(
+  private lazy val myTestData = Seq(
     (1, 1),
     (1, 2),
     (2, 1),
@@ -148,14 +57,139 @@ class InnerJoinSuite extends SparkPlanTest with SQLTestUtils {
     (3, 2)
   ).toDF("a", "b")
 
+  // Note: the input dataframes and expression must be evaluated lazily because
+  // the SQLContext should be used only within a test to keep SQL tests stable
+  private def testInnerJoin(
+      testName: String,
+      leftRows: => DataFrame,
+      rightRows: => DataFrame,
+      condition: () => Expression,
+      expectedAnswer: Seq[Product]): Unit = {
+
+    def extractJoinParts(): Option[ExtractEquiJoinKeys.ReturnType] = {
+      val join = Join(leftRows.logicalPlan, rightRows.logicalPlan, Inner, Some(condition()))
+      ExtractEquiJoinKeys.unapply(join)
+    }
+
+    def makeBroadcastHashJoin(
+        leftKeys: Seq[Expression],
+        rightKeys: Seq[Expression],
+        boundCondition: Option[Expression],
+        leftPlan: SparkPlan,
+        rightPlan: SparkPlan,
+        side: BuildSide) = {
+      val broadcastHashJoin =
+        execution.joins.BroadcastHashJoin(leftKeys, rightKeys, side, leftPlan, rightPlan)
+      boundCondition.map(Filter(_, broadcastHashJoin)).getOrElse(broadcastHashJoin)
+    }
+
+    def makeShuffledHashJoin(
+        leftKeys: Seq[Expression],
+        rightKeys: Seq[Expression],
+        boundCondition: Option[Expression],
+        leftPlan: SparkPlan,
+        rightPlan: SparkPlan,
+        side: BuildSide) = {
+      val shuffledHashJoin =
+        execution.joins.ShuffledHashJoin(leftKeys, rightKeys, side, leftPlan, rightPlan)
+      val filteredJoin =
+        boundCondition.map(Filter(_, shuffledHashJoin)).getOrElse(shuffledHashJoin)
+      EnsureRequirements(sqlContext).apply(filteredJoin)
+    }
+
+    def makeSortMergeJoin(
+        leftKeys: Seq[Expression],
+        rightKeys: Seq[Expression],
+        boundCondition: Option[Expression],
+        leftPlan: SparkPlan,
+        rightPlan: SparkPlan) = {
+      val sortMergeJoin =
+        execution.joins.SortMergeJoin(leftKeys, rightKeys, leftPlan, rightPlan)
+      val filteredJoin = boundCondition.map(Filter(_, sortMergeJoin)).getOrElse(sortMergeJoin)
+      EnsureRequirements(sqlContext).apply(filteredJoin)
+    }
+
+    test(s"$testName using BroadcastHashJoin (build=left)") {
+      extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _) =>
+        withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
+          checkAnswer2(leftRows, rightRows, (leftPlan: SparkPlan, rightPlan: SparkPlan) =>
+            makeBroadcastHashJoin(
+              leftKeys, rightKeys, boundCondition, leftPlan, rightPlan, joins.BuildLeft),
+            expectedAnswer.map(Row.fromTuple),
+            sortAnswers = true)
+        }
+      }
+    }
+
+    test(s"$testName using BroadcastHashJoin (build=right)") {
+      extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _) =>
+        withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
+          checkAnswer2(leftRows, rightRows, (leftPlan: SparkPlan, rightPlan: SparkPlan) =>
+            makeBroadcastHashJoin(
+              leftKeys, rightKeys, boundCondition, leftPlan, rightPlan, joins.BuildRight),
+            expectedAnswer.map(Row.fromTuple),
+            sortAnswers = true)
+        }
+      }
+    }
+
+    test(s"$testName using ShuffledHashJoin (build=left)") {
+      extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _) =>
+        withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
+          checkAnswer2(leftRows, rightRows, (leftPlan: SparkPlan, rightPlan: SparkPlan) =>
+            makeShuffledHashJoin(
+              leftKeys, rightKeys, boundCondition, leftPlan, rightPlan, joins.BuildLeft),
+            expectedAnswer.map(Row.fromTuple),
+            sortAnswers = true)
+        }
+      }
+    }
+
+    test(s"$testName using ShuffledHashJoin (build=right)") {
+      extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _) =>
+        withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
+          checkAnswer2(leftRows, rightRows, (leftPlan: SparkPlan, rightPlan: SparkPlan) =>
+            makeShuffledHashJoin(
+              leftKeys, rightKeys, boundCondition, leftPlan, rightPlan, joins.BuildRight),
+            expectedAnswer.map(Row.fromTuple),
+            sortAnswers = true)
+        }
+      }
+    }
+
+    test(s"$testName using SortMergeJoin") {
+      extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _) =>
+        withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
+          checkAnswer2(leftRows, rightRows, (leftPlan: SparkPlan, rightPlan: SparkPlan) =>
+            makeSortMergeJoin(leftKeys, rightKeys, boundCondition, leftPlan, rightPlan),
+            expectedAnswer.map(Row.fromTuple),
+            sortAnswers = true)
+        }
+      }
+    }
+  }
+
+  testInnerJoin(
+    "inner join, one match per row",
+    myUpperCaseData,
+    myLowerCaseData,
+    () => (myUpperCaseData.col("N") === myLowerCaseData.col("n")).expr,
+    Seq(
+      (1, "A", 1, "a"),
+      (2, "B", 2, "b"),
+      (3, "C", 3, "c"),
+      (4, "D", 4, "d")
+    )
+  )
+
   {
-    val left = testData2.where("a = 1")
-    val right = testData2.where("a = 1")
+    lazy val left = myTestData.where("a = 1")
+    lazy val right = myTestData.where("a = 1")
     testInnerJoin(
       "inner join, multiple matches",
       left,
       right,
-      (left.col("a") === right.col("a")).expr,
+      () => (left.col("a") === right.col("a")).expr,
       Seq(
         (1, 1, 1, 1),
         (1, 1, 1, 2),
@@ -166,13 +200,13 @@ class InnerJoinSuite extends SparkPlanTest with SQLTestUtils {
   }
 
   {
-    val left = testData2.where("a = 1")
-    val right = testData2.where("a = 2")
+    lazy val left = myTestData.where("a = 1")
+    lazy val right = myTestData.where("a = 2")
     testInnerJoin(
       "inner join, no matches",
       left,
       right,
-      (left.col("a") === right.col("a")).expr,
+      () => (left.col("a") === right.col("a")).expr,
       Seq.empty
     )
   }
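
The comment in the new testInnerJoin above ("the input dataframes and expression must be evaluated lazily") is the heart of this refactoring: leftRows/rightRows become by-name parameters and the condition a () => Expression thunk, so nothing touches the shared SQLContext while the suite is constructed and tests are merely registered; evaluation is deferred until each test body runs under SharedSQLContext. A minimal, self-contained sketch of that deferral pattern (plain Scala, no Spark classes; all names below are illustrative and not part of the patch):

// Sketch: by-name parameters and thunks are captured, not evaluated, at registration
// time, so the stand-in "context" is only touched once a test body actually runs.
object LazyRegistrationSketch {
  private var contextReady = false                  // stands in for SharedSQLContext's lifecycle
  private def ctx: String = {                       // stands in for the shared SQLContext
    require(contextReady, "ctx touched during test registration")
    "ctx"
  }

  private val tests = scala.collection.mutable.ListBuffer[(String, () => Unit)]()

  // leftRows is by-name and condition is a thunk: neither evaluates ctx here
  private def testInnerJoin(name: String, leftRows: => String, condition: () => String): Unit =
    tests += name -> (() => println(s"$name: join $leftRows on ${condition()}"))

  def main(args: Array[String]): Unit = {
    testInnerJoin("one match per row", s"$ctx.upperCaseData", () => "N === n")  // registers only
    contextReady = true                             // the beforeAll() equivalent has now run
    tests.foreach { case (_, body) => body() }      // bodies evaluate against a live ctx
  }
}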

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/OuterJoinSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/OuterJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/OuterJoinSuite.scala
index e16f5e3..a1a617d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/OuterJoinSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/OuterJoinSuite.scala
@@ -17,28 +17,65 @@
 
 package org.apache.spark.sql.execution.joins
 
+import org.apache.spark.sql.{DataFrame, Row, SQLConf}
 import org.apache.spark.sql.catalyst.planning.ExtractEquiJoinKeys
+import org.apache.spark.sql.catalyst.plans._
 import org.apache.spark.sql.catalyst.plans.logical.Join
-import org.apache.spark.sql.test.SQLTestUtils
+import org.apache.spark.sql.catalyst.expressions.{And, Expression, LessThan}
+import org.apache.spark.sql.execution.{EnsureRequirements, SparkPlan, SparkPlanTest}
+import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.sql.types.{IntegerType, DoubleType, StructType}
-import org.apache.spark.sql.{SQLConf, DataFrame, Row}
-import org.apache.spark.sql.catalyst.expressions._
-import org.apache.spark.sql.catalyst.plans._
-import org.apache.spark.sql.execution.{EnsureRequirements, joins, SparkPlan, SparkPlanTest}
 
-class OuterJoinSuite extends SparkPlanTest with SQLTestUtils {
+class OuterJoinSuite extends SparkPlanTest with SharedSQLContext {
+
+  private lazy val left = ctx.createDataFrame(
+    ctx.sparkContext.parallelize(Seq(
+      Row(1, 2.0),
+      Row(2, 100.0),
+      Row(2, 1.0), // This row is duplicated to ensure that we will have multiple buffered matches
+      Row(2, 1.0),
+      Row(3, 3.0),
+      Row(5, 1.0),
+      Row(6, 6.0),
+      Row(null, null)
+    )), new StructType().add("a", IntegerType).add("b", DoubleType))
+
+  private lazy val right = ctx.createDataFrame(
+    ctx.sparkContext.parallelize(Seq(
+      Row(0, 0.0),
+      Row(2, 3.0), // This row is duplicated to ensure that we will have multiple buffered matches
+      Row(2, -1.0),
+      Row(2, -1.0),
+      Row(2, 3.0),
+      Row(3, 2.0),
+      Row(4, 1.0),
+      Row(5, 3.0),
+      Row(7, 7.0),
+      Row(null, null)
+    )), new StructType().add("c", IntegerType).add("d", DoubleType))
+
+  private lazy val condition = {
+    And((left.col("a") === right.col("c")).expr,
+      LessThan(left.col("b").expr, right.col("d").expr))
+  }
 
+  // Note: the input dataframes and expression must be evaluated lazily because
+  // the SQLContext should be used only within a test to keep SQL tests stable
   private def testOuterJoin(
       testName: String,
-      leftRows: DataFrame,
-      rightRows: DataFrame,
+      leftRows: => DataFrame,
+      rightRows: => DataFrame,
       joinType: JoinType,
-      condition: Expression,
+      condition: => Expression,
       expectedAnswer: Seq[Product]): Unit = {
-    val join = Join(leftRows.logicalPlan, rightRows.logicalPlan, Inner, Some(condition))
-    ExtractEquiJoinKeys.unapply(join).foreach {
-      case (_, leftKeys, rightKeys, boundCondition, leftChild, rightChild) =>
-        test(s"$testName using ShuffledHashOuterJoin") {
+
+    def extractJoinParts(): Option[ExtractEquiJoinKeys.ReturnType] = {
+      val join = Join(leftRows.logicalPlan, rightRows.logicalPlan, Inner, Some(condition))
+      ExtractEquiJoinKeys.unapply(join)
+    }
+
+    test(s"$testName using ShuffledHashOuterJoin") {
+      extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _) =>
           withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
            checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
               EnsureRequirements(sqlContext).apply(
@@ -46,19 +83,23 @@ class OuterJoinSuite extends SparkPlanTest with SQLTestUtils {
               expectedAnswer.map(Row.fromTuple),
               sortAnswers = true)
           }
-        }
+      }
+    }
 
-        if (joinType != FullOuter) {
-          test(s"$testName using BroadcastHashOuterJoin") {
+    if (joinType != FullOuter) {
+      test(s"$testName using BroadcastHashOuterJoin") {
+        extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _) =>
             withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
              checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
                BroadcastHashOuterJoin(leftKeys, rightKeys, joinType, boundCondition, left, right),
                 expectedAnswer.map(Row.fromTuple),
                 sortAnswers = true)
             }
-          }
+        }
+      }
 
-          test(s"$testName using SortMergeOuterJoin") {
+      test(s"$testName using SortMergeOuterJoin") {
+        extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _) =>
             withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
            checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
                 EnsureRequirements(sqlContext).apply(
@@ -66,57 +107,9 @@ class OuterJoinSuite extends SparkPlanTest with SQLTestUtils {
                 expectedAnswer.map(Row.fromTuple),
                 sortAnswers = false)
             }
-          }
         }
-    }
-
-    test(s"$testName using BroadcastNestedLoopJoin (build=left)") {
-      withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
-        checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
-          joins.BroadcastNestedLoopJoin(left, right, joins.BuildLeft, joinType, Some(condition)),
-          expectedAnswer.map(Row.fromTuple),
-          sortAnswers = true)
       }
     }
-
-    test(s"$testName using BroadcastNestedLoopJoin (build=right)") {
-      withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
-        checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
-          joins.BroadcastNestedLoopJoin(left, right, joins.BuildRight, joinType, Some(condition)),
-          expectedAnswer.map(Row.fromTuple),
-          sortAnswers = true)
-      }
-    }
-  }
-
-  val left = sqlContext.createDataFrame(sqlContext.sparkContext.parallelize(Seq(
-    Row(1, 2.0),
-    Row(2, 100.0),
-    Row(2, 1.0), // This row is duplicated to ensure that we will have multiple buffered matches
-    Row(2, 1.0),
-    Row(3, 3.0),
-    Row(5, 1.0),
-    Row(6, 6.0),
-    Row(null, null)
-  )), new StructType().add("a", IntegerType).add("b", DoubleType))
-
-  val right = sqlContext.createDataFrame(sqlContext.sparkContext.parallelize(Seq(
-    Row(0, 0.0),
-    Row(2, 3.0), // This row is duplicated to ensure that we will have multiple buffered matches
-    Row(2, -1.0),
-    Row(2, -1.0),
-    Row(2, 3.0),
-    Row(3, 2.0),
-    Row(4, 1.0),
-    Row(5, 3.0),
-    Row(7, 7.0),
-    Row(null, null)
-  )), new StructType().add("c", IntegerType).add("d", DoubleType))
-
-  val condition = {
-    And(
-      (left.col("a") === right.col("c")).expr,
-      LessThan(left.col("b").expr, right.col("d").expr))
   }
 
  // --- Basic outer joins ------------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/SemiJoinSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/SemiJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/SemiJoinSuite.scala
index 4503ed2..baa86e3 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/SemiJoinSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/SemiJoinSuite.scala
@@ -17,44 +17,80 @@
 
 package org.apache.spark.sql.execution.joins
 
+import org.apache.spark.sql.{SQLConf, DataFrame, Row}
 import org.apache.spark.sql.catalyst.planning.ExtractEquiJoinKeys
 import org.apache.spark.sql.catalyst.plans.Inner
 import org.apache.spark.sql.catalyst.plans.logical.Join
-import org.apache.spark.sql.test.SQLTestUtils
-import org.apache.spark.sql.types.{DoubleType, IntegerType, StructType}
-import org.apache.spark.sql.{SQLConf, DataFrame, Row}
 import org.apache.spark.sql.catalyst.expressions.{And, LessThan, Expression}
 import org.apache.spark.sql.execution.{EnsureRequirements, SparkPlan, SparkPlanTest}
+import org.apache.spark.sql.test.SharedSQLContext
+import org.apache.spark.sql.types.{DoubleType, IntegerType, StructType}
+
+class SemiJoinSuite extends SparkPlanTest with SharedSQLContext {
 
-class SemiJoinSuite extends SparkPlanTest with SQLTestUtils {
+  private lazy val left = ctx.createDataFrame(
+    ctx.sparkContext.parallelize(Seq(
+      Row(1, 2.0),
+      Row(1, 2.0),
+      Row(2, 1.0),
+      Row(2, 1.0),
+      Row(3, 3.0),
+      Row(null, null),
+      Row(null, 5.0),
+      Row(6, null)
+    )), new StructType().add("a", IntegerType).add("b", DoubleType))
 
+  private lazy val right = ctx.createDataFrame(
+    ctx.sparkContext.parallelize(Seq(
+      Row(2, 3.0),
+      Row(2, 3.0),
+      Row(3, 2.0),
+      Row(4, 1.0),
+      Row(null, null),
+      Row(null, 5.0),
+      Row(6, null)
+    )), new StructType().add("c", IntegerType).add("d", DoubleType))
+
+  private lazy val condition = {
+    And((left.col("a") === right.col("c")).expr,
+      LessThan(left.col("b").expr, right.col("d").expr))
+  }
+
+  // Note: the input dataframes and expression must be evaluated lazily because
+  // the SQLContext should be used only within a test to keep SQL tests stable
   private def testLeftSemiJoin(
       testName: String,
-      leftRows: DataFrame,
-      rightRows: DataFrame,
-      condition: Expression,
+      leftRows: => DataFrame,
+      rightRows: => DataFrame,
+      condition: => Expression,
       expectedAnswer: Seq[Product]): Unit = {
-    val join = Join(leftRows.logicalPlan, rightRows.logicalPlan, Inner, Some(condition))
-    ExtractEquiJoinKeys.unapply(join).foreach {
-      case (joinType, leftKeys, rightKeys, boundCondition, leftChild, rightChild) =>
-        test(s"$testName using LeftSemiJoinHash") {
-          withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
-            checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
-              EnsureRequirements(left.sqlContext).apply(
-                LeftSemiJoinHash(leftKeys, rightKeys, left, right, boundCondition)),
-              expectedAnswer.map(Row.fromTuple),
-              sortAnswers = true)
-          }
+
+    def extractJoinParts(): Option[ExtractEquiJoinKeys.ReturnType] = {
+      val join = Join(leftRows.logicalPlan, rightRows.logicalPlan, Inner, Some(condition))
+      ExtractEquiJoinKeys.unapply(join)
+    }
+
+    test(s"$testName using LeftSemiJoinHash") {
+      extractJoinParts().foreach { case (joinType, leftKeys, rightKeys, boundCondition, _, _) =>
+        withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
+          checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
+            EnsureRequirements(left.sqlContext).apply(
+              LeftSemiJoinHash(leftKeys, rightKeys, left, right, boundCondition)),
+            expectedAnswer.map(Row.fromTuple),
+            sortAnswers = true)
         }
+      }
+    }
 
-        test(s"$testName using BroadcastLeftSemiJoinHash") {
-          withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
-            checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
-              BroadcastLeftSemiJoinHash(leftKeys, rightKeys, left, right, boundCondition),
-              expectedAnswer.map(Row.fromTuple),
-              sortAnswers = true)
-          }
+    test(s"$testName using BroadcastLeftSemiJoinHash") {
+      extractJoinParts().foreach { case (joinType, leftKeys, rightKeys, boundCondition, _, _) =>
+        withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
+          checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
+            BroadcastLeftSemiJoinHash(leftKeys, rightKeys, left, right, boundCondition),
+            expectedAnswer.map(Row.fromTuple),
+            sortAnswers = true)
         }
+      }
     }
 
     test(s"$testName using LeftSemiJoinBNL") {
@@ -67,33 +103,6 @@ class SemiJoinSuite extends SparkPlanTest with SQLTestUtils {
     }
   }
 
-  val left = sqlContext.createDataFrame(sqlContext.sparkContext.parallelize(Seq(
-    Row(1, 2.0),
-    Row(1, 2.0),
-    Row(2, 1.0),
-    Row(2, 1.0),
-    Row(3, 3.0),
-    Row(null, null),
-    Row(null, 5.0),
-    Row(6, null)
-  )), new StructType().add("a", IntegerType).add("b", DoubleType))
-
-  val right = sqlContext.createDataFrame(sqlContext.sparkContext.parallelize(Seq(
-    Row(2, 3.0),
-    Row(2, 3.0),
-    Row(3, 2.0),
-    Row(4, 1.0),
-    Row(null, null),
-    Row(null, 5.0),
-    Row(6, null)
-  )), new StructType().add("c", IntegerType).add("d", DoubleType))
-
-  val condition = {
-    And(
-      (left.col("a") === right.col("c")).expr,
-      LessThan(left.col("b").expr, right.col("d").expr))
-  }
-
   testLeftSemiJoin(
     "basic test",
     left,

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
index 7383d3f..80006bf 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
@@ -28,17 +28,15 @@ import org.apache.spark.SparkFunSuite
 import org.apache.spark.sql._
 import org.apache.spark.sql.execution.ui.SparkPlanGraph
 import org.apache.spark.sql.functions._
-import org.apache.spark.sql.test.{SQLTestUtils, TestSQLContext}
+import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.util.Utils
 
-class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
 
-  override val sqlContext = TestSQLContext
-
-  import sqlContext.implicits._
+class SQLMetricsSuite extends SparkFunSuite with SharedSQLContext {
+  import testImplicits._
 
   test("LongSQLMetric should not box Long") {
-    val l = SQLMetrics.createLongMetric(TestSQLContext.sparkContext, "long")
+    val l = SQLMetrics.createLongMetric(ctx.sparkContext, "long")
     val f = () => {
       l += 1L
       l.add(1L)
@@ -52,7 +50,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
 
   test("Normal accumulator should do boxing") {
     // We need this test to make sure BoxingFinder works.
-    val l = TestSQLContext.sparkContext.accumulator(0L)
+    val l = ctx.sparkContext.accumulator(0L)
     val f = () => { l += 1L }
     BoxingFinder.getClassReader(f.getClass).foreach { cl =>
       val boxingFinder = new BoxingFinder()
@@ -73,19 +71,19 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
       df: DataFrame,
       expectedNumOfJobs: Int,
       expectedMetrics: Map[Long, (String, Map[String, Any])]): Unit = {
-    val previousExecutionIds = TestSQLContext.listener.executionIdToData.keySet
+    val previousExecutionIds = ctx.listener.executionIdToData.keySet
     df.collect()
-    TestSQLContext.sparkContext.listenerBus.waitUntilEmpty(10000)
-    val executionIds = TestSQLContext.listener.executionIdToData.keySet.diff(previousExecutionIds)
+    ctx.sparkContext.listenerBus.waitUntilEmpty(10000)
+    val executionIds = ctx.listener.executionIdToData.keySet.diff(previousExecutionIds)
     assert(executionIds.size === 1)
     val executionId = executionIds.head
-    val jobs = TestSQLContext.listener.getExecution(executionId).get.jobs
+    val jobs = ctx.listener.getExecution(executionId).get.jobs
     // Use "<=" because there is a race condition that we may miss some jobs
    // TODO Change it to "=" once we fix the race condition that missing the JobStarted event.
     assert(jobs.size <= expectedNumOfJobs)
     if (jobs.size == expectedNumOfJobs) {
       // If we can track all jobs, check the metric values
-      val metricValues = TestSQLContext.listener.getExecutionMetrics(executionId)
+      val metricValues = ctx.listener.getExecutionMetrics(executionId)
      val actualMetrics = SparkPlanGraph(df.queryExecution.executedPlan).nodes.filter { node =>
         expectedMetrics.contains(node.id)
       }.map { node =>
@@ -111,7 +109,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
       SQLConf.TUNGSTEN_ENABLED.key -> "false") {
       // Assume the execution plan is
       // PhysicalRDD(nodeId = 1) -> Project(nodeId = 0)
-      val df = TestData.person.select('name)
+      val df = person.select('name)
       testSparkPlanMetrics(df, 1, Map(
         0L ->("Project", Map(
           "number of rows" -> 2L)))
@@ -126,7 +124,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
       SQLConf.TUNGSTEN_ENABLED.key -> "true") {
       // Assume the execution plan is
       // PhysicalRDD(nodeId = 1) -> TungstenProject(nodeId = 0)
-      val df = TestData.person.select('name)
+      val df = person.select('name)
       testSparkPlanMetrics(df, 1, Map(
         0L ->("TungstenProject", Map(
           "number of rows" -> 2L)))
@@ -137,7 +135,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
   test("Filter metrics") {
     // Assume the execution plan is
     // PhysicalRDD(nodeId = 1) -> Filter(nodeId = 0)
-    val df = TestData.person.filter('age < 25)
+    val df = person.filter('age < 25)
     testSparkPlanMetrics(df, 1, Map(
       0L -> ("Filter", Map(
         "number of input rows" -> 2L,
@@ -152,7 +150,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
       SQLConf.TUNGSTEN_ENABLED.key -> "false") {
       // Assume the execution plan is
      // ... -> Aggregate(nodeId = 2) -> TungstenExchange(nodeId = 1) -> Aggregate(nodeId = 0)
-      val df = TestData.testData2.groupBy().count() // 2 partitions
+      val df = testData2.groupBy().count() // 2 partitions
       testSparkPlanMetrics(df, 1, Map(
         2L -> ("Aggregate", Map(
           "number of input rows" -> 6L,
@@ -163,7 +161,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
       )
 
       // 2 partitions and each partition contains 2 keys
-      val df2 = TestData.testData2.groupBy('a).count()
+      val df2 = testData2.groupBy('a).count()
       testSparkPlanMetrics(df2, 1, Map(
         2L -> ("Aggregate", Map(
           "number of input rows" -> 6L,
@@ -185,7 +183,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
       // Assume the execution plan is
      // ... -> SortBasedAggregate(nodeId = 2) -> TungstenExchange(nodeId = 1) ->
       // SortBasedAggregate(nodeId = 0)
-      val df = TestData.testData2.groupBy().count() // 2 partitions
+      val df = testData2.groupBy().count() // 2 partitions
       testSparkPlanMetrics(df, 1, Map(
         2L -> ("SortBasedAggregate", Map(
           "number of input rows" -> 6L,
@@ -199,7 +197,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
       // ... -> SortBasedAggregate(nodeId = 3) -> TungstenExchange(nodeId = 2)
       // -> ExternalSort(nodeId = 1)-> SortBasedAggregate(nodeId = 0)
       // 2 partitions and each partition contains 2 keys
-      val df2 = TestData.testData2.groupBy('a).count()
+      val df2 = testData2.groupBy('a).count()
       testSparkPlanMetrics(df2, 1, Map(
         3L -> ("SortBasedAggregate", Map(
           "number of input rows" -> 6L,
@@ -219,7 +217,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
       // Assume the execution plan is
       // ... -> TungstenAggregate(nodeId = 2) -> Exchange(nodeId = 1)
       // -> TungstenAggregate(nodeId = 0)
-      val df = TestData.testData2.groupBy().count() // 2 partitions
+      val df = testData2.groupBy().count() // 2 partitions
       testSparkPlanMetrics(df, 1, Map(
         2L -> ("TungstenAggregate", Map(
           "number of input rows" -> 6L,
@@ -230,7 +228,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
       )
 
       // 2 partitions and each partition contains 2 keys
-      val df2 = TestData.testData2.groupBy('a).count()
+      val df2 = testData2.groupBy('a).count()
       testSparkPlanMetrics(df2, 1, Map(
         2L -> ("TungstenAggregate", Map(
           "number of input rows" -> 6L,
@@ -246,7 +244,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
    // Because SortMergeJoin may skip different rows if the number of partitions is different, this
     // test should use the deterministic number of partitions.
     withSQLConf(SQLConf.SORTMERGE_JOIN.key -> "true") {
-      val testDataForJoin = TestData.testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
+      val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
       testDataForJoin.registerTempTable("testDataForJoin")
       withTempTable("testDataForJoin") {
         // Assume the execution plan is
@@ -268,7 +266,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
    // Because SortMergeOuterJoin may skip different rows if the number of partitions is different,
     // this test should use the deterministic number of partitions.
     withSQLConf(SQLConf.SORTMERGE_JOIN.key -> "true") {
-      val testDataForJoin = TestData.testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
+      val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
       testDataForJoin.registerTempTable("testDataForJoin")
       withTempTable("testDataForJoin") {
         // Assume the execution plan is
@@ -314,7 +312,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
 
   test("ShuffledHashJoin metrics") {
     withSQLConf(SQLConf.SORTMERGE_JOIN.key -> "false") {
-      val testDataForJoin = TestData.testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
+      val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
       testDataForJoin.registerTempTable("testDataForJoin")
       withTempTable("testDataForJoin") {
         // Assume the execution plan is
@@ -390,7 +388,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
 
   test("BroadcastNestedLoopJoin metrics") {
     withSQLConf(SQLConf.SORTMERGE_JOIN.key -> "true") {
-      val testDataForJoin = TestData.testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
+      val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
       testDataForJoin.registerTempTable("testDataForJoin")
       withTempTable("testDataForJoin") {
         // Assume the execution plan is
@@ -458,7 +456,7 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
   }
 
   test("CartesianProduct metrics") {
-    val testDataForJoin = TestData.testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
+    val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
     testDataForJoin.registerTempTable("testDataForJoin")
     withTempTable("testDataForJoin") {
       // Assume the execution plan is
@@ -476,19 +474,19 @@ class SQLMetricsSuite extends SparkFunSuite with SQLTestUtils {
 
   test("save metrics") {
     withTempPath { file =>
-      val previousExecutionIds = TestSQLContext.listener.executionIdToData.keySet
+      val previousExecutionIds = ctx.listener.executionIdToData.keySet
       // Assume the execution plan is
       // PhysicalRDD(nodeId = 0)
-      TestData.person.select('name).write.format("json").save(file.getAbsolutePath)
-      TestSQLContext.sparkContext.listenerBus.waitUntilEmpty(10000)
-      val executionIds = TestSQLContext.listener.executionIdToData.keySet.diff(previousExecutionIds)
+      person.select('name).write.format("json").save(file.getAbsolutePath)
+      ctx.sparkContext.listenerBus.waitUntilEmpty(10000)
+      val executionIds = ctx.listener.executionIdToData.keySet.diff(previousExecutionIds)
       assert(executionIds.size === 1)
       val executionId = executionIds.head
-      val jobs = TestSQLContext.listener.getExecution(executionId).get.jobs
+      val jobs = ctx.listener.getExecution(executionId).get.jobs
       // Use "<=" because there is a race condition that we may miss some jobs
      // TODO Change "<=" to "=" once we fix the race condition that missing the JobStarted event.
       assert(jobs.size <= 1)
-      val metricValues = TestSQLContext.listener.getExecutionMetrics(executionId)
+      val metricValues = ctx.listener.getExecutionMetrics(executionId)
       // Because "save" will create a new DataFrame internally, we cannot get 
the real metric id.
       // However, we still can check the value.
       assert(metricValues.values.toSeq === Seq(2L))
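
Taken together, the SQLMetricsSuite changes show the suite shape this patch converges on: mix in SharedSQLContext instead of importing TestSQLContext, pull implicits from testImplicits, and reach the context through ctx. A hedged sketch of that shape (the suite name and data are invented for illustration; only members the diff itself uses are assumed):

package org.apache.spark.sql.execution.metric

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.test.SharedSQLContext

// Illustrative only: assumes SharedSQLContext provides `ctx` and `testImplicits`,
// which is exactly what the converted suites above rely on.
class ExampleSharedContextSuite extends SparkFunSuite with SharedSQLContext {
  import testImplicits._   // replaces the old `import TestSQLContext.implicits._`

  test("ctx is available inside test bodies") {
    val df = Seq((1, "a"), (2, "b")).toDF("id", "value")  // toDF comes from testImplicits
    assert(df.count() === 2)
    assert(ctx.sparkContext != null)  // the same shared context backs every test
  }
}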

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala
index 41dd189..80d1e88 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala
@@ -25,12 +25,12 @@ import org.apache.spark.sql.execution.metric.LongSQLMetricValue
 import org.apache.spark.scheduler._
 import org.apache.spark.sql.{DataFrame, SQLContext}
 import org.apache.spark.sql.execution.SQLExecution
-import org.apache.spark.sql.test.TestSQLContext
+import org.apache.spark.sql.test.SharedSQLContext
 
-class SQLListenerSuite extends SparkFunSuite {
+class SQLListenerSuite extends SparkFunSuite with SharedSQLContext {
+  import testImplicits._
 
   private def createTestDataFrame: DataFrame = {
-    import TestSQLContext.implicits._
     Seq(
       (1, 1),
       (2, 2)
@@ -74,7 +74,7 @@ class SQLListenerSuite extends SparkFunSuite {
   }
 
   test("basic") {
-    val listener = new SQLListener(TestSQLContext)
+    val listener = new SQLListener(ctx)
     val executionId = 0
     val df = createTestDataFrame
     val accumulatorIds =
@@ -212,7 +212,7 @@ class SQLListenerSuite extends SparkFunSuite {
   }
 
   test("onExecutionEnd happens before onJobEnd(JobSucceeded)") {
-    val listener = new SQLListener(TestSQLContext)
+    val listener = new SQLListener(ctx)
     val executionId = 0
     val df = createTestDataFrame
     listener.onExecutionStart(
@@ -241,7 +241,7 @@ class SQLListenerSuite extends SparkFunSuite {
   }
 
   test("onExecutionEnd happens before multiple onJobEnd(JobSucceeded)s") {
-    val listener = new SQLListener(TestSQLContext)
+    val listener = new SQLListener(ctx)
     val executionId = 0
     val df = createTestDataFrame
     listener.onExecutionStart(
@@ -281,7 +281,7 @@ class SQLListenerSuite extends SparkFunSuite {
   }
 
   test("onExecutionEnd happens before onJobEnd(JobFailed)") {
-    val listener = new SQLListener(TestSQLContext)
+    val listener = new SQLListener(ctx)
     val executionId = 0
     val df = createTestDataFrame
     listener.onExecutionStart(

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index e4dcf4c..0edac08 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -25,10 +25,13 @@ import org.h2.jdbc.JdbcSQLException
 import org.scalatest.BeforeAndAfter
 
 import org.apache.spark.SparkFunSuite
+import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.sql.types._
 import org.apache.spark.util.Utils
 
-class JDBCSuite extends SparkFunSuite with BeforeAndAfter {
+class JDBCSuite extends SparkFunSuite with BeforeAndAfter with SharedSQLContext {
+  import testImplicits._
+
   val url = "jdbc:h2:mem:testdb0"
   val urlWithUserAndPass = "jdbc:h2:mem:testdb0;user=testUser;password=testPass"
   var conn: java.sql.Connection = null
@@ -42,10 +45,6 @@ class JDBCSuite extends SparkFunSuite with BeforeAndAfter {
       Some(StringType)
   }
 
-  private lazy val ctx = org.apache.spark.sql.test.TestSQLContext
-  import ctx.implicits._
-  import ctx.sql
-
   before {
     Utils.classForName("org.h2.Driver")
     // Extra properties that will be specified for our database. We need these to test

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala
index 84b52ca..5dc3a2c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala
@@ -23,11 +23,13 @@ import java.util.Properties
 import org.scalatest.BeforeAndAfter
 
 import org.apache.spark.SparkFunSuite
-import org.apache.spark.sql.{SaveMode, Row}
+import org.apache.spark.sql.{Row, SaveMode}
+import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.sql.types._
 import org.apache.spark.util.Utils
 
-class JDBCWriteSuite extends SparkFunSuite with BeforeAndAfter {
+class JDBCWriteSuite extends SparkFunSuite with BeforeAndAfter with SharedSQLContext {
+
   val url = "jdbc:h2:mem:testdb2"
   var conn: java.sql.Connection = null
   val url1 = "jdbc:h2:mem:testdb3"
@@ -37,10 +39,6 @@ class JDBCWriteSuite extends SparkFunSuite with BeforeAndAfter {
   properties.setProperty("password", "testPass")
   properties.setProperty("rowId", "false")
 
-  private lazy val ctx = org.apache.spark.sql.test.TestSQLContext
-  import ctx.implicits._
-  import ctx.sql
-
   before {
     Utils.classForName("org.h2.Driver")
     conn = DriverManager.getConnection(url)
@@ -58,14 +56,14 @@ class JDBCWriteSuite extends SparkFunSuite with BeforeAndAfter {
       "create table test.people1 (name TEXT(32) NOT NULL, theid INTEGER NOT NULL)").executeUpdate()
     conn1.commit()
 
-    ctx.sql(
+    sql(
       s"""
         |CREATE TEMPORARY TABLE PEOPLE
         |USING org.apache.spark.sql.jdbc
         |OPTIONS (url '$url1', dbtable 'TEST.PEOPLE', user 'testUser', password 'testPass')
       """.stripMargin.replaceAll("\n", " "))
 
-    ctx.sql(
+    sql(
       s"""
         |CREATE TEMPORARY TABLE PEOPLE1
         |USING org.apache.spark.sql.jdbc
@@ -144,14 +142,14 @@ class JDBCWriteSuite extends SparkFunSuite with BeforeAndAfter {
   }
 
   test("INSERT to JDBC Datasource") {
-    ctx.sql("INSERT INTO TABLE PEOPLE1 SELECT * FROM PEOPLE")
+    sql("INSERT INTO TABLE PEOPLE1 SELECT * FROM PEOPLE")
     assert(2 === ctx.read.jdbc(url1, "TEST.PEOPLE1", properties).count)
     assert(2 === ctx.read.jdbc(url1, "TEST.PEOPLE1", properties).collect()(0).length)
   }
 
   test("INSERT to JDBC Datasource with overwrite") {
-    ctx.sql("INSERT INTO TABLE PEOPLE1 SELECT * FROM PEOPLE")
-    ctx.sql("INSERT OVERWRITE TABLE PEOPLE1 SELECT * FROM PEOPLE")
+    sql("INSERT INTO TABLE PEOPLE1 SELECT * FROM PEOPLE")
+    sql("INSERT OVERWRITE TABLE PEOPLE1 SELECT * FROM PEOPLE")
     assert(2 === ctx.read.jdbc(url1, "TEST.PEOPLE1", properties).count)
     assert(2 === ctx.read.jdbc(url1, "TEST.PEOPLE1", properties).collect()(0).length)
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
index 562c279..9bc3f6b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
@@ -19,28 +19,32 @@ package org.apache.spark.sql.sources
 
 import java.io.{File, IOException}
 
-import org.scalatest.BeforeAndAfterAll
+import org.scalatest.BeforeAndAfter
 
 import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.execution.datasources.DDLException
+import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.util.Utils
 
-class CreateTableAsSelectSuite extends DataSourceTest with BeforeAndAfterAll {
-
-  import caseInsensitiveContext.sql
 
+class CreateTableAsSelectSuite extends DataSourceTest with SharedSQLContext with BeforeAndAfter {
+  protected override lazy val sql = caseInsensitiveContext.sql _
   private lazy val sparkContext = caseInsensitiveContext.sparkContext
-
-  var path: File = null
+  private var path: File = null
 
   override def beforeAll(): Unit = {
+    super.beforeAll()
     path = Utils.createTempDir()
     val rdd = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str${i}"}"""))
     caseInsensitiveContext.read.json(rdd).registerTempTable("jt")
   }
 
   override def afterAll(): Unit = {
-    caseInsensitiveContext.dropTempTable("jt")
+    try {
+      caseInsensitiveContext.dropTempTable("jt")
+    } finally {
+      super.afterAll()
+    }
   }
 
   after {

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/sources/DDLSourceLoadSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/DDLSourceLoadSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/DDLSourceLoadSuite.scala
index 392da0b..853707c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/DDLSourceLoadSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/DDLSourceLoadSuite.scala
@@ -18,11 +18,12 @@
 package org.apache.spark.sql.sources
 
 import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.sql.types.{StringType, StructField, StructType}
 
 
 // please note that the META-INF/services had to be modified for the test directory for this to work
-class DDLSourceLoadSuite extends DataSourceTest {
+class DDLSourceLoadSuite extends DataSourceTest with SharedSQLContext {
 
   test("data sources with the same name") {
     intercept[RuntimeException] {

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/sources/DDLTestSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/DDLTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/DDLTestSuite.scala
index 84855ce..5f8514e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/DDLTestSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/DDLTestSuite.scala
@@ -20,6 +20,7 @@ package org.apache.spark.sql.sources
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.sql.types._
 import org.apache.spark.unsafe.types.UTF8String
 
@@ -68,10 +69,12 @@ case class SimpleDDLScan(from: Int, to: Int, table: String)(@transient val sqlCo
   }
 }
 
-class DDLTestSuite extends DataSourceTest {
+class DDLTestSuite extends DataSourceTest with SharedSQLContext {
+  protected override lazy val sql = caseInsensitiveContext.sql _
 
-  before {
-    caseInsensitiveContext.sql(
+  override def beforeAll(): Unit = {
+    super.beforeAll()
+    sql(
       """
       |CREATE TEMPORARY TABLE ddlPeople
       |USING org.apache.spark.sql.sources.DDLScanSource
@@ -105,7 +108,7 @@ class DDLTestSuite extends DataSourceTest {
       ))
 
   test("SPARK-7686 DescribeCommand should have correct physical plan output 
attributes") {
-    val attributes = caseInsensitiveContext.sql("describe ddlPeople")
+    val attributes = sql("describe ddlPeople")
       .queryExecution.executedPlan.output
     assert(attributes.map(_.name) === Seq("col_name", "data_type", "comment"))
     assert(attributes.map(_.dataType).toSet === Set(StringType))

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/sources/DataSourceTest.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/DataSourceTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/DataSourceTest.scala
index 00cc7d5..d74d29f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/DataSourceTest.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/DataSourceTest.scala
@@ -17,18 +17,23 @@
 
 package org.apache.spark.sql.sources
 
-import org.scalatest.BeforeAndAfter
-
 import org.apache.spark.sql._
-import org.apache.spark.sql.test.TestSQLContext
 
 
-abstract class DataSourceTest extends QueryTest with BeforeAndAfter {
+private[sql] abstract class DataSourceTest extends QueryTest {
+  protected def _sqlContext: SQLContext
+
   // We want to test some edge cases.
-  protected implicit lazy val caseInsensitiveContext = {
-    val ctx = new SQLContext(TestSQLContext.sparkContext)
+  protected lazy val caseInsensitiveContext: SQLContext = {
+    val ctx = new SQLContext(_sqlContext.sparkContext)
     ctx.setConf(SQLConf.CASE_SENSITIVE, false)
     ctx
   }
 
+  protected def sqlTest(sqlString: String, expectedAnswer: Seq[Row]) {
+    test(sqlString) {
+      checkAnswer(caseInsensitiveContext.sql(sqlString), expectedAnswer)
+    }
+  }
+
 }
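
The refactored DataSourceTest is meant to be paired with SharedSQLContext by each concrete source suite, which then routes its sql helper through the case-insensitive context, as DDLTestSuite, FilteredScanSuite and PrunedScanSuite do below. A hedged sketch of that wiring (the table name and query are invented; it assumes SharedSQLContext satisfies the abstract _sqlContext shown above):

package org.apache.spark.sql.sources

import org.apache.spark.sql.Row
import org.apache.spark.sql.test.SharedSQLContext

// Sketch only, following the pattern of the suites in this patch.
class ExampleSourceSuite extends DataSourceTest with SharedSQLContext {
  // route the shared `sql` helper through the case-insensitive context
  protected override lazy val sql = caseInsensitiveContext.sql _

  override def beforeAll(): Unit = {
    super.beforeAll()  // let the shared context come up before registering test data
    caseInsensitiveContext.range(1, 4).toDF("i").registerTempTable("exampleTable")
  }

  // sqlTest (defined in DataSourceTest above) registers one test per query
  sqlTest("SELECT i FROM exampleTable", (1L to 3L).map(Row(_)))
}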

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/sources/FilteredScanSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/FilteredScanSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/FilteredScanSuite.scala
index 5ef3657..c81c3d3 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/FilteredScanSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/FilteredScanSuite.scala
@@ -21,6 +21,7 @@ import scala.language.existentials
 
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql._
+import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.sql.types._
 
 
@@ -96,11 +97,11 @@ object FiltersPushed {
   var list: Seq[Filter] = Nil
 }
 
-class FilteredScanSuite extends DataSourceTest {
+class FilteredScanSuite extends DataSourceTest with SharedSQLContext {
+  protected override lazy val sql = caseInsensitiveContext.sql _
 
-  import caseInsensitiveContext.sql
-
-  before {
+  override def beforeAll(): Unit = {
+    super.beforeAll()
     sql(
       """
         |CREATE TEMPORARY TABLE oneToTenFiltered

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
index cdbfaf6..78bd3e5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
@@ -19,20 +19,17 @@ package org.apache.spark.sql.sources
 
 import java.io.File
 
-import org.scalatest.BeforeAndAfterAll
-
 import org.apache.spark.sql.{SaveMode, AnalysisException, Row}
+import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.util.Utils
 
-class InsertSuite extends DataSourceTest with BeforeAndAfterAll {
-
-  import caseInsensitiveContext.sql
-
+class InsertSuite extends DataSourceTest with SharedSQLContext {
+  protected override lazy val sql = caseInsensitiveContext.sql _
   private lazy val sparkContext = caseInsensitiveContext.sparkContext
-
-  var path: File = null
+  private var path: File = null
 
   override def beforeAll(): Unit = {
+    super.beforeAll()
     path = Utils.createTempDir()
     val rdd = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str$i"}"""))
     caseInsensitiveContext.read.json(rdd).registerTempTable("jt")
@@ -47,9 +44,13 @@ class InsertSuite extends DataSourceTest with BeforeAndAfterAll {
   }
 
   override def afterAll(): Unit = {
-    caseInsensitiveContext.dropTempTable("jsonTable")
-    caseInsensitiveContext.dropTempTable("jt")
-    Utils.deleteRecursively(path)
+    try {
+      caseInsensitiveContext.dropTempTable("jsonTable")
+      caseInsensitiveContext.dropTempTable("jt")
+      Utils.deleteRecursively(path)
+    } finally {
+      super.afterAll()
+    }
   }
 
   test("Simple INSERT OVERWRITE a JSONRelation") {
@@ -221,9 +222,10 @@ class InsertSuite extends DataSourceTest with BeforeAndAfterAll {
       sql("SELECT a * 2 FROM jsonTable"),
       (1 to 10).map(i => Row(i * 2)).toSeq)
 
-    assertCached(sql("SELECT x.a, y.a FROM jsonTable x JOIN jsonTable y ON x.a = y.a + 1"), 2)
-    checkAnswer(
-      sql("SELECT x.a, y.a FROM jsonTable x JOIN jsonTable y ON x.a = y.a + 
1"),
+    assertCached(sql(
+      "SELECT x.a, y.a FROM jsonTable x JOIN jsonTable y ON x.a = y.a + 1"), 2)
+    checkAnswer(sql(
+      "SELECT x.a, y.a FROM jsonTable x JOIN jsonTable y ON x.a = y.a + 1"),
       (2 to 10).map(i => Row(i, i - 1)).toSeq)
 
     // Insert overwrite and keep the same schema.

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala
index c86ddd7..79b6e9b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala
@@ -19,21 +19,21 @@ package org.apache.spark.sql.sources
 
 import org.apache.spark.sql.{Row, QueryTest}
 import org.apache.spark.sql.functions._
-import org.apache.spark.sql.test.TestSQLContext
+import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.util.Utils
 
-class PartitionedWriteSuite extends QueryTest {
-  import TestSQLContext.implicits._
+class PartitionedWriteSuite extends QueryTest with SharedSQLContext {
+  import testImplicits._
 
   test("write many partitions") {
     val path = Utils.createTempDir()
     path.delete()
 
-    val df = TestSQLContext.range(100).select($"id", lit(1).as("data"))
+    val df = ctx.range(100).select($"id", lit(1).as("data"))
     df.write.partitionBy("id").save(path.getCanonicalPath)
 
     checkAnswer(
-      TestSQLContext.read.load(path.getCanonicalPath),
+      ctx.read.load(path.getCanonicalPath),
       (0 to 99).map(Row(1, _)).toSeq)
 
     Utils.deleteRecursively(path)
@@ -43,12 +43,12 @@ class PartitionedWriteSuite extends QueryTest {
     val path = Utils.createTempDir()
     path.delete()
 
-    val base = TestSQLContext.range(100)
+    val base = ctx.range(100)
     val df = base.unionAll(base).select($"id", lit(1).as("data"))
     df.write.partitionBy("id").save(path.getCanonicalPath)
 
     checkAnswer(
-      TestSQLContext.read.load(path.getCanonicalPath),
+      ctx.read.load(path.getCanonicalPath),
       (0 to 99).map(Row(1, _)).toSeq ++ (0 to 99).map(Row(1, _)).toSeq)
 
     Utils.deleteRecursively(path)

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/sources/PrunedScanSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/PrunedScanSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/PrunedScanSuite.scala
index 0d51834..a89c5f8 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/PrunedScanSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/PrunedScanSuite.scala
@@ -21,6 +21,7 @@ import scala.language.existentials
 
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql._
+import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.sql.types._
 
 class PrunedScanSource extends RelationProvider {
@@ -51,10 +52,12 @@ case class SimplePrunedScan(from: Int, to: Int)(@transient val sqlContext: SQLCo
   }
 }
 
-class PrunedScanSuite extends DataSourceTest {
+class PrunedScanSuite extends DataSourceTest with SharedSQLContext {
+  protected override lazy val sql = caseInsensitiveContext.sql _
 
-  before {
-    caseInsensitiveContext.sql(
+  override def beforeAll(): Unit = {
+    super.beforeAll()
+    sql(
       """
         |CREATE TEMPORARY TABLE oneToTenPruned
         |USING org.apache.spark.sql.sources.PrunedScanSource
@@ -114,7 +117,7 @@ class PrunedScanSuite extends DataSourceTest {
 
   def testPruning(sqlString: String, expectedColumns: String*): Unit = {
     test(s"Columns output ${expectedColumns.mkString(",")}: $sqlString") {
-      val queryExecution = caseInsensitiveContext.sql(sqlString).queryExecution
+      val queryExecution = sql(sqlString).queryExecution
       val rawPlan = queryExecution.executedPlan.collect {
         case p: execution.PhysicalRDD => p
       } match {

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala
index 31730a3..f18546b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala
@@ -19,25 +19,22 @@ package org.apache.spark.sql.sources
 
 import java.io.File
 
-import org.scalatest.BeforeAndAfterAll
+import org.scalatest.BeforeAndAfter
 
 import org.apache.spark.sql.{AnalysisException, SaveMode, SQLConf, DataFrame}
+import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.sql.types._
 import org.apache.spark.util.Utils
 
-class SaveLoadSuite extends DataSourceTest with BeforeAndAfterAll {
-
-  import caseInsensitiveContext.sql
-
+class SaveLoadSuite extends DataSourceTest with SharedSQLContext with BeforeAndAfter {
+  protected override lazy val sql = caseInsensitiveContext.sql _
   private lazy val sparkContext = caseInsensitiveContext.sparkContext
-
-  var originalDefaultSource: String = null
-
-  var path: File = null
-
-  var df: DataFrame = null
+  private var originalDefaultSource: String = null
+  private var path: File = null
+  private var df: DataFrame = null
 
   override def beforeAll(): Unit = {
+    super.beforeAll()
     originalDefaultSource = caseInsensitiveContext.conf.defaultDataSourceName
 
     path = Utils.createTempDir()
@@ -49,11 +46,14 @@ class SaveLoadSuite extends DataSourceTest with BeforeAndAfterAll {
   }
 
   override def afterAll(): Unit = {
-    caseInsensitiveContext.conf.setConf(SQLConf.DEFAULT_DATA_SOURCE_NAME, originalDefaultSource)
+    try {
+      caseInsensitiveContext.conf.setConf(SQLConf.DEFAULT_DATA_SOURCE_NAME, originalDefaultSource)
+    } finally {
+      super.afterAll()
+    }
   }
 
   after {
-    caseInsensitiveContext.conf.setConf(SQLConf.DEFAULT_DATA_SOURCE_NAME, originalDefaultSource)
     Utils.deleteRecursively(path)
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala
index e34e095..12af806 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala
@@ -22,6 +22,7 @@ import java.sql.{Date, Timestamp}
 
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql._
+import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.sql.types._
 
 class DefaultSource extends SimpleScanSource
@@ -95,8 +96,8 @@ case class AllDataTypesScan(
   }
 }
 
-class TableScanSuite extends DataSourceTest {
-  import caseInsensitiveContext.sql
+class TableScanSuite extends DataSourceTest with SharedSQLContext {
+  protected override lazy val sql = caseInsensitiveContext.sql _
 
   private lazy val tableWithSchemaExpected = (1 to 10).map { i =>
     Row(
@@ -122,7 +123,8 @@ class TableScanSuite extends DataSourceTest {
       Row(Seq(s"str_$i", s"str_${i + 1}"), Row(Seq(Date.valueOf(s"1970-01-${i 
+ 1}")))))
   }.toSeq
 
-  before {
+  override def beforeAll(): Unit = {
+    super.beforeAll()
     sql(
       """
         |CREATE TEMPORARY TABLE oneToTen
@@ -303,9 +305,10 @@ class TableScanSuite extends DataSourceTest {
       sql("SELECT i * 2 FROM oneToTen"),
       (1 to 10).map(i => Row(i * 2)).toSeq)
 
-    assertCached(sql("SELECT a.i, b.i FROM oneToTen a JOIN oneToTen b ON a.i = 
b.i + 1"), 2)
-    checkAnswer(
-      sql("SELECT a.i, b.i FROM oneToTen a JOIN oneToTen b ON a.i = b.i + 1"),
+    assertCached(sql(
+      "SELECT a.i, b.i FROM oneToTen a JOIN oneToTen b ON a.i = b.i + 1"), 2)
+    checkAnswer(sql(
+      "SELECT a.i, b.i FROM oneToTen a JOIN oneToTen b ON a.i = b.i + 1"),
       (2 to 10).map(i => Row(i, i - 1)).toSeq)
 
     // Verify uncaching

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala
new file mode 100644
index 0000000..1374a97
--- /dev/null
+++ b/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala
@@ -0,0 +1,290 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.test
+
+import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.{DataFrame, SQLContext, SQLImplicits}
+
+/**
+ * A collection of sample data used in SQL tests.
+ */
+private[sql] trait SQLTestData { self =>
+  protected def _sqlContext: SQLContext
+
+  // Helper object to import SQL implicits without a concrete SQLContext
+  private object internalImplicits extends SQLImplicits {
+    protected override def _sqlContext: SQLContext = self._sqlContext
+  }
+
+  import internalImplicits._
+  import SQLTestData._
+
+  // Note: all test data should be lazy because the SQLContext is not set up yet.
+
+  protected lazy val testData: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      (1 to 100).map(i => TestData(i, i.toString))).toDF()
+    df.registerTempTable("testData")
+    df
+  }
+
+  protected lazy val testData2: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      TestData2(1, 1) ::
+      TestData2(1, 2) ::
+      TestData2(2, 1) ::
+      TestData2(2, 2) ::
+      TestData2(3, 1) ::
+      TestData2(3, 2) :: Nil, 2).toDF()
+    df.registerTempTable("testData2")
+    df
+  }
+
+  protected lazy val testData3: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      TestData3(1, None) ::
+      TestData3(2, Some(2)) :: Nil).toDF()
+    df.registerTempTable("testData3")
+    df
+  }
+
+  protected lazy val negativeData: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      (1 to 100).map(i => TestData(-i, (-i).toString))).toDF()
+    df.registerTempTable("negativeData")
+    df
+  }
+
+  protected lazy val largeAndSmallInts: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      LargeAndSmallInts(2147483644, 1) ::
+      LargeAndSmallInts(1, 2) ::
+      LargeAndSmallInts(2147483645, 1) ::
+      LargeAndSmallInts(2, 2) ::
+      LargeAndSmallInts(2147483646, 1) ::
+      LargeAndSmallInts(3, 2) :: Nil).toDF()
+    df.registerTempTable("largeAndSmallInts")
+    df
+  }
+
+  protected lazy val decimalData: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      DecimalData(1, 1) ::
+      DecimalData(1, 2) ::
+      DecimalData(2, 1) ::
+      DecimalData(2, 2) ::
+      DecimalData(3, 1) ::
+      DecimalData(3, 2) :: Nil).toDF()
+    df.registerTempTable("decimalData")
+    df
+  }
+
+  protected lazy val binaryData: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      BinaryData("12".getBytes, 1) ::
+      BinaryData("22".getBytes, 5) ::
+      BinaryData("122".getBytes, 3) ::
+      BinaryData("121".getBytes, 2) ::
+      BinaryData("123".getBytes, 4) :: Nil).toDF()
+    df.registerTempTable("binaryData")
+    df
+  }
+
+  protected lazy val upperCaseData: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      UpperCaseData(1, "A") ::
+      UpperCaseData(2, "B") ::
+      UpperCaseData(3, "C") ::
+      UpperCaseData(4, "D") ::
+      UpperCaseData(5, "E") ::
+      UpperCaseData(6, "F") :: Nil).toDF()
+    df.registerTempTable("upperCaseData")
+    df
+  }
+
+  protected lazy val lowerCaseData: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      LowerCaseData(1, "a") ::
+      LowerCaseData(2, "b") ::
+      LowerCaseData(3, "c") ::
+      LowerCaseData(4, "d") :: Nil).toDF()
+    df.registerTempTable("lowerCaseData")
+    df
+  }
+
+  protected lazy val arrayData: RDD[ArrayData] = {
+    val rdd = _sqlContext.sparkContext.parallelize(
+      ArrayData(Seq(1, 2, 3), Seq(Seq(1, 2, 3))) ::
+      ArrayData(Seq(2, 3, 4), Seq(Seq(2, 3, 4))) :: Nil)
+    rdd.toDF().registerTempTable("arrayData")
+    rdd
+  }
+
+  protected lazy val mapData: RDD[MapData] = {
+    val rdd = _sqlContext.sparkContext.parallelize(
+      MapData(Map(1 -> "a1", 2 -> "b1", 3 -> "c1", 4 -> "d1", 5 -> "e1")) ::
+      MapData(Map(1 -> "a2", 2 -> "b2", 3 -> "c2", 4 -> "d2")) ::
+      MapData(Map(1 -> "a3", 2 -> "b3", 3 -> "c3")) ::
+      MapData(Map(1 -> "a4", 2 -> "b4")) ::
+      MapData(Map(1 -> "a5")) :: Nil)
+    rdd.toDF().registerTempTable("mapData")
+    rdd
+  }
+
+  protected lazy val repeatedData: RDD[StringData] = {
+    val rdd = _sqlContext.sparkContext.parallelize(List.fill(2)(StringData("test")))
+    rdd.toDF().registerTempTable("repeatedData")
+    rdd
+  }
+
+  protected lazy val nullableRepeatedData: RDD[StringData] = {
+    val rdd = _sqlContext.sparkContext.parallelize(
+      List.fill(2)(StringData(null)) ++
+      List.fill(2)(StringData("test")))
+    rdd.toDF().registerTempTable("nullableRepeatedData")
+    rdd
+  }
+
+  protected lazy val nullInts: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      NullInts(1) ::
+      NullInts(2) ::
+      NullInts(3) ::
+      NullInts(null) :: Nil).toDF()
+    df.registerTempTable("nullInts")
+    df
+  }
+
+  protected lazy val allNulls: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      NullInts(null) ::
+      NullInts(null) ::
+      NullInts(null) ::
+      NullInts(null) :: Nil).toDF()
+    df.registerTempTable("allNulls")
+    df
+  }
+
+  protected lazy val nullStrings: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      NullStrings(1, "abc") ::
+      NullStrings(2, "ABC") ::
+      NullStrings(3, null) :: Nil).toDF()
+    df.registerTempTable("nullStrings")
+    df
+  }
+
+  protected lazy val tableName: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(TableName("test") :: 
Nil).toDF()
+    df.registerTempTable("tableName")
+    df
+  }
+
+  protected lazy val unparsedStrings: RDD[String] = {
+    _sqlContext.sparkContext.parallelize(
+      "1, A1, true, null" ::
+      "2, B2, false, null" ::
+      "3, C3, true, null" ::
+      "4, D4, true, 2147483644" :: Nil)
+  }
+
+  // An RDD with 4 elements and 8 partitions
+  protected lazy val withEmptyParts: RDD[IntField] = {
+    val rdd = _sqlContext.sparkContext.parallelize((1 to 4).map(IntField), 8)
+    rdd.toDF().registerTempTable("withEmptyParts")
+    rdd
+  }
+
+  protected lazy val person: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      Person(0, "mike", 30) ::
+      Person(1, "jim", 20) :: Nil).toDF()
+    df.registerTempTable("person")
+    df
+  }
+
+  protected lazy val salary: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      Salary(0, 2000.0) ::
+      Salary(1, 1000.0) :: Nil).toDF()
+    df.registerTempTable("salary")
+    df
+  }
+
+  protected lazy val complexData: DataFrame = {
+    val df = _sqlContext.sparkContext.parallelize(
+      ComplexData(Map("1" -> 1), TestData(1, "1"), Seq(1, 1, 1), true) ::
+      ComplexData(Map("2" -> 2), TestData(2, "2"), Seq(2, 2, 2), false) ::
+      Nil).toDF()
+    df.registerTempTable("complexData")
+    df
+  }
+
+  /**
+   * Initialize all test data such that all temp tables are properly registered.
+   */
+  def loadTestData(): Unit = {
+    assert(_sqlContext != null, "attempted to initialize test data before SQLContext.")
+    testData
+    testData2
+    testData3
+    negativeData
+    largeAndSmallInts
+    decimalData
+    binaryData
+    upperCaseData
+    lowerCaseData
+    arrayData
+    mapData
+    repeatedData
+    nullableRepeatedData
+    nullInts
+    allNulls
+    nullStrings
+    tableName
+    unparsedStrings
+    withEmptyParts
+    person
+    salary
+    complexData
+  }
+}
+
+/**
+ * Case classes used in test data.
+ */
+private[sql] object SQLTestData {
+  case class TestData(key: Int, value: String)
+  case class TestData2(a: Int, b: Int)
+  case class TestData3(a: Int, b: Option[Int])
+  case class LargeAndSmallInts(a: Int, b: Int)
+  case class DecimalData(a: BigDecimal, b: BigDecimal)
+  case class BinaryData(a: Array[Byte], b: Int)
+  case class UpperCaseData(N: Int, L: String)
+  case class LowerCaseData(n: Int, l: String)
+  case class ArrayData(data: Seq[Int], nestedData: Seq[Seq[Int]])
+  case class MapData(data: scala.collection.Map[Int, String])
+  case class StringData(s: String)
+  case class IntField(i: Int)
+  case class NullInts(a: Integer)
+  case class NullStrings(n: Int, s: String)
+  case class TableName(tableName: String)
+  case class Person(id: Int, name: String, age: Int)
+  case class Salary(personId: Int, salary: Double)
+  case class ComplexData(m: Map[String, Int], s: TestData, a: Seq[Int], b: Boolean)
+}
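
For illustration only (not part of this patch), a suite built on these traits could consume the sample data above roughly as in the sketch below; the suite name and query are invented, and setupTestData() (defined in SQLTestUtils further down) merely flags that loadTestData() should run before the first test.

    import org.apache.spark.sql.{QueryTest, Row}
    import org.apache.spark.sql.test.SharedSQLContext

    // Hypothetical suite; relies on the temp tables registered by SQLTestData.
    class SampleDataSuite extends QueryTest with SharedSQLContext {
      // Materialize all SQLTestData temp tables in beforeAll().
      setupTestData()

      test("testData2 is registered as a temp table") {
        // testData2 holds (1,1), (1,2), (2,1), (2,2), (3,1), (3,2).
        checkAnswer(
          sql("SELECT a FROM testData2 WHERE b = 2"),
          Row(1) :: Row(2) :: Row(3) :: Nil)
      }
    }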

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala
index 1066695..cdd691e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala
@@ -21,15 +21,71 @@ import java.io.File
 import java.util.UUID
 
 import scala.util.Try
+import scala.language.implicitConversions
+
+import org.apache.hadoop.conf.Configuration
+import org.scalatest.BeforeAndAfterAll
 
 import org.apache.spark.SparkFunSuite
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.{DataFrame, SQLContext, SQLImplicits}
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.util.Utils
 
-trait SQLTestUtils { this: SparkFunSuite =>
-  protected def sqlContext: SQLContext
+/**
+ * Helper trait that should be extended by all SQL test suites.
+ *
+ * This allows subclasses to plugin a custom [[SQLContext]]. It comes with test data
+ * prepared in advance as well as all implicit conversions used extensively by dataframes.
+ * To use implicit methods, import `testImplicits._` instead of through the [[SQLContext]].
+ *
+ * Subclasses should *not* create [[SQLContext]]s in the test suite constructor, which is
+ * prone to leaving multiple overlapping [[org.apache.spark.SparkContext]]s in the same JVM.
+ */
+private[sql] trait SQLTestUtils
+  extends SparkFunSuite
+  with BeforeAndAfterAll
+  with SQLTestData { self =>
+
+  protected def _sqlContext: SQLContext
+
+  // Whether to materialize all test data before the first test is run
+  private var loadTestDataBeforeTests = false
+
+  // Shorthand for running a query using our SQLContext
+  protected lazy val sql = _sqlContext.sql _
+
+  /**
+   * A helper object for importing SQL implicits.
+   *
+   * Note that the alternative of importing `sqlContext.implicits._` is not possible here.
+   * This is because we create the [[SQLContext]] immediately before the first test is run,
+   * but the implicits import is needed in the constructor.
+   */
+  protected object testImplicits extends SQLImplicits {
+    protected override def _sqlContext: SQLContext = self._sqlContext
+  }
+
+  /**
+   * Materialize the test data immediately after the [[SQLContext]] is set up.
+   * This is necessary if the data is accessed by name but not through direct reference.
+   */
+  protected def setupTestData(): Unit = {
+    loadTestDataBeforeTests = true
+  }
 
-  protected def configuration = sqlContext.sparkContext.hadoopConfiguration
+  protected override def beforeAll(): Unit = {
+    super.beforeAll()
+    if (loadTestDataBeforeTests) {
+      loadTestData()
+    }
+  }
+
+  /**
+   * The Hadoop configuration used by the active [[SQLContext]].
+   */
+  protected def configuration: Configuration = {
+    _sqlContext.sparkContext.hadoopConfiguration
+  }
 
   /**
    * Sets all SQL configurations specified in `pairs`, calls `f`, and then restore all SQL
@@ -39,12 +95,12 @@ trait SQLTestUtils { this: SparkFunSuite =>
    */
   protected def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = {
     val (keys, values) = pairs.unzip
-    val currentValues = keys.map(key => Try(sqlContext.conf.getConfString(key)).toOption)
-    (keys, values).zipped.foreach(sqlContext.conf.setConfString)
+    val currentValues = keys.map(key => Try(_sqlContext.conf.getConfString(key)).toOption)
+    (keys, values).zipped.foreach(_sqlContext.conf.setConfString)
     try f finally {
       keys.zip(currentValues).foreach {
-        case (key, Some(value)) => sqlContext.conf.setConfString(key, value)
-        case (key, None) => sqlContext.conf.unsetConf(key)
+        case (key, Some(value)) => _sqlContext.conf.setConfString(key, value)
+        case (key, None) => _sqlContext.conf.unsetConf(key)
       }
     }
   }
@@ -76,7 +132,7 @@ trait SQLTestUtils { this: SparkFunSuite =>
    * Drops temporary table `tableName` after calling `f`.
    */
   protected def withTempTable(tableNames: String*)(f: => Unit): Unit = {
-    try f finally tableNames.foreach(sqlContext.dropTempTable)
+    try f finally tableNames.foreach(_sqlContext.dropTempTable)
   }
 
   /**
@@ -85,7 +141,7 @@ trait SQLTestUtils { this: SparkFunSuite =>
   protected def withTable(tableNames: String*)(f: => Unit): Unit = {
     try f finally {
       tableNames.foreach { name =>
-        sqlContext.sql(s"DROP TABLE IF EXISTS $name")
+        _sqlContext.sql(s"DROP TABLE IF EXISTS $name")
       }
     }
   }
@@ -98,12 +154,12 @@ trait SQLTestUtils { this: SparkFunSuite =>
     val dbName = s"db_${UUID.randomUUID().toString.replace('-', '_')}"
 
     try {
-      sqlContext.sql(s"CREATE DATABASE $dbName")
+      _sqlContext.sql(s"CREATE DATABASE $dbName")
     } catch { case cause: Throwable =>
       fail("Failed to create temporary database", cause)
     }
 
-    try f(dbName) finally sqlContext.sql(s"DROP DATABASE $dbName CASCADE")
+    try f(dbName) finally _sqlContext.sql(s"DROP DATABASE $dbName CASCADE")
   }
 
   /**
@@ -111,7 +167,15 @@ trait SQLTestUtils { this: SparkFunSuite =>
    * `f` returns.
    */
   protected def activateDatabase(db: String)(f: => Unit): Unit = {
-    sqlContext.sql(s"USE $db")
-    try f finally sqlContext.sql(s"USE default")
+    _sqlContext.sql(s"USE $db")
+    try f finally _sqlContext.sql(s"USE default")
+  }
+
+  /**
+   * Turn a logical plan into a [[DataFrame]]. This should be removed once we have an easier
+   * way to construct [[DataFrame]] directly out of local data without relying on implicits.
+   */
+  protected implicit def logicalPlanToSparkQuery(plan: LogicalPlan): DataFrame = {
+    DataFrame(_sqlContext, plan)
   }
 }
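
As a rough usage sketch (hypothetical test and table names, assuming a suite that mixes in this trait via SharedSQLContext together with QueryTest): withSQLConf restores the previous values even if the body throws, and withTempTable drops the named tables once the body finishes.

    // Assumes the enclosing file imports org.apache.spark.sql.{Row, SQLConf}.
    test("example: scoped conf change and temp table cleanup") {
      withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
        _sqlContext.range(0, 10).registerTempTable("tmp_numbers")
        withTempTable("tmp_numbers") {
          checkAnswer(sql("SELECT count(*) FROM tmp_numbers"), Row(10L))
        }
      }
      // Here "tmp_numbers" has been dropped and SHUFFLE_PARTITIONS is back to
      // whatever value it had before the block ran.
    }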

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSQLContext.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSQLContext.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSQLContext.scala
new file mode 100644
index 0000000..3cfd822
--- /dev/null
+++ b/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSQLContext.scala
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.test
+
+import org.apache.spark.sql.SQLContext
+
+
+/**
+ * Helper trait for SQL test suites where all tests share a single [[TestSQLContext]].
+ */
+private[sql] trait SharedSQLContext extends SQLTestUtils {
+
+  /**
+   * The [[TestSQLContext]] to use for all tests in this suite.
+   *
+   * By default, the underlying [[org.apache.spark.SparkContext]] will be run in local
+   * mode with the default test configurations.
+   */
+  private var _ctx: TestSQLContext = null
+
+  /**
+   * The [[TestSQLContext]] to use for all tests in this suite.
+   */
+  protected def ctx: TestSQLContext = _ctx
+  protected def sqlContext: TestSQLContext = _ctx
+  protected override def _sqlContext: SQLContext = _ctx
+
+  /**
+   * Initialize the [[TestSQLContext]].
+   */
+  protected override def beforeAll(): Unit = {
+    if (_ctx == null) {
+      _ctx = new TestSQLContext
+    }
+    // Ensure we have initialized the context before calling parent code
+    super.beforeAll()
+  }
+
+  /**
+   * Stop the underlying [[org.apache.spark.SparkContext]], if any.
+   */
+  protected override def afterAll(): Unit = {
+    try {
+      if (_ctx != null) {
+        _ctx.sparkContext.stop()
+        _ctx = null
+      }
+    } finally {
+      super.afterAll()
+    }
+  }
+
+}
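
For reference, a minimal suite on top of this trait might look like the sketch below (suite, test and column names are invented); it mirrors what PartitionedWriteSuite above now does with ctx and testImplicits.

    import org.apache.spark.sql.{QueryTest, Row}
    import org.apache.spark.sql.test.SharedSQLContext

    // Hypothetical suite: ctx, sqlContext, the sql shorthand and testImplicits
    // are all inherited from the trait.
    class ExampleSharedContextSuite extends QueryTest with SharedSQLContext {
      import testImplicits._

      test("filter a locally created DataFrame") {
        val df = Seq((1, "a"), (2, "b"), (3, "c")).toDF("n", "s")
        checkAnswer(df.filter($"n" > 1).select($"s"), Row("b") :: Row("c") :: Nil)
      }
    }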

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala
new file mode 100644
index 0000000..92ef2f7
--- /dev/null
+++ b/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.test
+
+import org.apache.spark.{SparkConf, SparkContext}
+import org.apache.spark.sql.{SQLConf, SQLContext}
+
+
+/**
+ * A special [[SQLContext]] prepared for testing.
+ */
+private[sql] class TestSQLContext(sc: SparkContext) extends SQLContext(sc) { self =>
+
+  def this() {
+    this(new SparkContext("local[2]", "test-sql-context",
+      new SparkConf().set("spark.sql.testkey", "true")))
+  }
+
+  // Use fewer partitions to speed up testing
+  protected[sql] override def createSession(): SQLSession = new this.SQLSession()
+
+  /** A special [[SQLSession]] that uses fewer shuffle partitions than normal. */
+  protected[sql] class SQLSession extends super.SQLSession {
+    protected[sql] override lazy val conf: SQLConf = new SQLConf {
+      override def numShufflePartitions: Int = this.getConf(SQLConf.SHUFFLE_PARTITIONS, 5)
+    }
+  }
+
+  // Needed for Java tests
+  def loadTestData(): Unit = {
+    testData.loadTestData()
+  }
+
+  private object testData extends SQLTestData {
+    protected override def _sqlContext: SQLContext = self
+  }
+}
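
A rough sketch of driving this context by hand, e.g. from a standalone or Java-facing test that cannot mix in SharedSQLContext (illustrative only; note the class is private[sql], so this only works from code in the org.apache.spark.sql package):

    import org.apache.spark.sql.test.TestSQLContext

    // Spins up a local[2] SparkContext with spark.sql.testkey set.
    val ctx = new TestSQLContext()
    try {
      ctx.loadTestData()  // registers the SQLTestData temp tables
      ctx.sql("SELECT count(*) FROM testData").show()
    } finally {
      ctx.sparkContext.stop()  // tear down the JVM-wide SparkContext
    }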

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala
index 806240e..bf431cd 100644
--- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala
+++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala
@@ -27,7 +27,6 @@ import org.scalatest.concurrent.Eventually._
 import org.scalatest.selenium.WebBrowser
 import org.scalatest.time.SpanSugar._
 
-import org.apache.spark.sql.hive.HiveContext
 import org.apache.spark.ui.SparkUICssErrorHandler
 
 class UISeleniumSuite
@@ -36,7 +35,6 @@ class UISeleniumSuite
 
   implicit var webDriver: WebDriver = _
   var server: HiveThriftServer2 = _
-  var hc: HiveContext = _
   val uiPort = 20000 + Random.nextInt(10000)
   override def mode: ServerMode.Value = ServerMode.binary
 

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
index 59e65ff..574624d 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
@@ -26,7 +26,7 @@ import org.apache.spark.sql.hive.test.TestHive.implicits._
 import org.apache.spark.sql.sources.DataSourceTest
 import org.apache.spark.sql.test.{ExamplePointUDT, SQLTestUtils}
 import org.apache.spark.sql.types.{DecimalType, StringType, StructType}
-import org.apache.spark.sql.{Row, SaveMode}
+import org.apache.spark.sql.{Row, SaveMode, SQLContext}
 import org.apache.spark.{Logging, SparkFunSuite}
 
 
@@ -53,7 +53,8 @@ class HiveMetastoreCatalogSuite extends SparkFunSuite with Logging {
 }
 
 class DataSourceWithHiveMetastoreCatalogSuite extends DataSourceTest with SQLTestUtils {
-  override val sqlContext = TestHive
+  override def _sqlContext: SQLContext = TestHive
+  import testImplicits._
 
   private val testDF = range(1, 3).select(
     ('id + 0.1) cast DecimalType(10, 3) as 'd1,

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
index 1fa005d..fe0db52 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
@@ -19,14 +19,13 @@ package org.apache.spark.sql.hive
 
 import org.apache.spark.sql.hive.test.TestHive
 import org.apache.spark.sql.execution.datasources.parquet.ParquetTest
-import org.apache.spark.sql.{QueryTest, Row}
+import org.apache.spark.sql.{QueryTest, Row, SQLContext}
 
 case class Cases(lower: String, UPPER: String)
 
 class HiveParquetSuite extends QueryTest with ParquetTest {
-  val sqlContext = TestHive
-
-  import sqlContext._
+  private val ctx = TestHive
+  override def _sqlContext: SQLContext = ctx
 
   test("Case insensitive attribute names") {
     withParquetTable((1 to 4).map(i => Cases(i.toString, i.toString)), "cases") {
@@ -54,7 +53,7 @@ class HiveParquetSuite extends QueryTest with ParquetTest {
   test("Converting Hive to Parquet Table via saveAsParquetFile") {
     withTempPath { dir =>
       sql("SELECT * FROM src").write.parquet(dir.getCanonicalPath)
-      read.parquet(dir.getCanonicalPath).registerTempTable("p")
+      ctx.read.parquet(dir.getCanonicalPath).registerTempTable("p")
       withTempTable("p") {
         checkAnswer(
           sql("SELECT * FROM src ORDER BY key"),
@@ -67,7 +66,7 @@ class HiveParquetSuite extends QueryTest with ParquetTest {
     withParquetTable((1 to 10).map(i => (i, s"val_$i")), "t") {
       withTempPath { file =>
         sql("SELECT * FROM t LIMIT 1").write.parquet(file.getCanonicalPath)
-        read.parquet(file.getCanonicalPath).registerTempTable("p")
+        ctx.read.parquet(file.getCanonicalPath).registerTempTable("p")
         withTempTable("p") {
           // let's do three overwrites for good measure
           sql("INSERT OVERWRITE TABLE p SELECT * FROM t")

http://git-wip-us.apache.org/repos/asf/spark/blob/8187b3ae/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
index 7f36a48..20a5058 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
@@ -22,7 +22,6 @@ import java.io.{IOException, File}
 import scala.collection.mutable.ArrayBuffer
 
 import org.apache.hadoop.fs.Path
-import org.apache.hadoop.mapred.InvalidInputException
 import org.scalatest.BeforeAndAfterAll
 
 import org.apache.spark.Logging
@@ -42,7 +41,8 @@ import org.apache.spark.util.Utils
  */
 class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with BeforeAndAfterAll
   with Logging {
-  override val sqlContext = TestHive
+  override def _sqlContext: SQLContext = TestHive
+  private val sqlContext = _sqlContext
 
   var jsonFilePath: String = _
 

