Repository: spark
Updated Branches:
  refs/heads/branch-2.0 c0715f33b -> 23789e358
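
The diffs below replace the SparkConf/SparkContext/SQLContext setup in the example programs with the Spark 2.0 SparkSession entry point. As a minimal sketch of the pattern the updated examples share (the object name and columns here are illustrative, not taken from any one file):

import org.apache.spark.sql.SparkSession

object SparkSessionSketch {
  def main(args: Array[String]): Unit = {
    // One SparkSession replaces the SparkConf/SparkContext/SQLContext trio.
    val spark = SparkSession.builder.appName("SparkSessionSketch").getOrCreate()

    // DataFrames are created and data sources are read directly on the session.
    val df = spark.createDataFrame(Seq((0, "a"), (1, "b"))).toDF("id", "category")
    df.show()

    // Stop the session rather than the underlying SparkContext.
    spark.stop()
  }
}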


http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/NGramExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/NGramExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/NGramExample.scala
index 77b913a..1b71a39 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/NGramExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/NGramExample.scala
@@ -18,20 +18,17 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.NGram
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object NGramExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("NGramExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("NGramExample").getOrCreate()
 
     // $example on$
-    val wordDataFrame = sqlContext.createDataFrame(Seq(
+    val wordDataFrame = spark.createDataFrame(Seq(
       (0, Array("Hi", "I", "heard", "about", "Spark")),
       (1, Array("I", "wish", "Java", "could", "use", "case", "classes")),
       (2, Array("Logistic", "regression", "models", "are", "neat"))
@@ -41,7 +38,8 @@ object NGramExample {
     val ngramDataFrame = ngram.transform(wordDataFrame)
     ngramDataFrame.take(3).map(_.getAs[Stream[String]]("ngrams").toList).foreach(println)
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala
index 5ea1270..8d54555 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala
@@ -18,21 +18,18 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
-import org.apache.spark.ml.classification.{NaiveBayes}
+import org.apache.spark.ml.classification.NaiveBayes
 import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object NaiveBayesExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("NaiveBayesExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("NaiveBayesExample").getOrCreate()
     // $example on$
     // Load the data stored in LIBSVM format as a DataFrame.
-    val data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    val data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     // Split the data into training and test sets (30% held out for testing)
     val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3))
@@ -53,6 +50,8 @@ object NaiveBayesExample {
     val precision = evaluator.evaluate(predictions)
     println("Precision:" + precision)
     // $example off$
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/NormalizerExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/NormalizerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/NormalizerExample.scala
index 6b33c16..4622d69 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/NormalizerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/NormalizerExample.scala
@@ -18,20 +18,17 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.Normalizer
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object NormalizerExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("NormalizerExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("NormalizerExample").getOrCreate()
 
     // $example on$
-    val dataFrame = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    val dataFrame = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     // Normalize each Vector using $L^1$ norm.
     val normalizer = new Normalizer()
@@ -46,7 +43,8 @@ object NormalizerExample {
     val lInfNormData = normalizer.transform(dataFrame, normalizer.p -> Double.PositiveInfinity)
     lInfNormData.show()
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/OneHotEncoderExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/OneHotEncoderExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/OneHotEncoderExample.scala
index cb9fe65..3384361 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/OneHotEncoderExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/OneHotEncoderExample.scala
@@ -18,20 +18,17 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.{OneHotEncoder, StringIndexer}
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object OneHotEncoderExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("OneHotEncoderExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("OneHotEncoderExample").getOrCreate()
 
     // $example on$
-    val df = sqlContext.createDataFrame(Seq(
+    val df = spark.createDataFrame(Seq(
       (0, "a"),
       (1, "b"),
       (2, "c"),
@@ -52,7 +49,8 @@ object OneHotEncoderExample {
     val encoded = encoder.transform(indexed)
     encoded.select("id", "categoryVec").show()
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
index 0b5d31c..e2351c6 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
@@ -22,7 +22,6 @@ import java.util.concurrent.TimeUnit.{NANOSECONDS => NANO}
 
 import scopt.OptionParser
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.examples.mllib.AbstractParams
 import org.apache.spark.ml.classification.{LogisticRegression, OneVsRest}
@@ -31,7 +30,7 @@ import org.apache.spark.mllib.evaluation.MulticlassMetrics
 import org.apache.spark.mllib.linalg.Vector
 import org.apache.spark.sql.DataFrame
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 /**
  * An example runner for Multiclass to Binary Reduction with One Vs Rest.
@@ -110,18 +109,16 @@ object OneVsRestExample {
   }
 
   private def run(params: Params) {
-    val conf = new SparkConf().setAppName(s"OneVsRestExample with $params")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName(s"OneVsRestExample with $params").getOrCreate()
 
     // $example on$
-    val inputData = sqlContext.read.format("libsvm").load(params.input)
+    val inputData = spark.read.format("libsvm").load(params.input)
     // compute the train/test split: if testInput is not provided use part of input.
     val data = params.testInput match {
       case Some(t) =>
         // compute the number of features in the training set.
         val numFeatures = inputData.first().getAs[Vector](1).size
-        val testData = sqlContext.read.option("numFeatures", numFeatures.toString)
+        val testData = spark.read.option("numFeatures", numFeatures.toString)
           .format("libsvm").load(t)
         Array[DataFrame](inputData, testData)
       case None =>
@@ -175,7 +172,7 @@ object OneVsRestExample {
     println(fprs.map {case (label, fpr) => label + "\t" + fpr}.mkString("\n"))
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 
   private def time[R](block: => R): (Long, R) = {

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/PCAExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/PCAExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/PCAExample.scala
index 535652e..14394d5 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/PCAExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/PCAExample.scala
@@ -18,18 +18,15 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.PCA
 import org.apache.spark.mllib.linalg.Vectors
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object PCAExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("PCAExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("PCAExample").getOrCreate()
 
     // $example on$
     val data = Array(
@@ -37,7 +34,7 @@ object PCAExample {
       Vectors.dense(2.0, 0.0, 3.0, 4.0, 5.0),
       Vectors.dense(4.0, 0.0, 0.0, 6.0, 7.0)
     )
-    val df = sqlContext.createDataFrame(data.map(Tuple1.apply)).toDF("features")
+    val df = spark.createDataFrame(data.map(Tuple1.apply)).toDF("features")
     val pca = new PCA()
       .setInputCol("features")
       .setOutputCol("pcaFeatures")
@@ -47,7 +44,8 @@ object PCAExample {
     val result = pcaDF.select("pcaFeatures")
     result.show()
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/PipelineExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/PipelineExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/PipelineExample.scala
index 6c29063..61b34ae 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/PipelineExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/PipelineExample.scala
@@ -18,7 +18,6 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.{Pipeline, PipelineModel}
 import org.apache.spark.ml.classification.LogisticRegression
@@ -26,18 +25,16 @@ import org.apache.spark.ml.feature.{HashingTF, Tokenizer}
 import org.apache.spark.mllib.linalg.Vector
 import org.apache.spark.sql.Row
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object PipelineExample {
 
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("PipelineExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("PipelineExample").getOrCreate()
 
     // $example on$
     // Prepare training documents from a list of (id, text, label) tuples.
-    val training = sqlContext.createDataFrame(Seq(
+    val training = spark.createDataFrame(Seq(
       (0L, "a b c d e spark", 1.0),
       (1L, "b d", 0.0),
       (2L, "spark f g h", 1.0),
@@ -71,7 +68,7 @@ object PipelineExample {
     val sameModel = PipelineModel.load("/tmp/spark-logistic-regression-model")
 
     // Prepare test documents, which are unlabeled (id, text) tuples.
-    val test = sqlContext.createDataFrame(Seq(
+    val test = spark.createDataFrame(Seq(
       (4L, "spark i j k"),
       (5L, "l m n"),
       (6L, "mapreduce spark"),
@@ -87,7 +84,7 @@ object PipelineExample {
       }
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/PolynomialExpansionExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/PolynomialExpansionExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/PolynomialExpansionExample.scala
index 3014008..4d8c672 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/PolynomialExpansionExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/PolynomialExpansionExample.scala
@@ -18,18 +18,15 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.PolynomialExpansion
 import org.apache.spark.mllib.linalg.Vectors
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object PolynomialExpansionExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("PolynomialExpansionExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("PolynomialExpansionExample").getOrCreate()
 
     // $example on$
     val data = Array(
@@ -37,7 +34,7 @@ object PolynomialExpansionExample {
       Vectors.dense(0.0, 0.0),
       Vectors.dense(0.6, -1.1)
     )
-    val df = sqlContext.createDataFrame(data.map(Tuple1.apply)).toDF("features")
+    val df = spark.createDataFrame(data.map(Tuple1.apply)).toDF("features")
     val polynomialExpansion = new PolynomialExpansion()
       .setInputCol("features")
       .setOutputCol("polyFeatures")
@@ -45,7 +42,8 @@ object PolynomialExpansionExample {
     val polyDF = polynomialExpansion.transform(df)
     polyDF.select("polyFeatures").take(3).foreach(println)
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala
index e64e673..0839c60 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala
@@ -15,25 +15,21 @@
  * limitations under the License.
  */
 
-// scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.QuantileDiscretizer
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object QuantileDiscretizerExample {
   def main(args: Array[String]) {
-    val conf = new SparkConf().setAppName("QuantileDiscretizerExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
-    import sqlContext.implicits._
+    val spark = SparkSession.builder.appName("QuantileDiscretizerExample").getOrCreate()
+    import spark.implicits._
 
     // $example on$
     val data = Array((0, 18.0), (1, 19.0), (2, 8.0), (3, 5.0), (4, 2.2))
-    val df = sc.parallelize(data).toDF("id", "hour")
+    val df = spark.createDataFrame(data).toDF("id", "hour")
 
     val discretizer = new QuantileDiscretizer()
       .setInputCol("hour")
@@ -43,7 +39,7 @@ object QuantileDiscretizerExample {
     val result = discretizer.fit(df).transform(df)
     result.show()
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
-// scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/RFormulaExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/RFormulaExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/RFormulaExample.scala
index bec831d..699b621 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/RFormulaExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/RFormulaExample.scala
@@ -18,20 +18,17 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.RFormula
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object RFormulaExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("RFormulaExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("RFormulaExample").getOrCreate()
 
     // $example on$
-    val dataset = sqlContext.createDataFrame(Seq(
+    val dataset = spark.createDataFrame(Seq(
       (7, "US", 18, 1.0),
       (8, "CA", 12, 0.0),
       (9, "NZ", 15, 0.0)
@@ -43,7 +40,8 @@ object RFormulaExample {
     val output = formula.fit(dataset).transform(dataset)
     output.select("features", "label").show()
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestClassifierExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestClassifierExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestClassifierExample.scala
index 6c9b52c..4192a9c 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestClassifierExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestClassifierExample.scala
@@ -18,24 +18,21 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.Pipeline
 import org.apache.spark.ml.classification.{RandomForestClassificationModel, RandomForestClassifier}
 import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
 import org.apache.spark.ml.feature.{IndexToString, StringIndexer, VectorIndexer}
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object RandomForestClassifierExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("RandomForestClassifierExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("RandomForestClassifierExample").getOrCreate()
 
     // $example on$
     // Load and parse the data file, converting it to a DataFrame.
-    val data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    val data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     // Index labels, adding metadata to the label column.
     // Fit on whole dataset to include all labels in index.
@@ -91,7 +88,7 @@ object RandomForestClassifierExample {
     println("Learned classification forest model:\n" + rfModel.toDebugString)
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestRegressorExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestRegressorExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestRegressorExample.scala
index 4d2db01..5632f04 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestRegressorExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestRegressorExample.scala
@@ -18,24 +18,21 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.Pipeline
 import org.apache.spark.ml.evaluation.RegressionEvaluator
 import org.apache.spark.ml.feature.VectorIndexer
 import org.apache.spark.ml.regression.{RandomForestRegressionModel, RandomForestRegressor}
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object RandomForestRegressorExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("RandomForestRegressorExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("RandomForestRegressorExample").getOrCreate()
 
     // $example on$
     // Load and parse the data file, converting it to a DataFrame.
-    val data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    val data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     // Automatically identify categorical features, and index them.
     // Set maxCategories so features with > 4 distinct values are treated as continuous.
@@ -78,7 +75,7 @@ object RandomForestRegressorExample {
     println("Learned regression forest model:\n" + rfModel.toDebugString)
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/SQLTransformerExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/SQLTransformerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/SQLTransformerExample.scala
index 202925a..f03b29b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/SQLTransformerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/SQLTransformerExample.scala
@@ -18,20 +18,17 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.SQLTransformer
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object SQLTransformerExample {
   def main(args: Array[String]) {
-    val conf = new SparkConf().setAppName("SQLTransformerExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("SQLTransformerExample").getOrCreate()
 
     // $example on$
-    val df = sqlContext.createDataFrame(
+    val df = spark.createDataFrame(
       Seq((0, 1.0, 3.0), (2, 2.0, 5.0))).toDF("id", "v1", "v2")
 
     val sqlTrans = new SQLTransformer().setStatement(
@@ -39,6 +36,8 @@ object SQLTransformerExample {
 
     sqlTrans.transform(df).show()
     // $example off$
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala
index f4d1fe5..dff7719 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala
@@ -18,12 +18,11 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.ml.classification.LogisticRegression
 import org.apache.spark.ml.param.ParamMap
 import org.apache.spark.mllib.linalg.{Vector, Vectors}
 import org.apache.spark.mllib.regression.LabeledPoint
-import org.apache.spark.sql.{Row, SQLContext}
+import org.apache.spark.sql.{Row, SparkSession}
 
 /**
  * A simple example demonstrating ways to specify parameters for Estimators and Transformers.
@@ -35,15 +34,13 @@ import org.apache.spark.sql.{Row, SQLContext}
 object SimpleParamsExample {
 
   def main(args: Array[String]) {
-    val conf = new SparkConf().setAppName("SimpleParamsExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
-    import sqlContext.implicits._
+    val spark = SparkSession.builder.appName("SimpleParamsExample").getOrCreate()
+    import spark.implicits._
 
     // Prepare training data.
     // We use LabeledPoint, which is a case class.  Spark SQL can convert RDDs of case classes
     // into DataFrames, where it uses the case class metadata to infer the schema.
-    val training = sc.parallelize(Seq(
+    val training = spark.createDataFrame(Seq(
       LabeledPoint(1.0, Vectors.dense(0.0, 1.1, 0.1)),
       LabeledPoint(0.0, Vectors.dense(2.0, 1.0, -1.0)),
       LabeledPoint(0.0, Vectors.dense(2.0, 1.3, 1.0)),
@@ -59,7 +56,7 @@ object SimpleParamsExample {
       .setRegParam(0.01)
 
     // Learn a LogisticRegression model.  This uses the parameters stored in lr.
-    val model1 = lr.fit(training.toDF())
+    val model1 = lr.fit(training)
     // Since model1 is a Model (i.e., a Transformer produced by an Estimator),
     // we can view the parameters it used during fit().
     // This prints the parameter (name: value) pairs, where names are unique IDs for this
@@ -82,7 +79,7 @@ object SimpleParamsExample {
     println("Model 2 was fit using parameters: " + model2.parent.extractParamMap())
 
     // Prepare test data.
-    val test = sc.parallelize(Seq(
+    val test = spark.createDataFrame(Seq(
       LabeledPoint(1.0, Vectors.dense(-1.0, 1.5, 1.3)),
       LabeledPoint(0.0, Vectors.dense(3.0, 2.0, -0.1)),
       LabeledPoint(1.0, Vectors.dense(0.0, 2.2, -1.5))))
@@ -91,14 +88,14 @@ object SimpleParamsExample {
     // LogisticRegressionModel.transform will only use the 'features' column.
     // Note that model2.transform() outputs a 'myProbability' column instead of the usual
     // 'probability' column since we renamed the lr.probabilityCol parameter previously.
-    model2.transform(test.toDF())
+    model2.transform(test)
       .select("features", "label", "myProbability", "prediction")
       .collect()
       .foreach { case Row(features: Vector, label: Double, prob: Vector, prediction: Double) =>
         println(s"($features, $label) -> prob=$prob, prediction=$prediction")
       }
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala b/examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala
index 9602801..0519900 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala
@@ -20,12 +20,11 @@ package org.apache.spark.examples.ml
 
 import scala.beans.BeanInfo
 
-import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.ml.Pipeline
 import org.apache.spark.ml.classification.LogisticRegression
 import org.apache.spark.ml.feature.{HashingTF, Tokenizer}
 import org.apache.spark.mllib.linalg.Vector
-import org.apache.spark.sql.{Row, SQLContext}
+import org.apache.spark.sql.{Row, SparkSession}
 
 @BeanInfo
 case class LabeledDocument(id: Long, text: String, label: Double)
@@ -43,13 +42,11 @@ case class Document(id: Long, text: String)
 object SimpleTextClassificationPipeline {
 
   def main(args: Array[String]) {
-    val conf = new SparkConf().setAppName("SimpleTextClassificationPipeline")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
-    import sqlContext.implicits._
+    val spark = SparkSession.builder.appName("SimpleTextClassificationPipeline").getOrCreate()
+    import spark.implicits._
 
     // Prepare training documents, which are labeled.
-    val training = sc.parallelize(Seq(
+    val training = spark.createDataFrame(Seq(
       LabeledDocument(0L, "a b c d e spark", 1.0),
       LabeledDocument(1L, "b d", 0.0),
       LabeledDocument(2L, "spark f g h", 1.0),
@@ -73,7 +70,7 @@ object SimpleTextClassificationPipeline {
     val model = pipeline.fit(training.toDF())
 
     // Prepare test documents, which are unlabeled.
-    val test = sc.parallelize(Seq(
+    val test = spark.createDataFrame(Seq(
       Document(4L, "spark i j k"),
       Document(5L, "l m n"),
       Document(6L, "spark hadoop spark"),
@@ -87,7 +84,7 @@ object SimpleTextClassificationPipeline {
         println(s"($id, $text) --> prob=$prob, prediction=$prediction")
       }
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/StandardScalerExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/StandardScalerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/StandardScalerExample.scala
index e343967..55f777c 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/StandardScalerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/StandardScalerExample.scala
@@ -18,20 +18,17 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.StandardScaler
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object StandardScalerExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("StandardScalerExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("StandardScalerExample").getOrCreate()
 
     // $example on$
-    val dataFrame = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    val dataFrame = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     val scaler = new StandardScaler()
       .setInputCol("features")
@@ -46,7 +43,8 @@ object StandardScalerExample {
     val scaledData = scalerModel.transform(dataFrame)
     scaledData.show()
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/StopWordsRemoverExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/StopWordsRemoverExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/StopWordsRemoverExample.scala
index 8199be1..85e79c8 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/StopWordsRemoverExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/StopWordsRemoverExample.scala
@@ -18,31 +18,29 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.StopWordsRemover
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object StopWordsRemoverExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("StopWordsRemoverExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("StopWordsRemoverExample").getOrCreate()
 
     // $example on$
     val remover = new StopWordsRemover()
       .setInputCol("raw")
       .setOutputCol("filtered")
 
-    val dataSet = sqlContext.createDataFrame(Seq(
+    val dataSet = spark.createDataFrame(Seq(
       (0, Seq("I", "saw", "the", "red", "baloon")),
       (1, Seq("Mary", "had", "a", "little", "lamb"))
     )).toDF("id", "raw")
 
     remover.transform(dataSet).show()
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/StringIndexerExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/StringIndexerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/StringIndexerExample.scala
index 3f0e870..e01a768 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/StringIndexerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/StringIndexerExample.scala
@@ -18,20 +18,17 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.StringIndexer
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object StringIndexerExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("StringIndexerExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("StringIndexerExample").getOrCreate()
 
     // $example on$
-    val df = sqlContext.createDataFrame(
+    val df = spark.createDataFrame(
       Seq((0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c"))
     ).toDF("id", "category")
 
@@ -42,7 +39,8 @@ object StringIndexerExample {
     val indexed = indexer.fit(df).transform(df)
     indexed.show()
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/TfIdfExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/TfIdfExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/TfIdfExample.scala
index 396f073..910ef62 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/TfIdfExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/TfIdfExample.scala
@@ -18,21 +18,18 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer}
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object TfIdfExample {
 
   def main(args: Array[String]) {
-    val conf = new SparkConf().setAppName("TfIdfExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("TfIdfExample").getOrCreate()
 
     // $example on$
-    val sentenceData = sqlContext.createDataFrame(Seq(
+    val sentenceData = spark.createDataFrame(Seq(
       (0, "Hi I heard about Spark"),
       (0, "I wish Java could use case classes"),
       (1, "Logistic regression models are neat")
@@ -50,6 +47,8 @@ object TfIdfExample {
     val rescaledData = idfModel.transform(featurizedData)
     rescaledData.select("features", "label").take(3).foreach(println)
     // $example off$
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/TokenizerExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/TokenizerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/TokenizerExample.scala
index c667728..4f0c47b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/TokenizerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/TokenizerExample.scala
@@ -18,20 +18,17 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.{RegexTokenizer, Tokenizer}
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object TokenizerExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("TokenizerExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("TokenizerExample").getOrCreate()
 
     // $example on$
-    val sentenceDataFrame = sqlContext.createDataFrame(Seq(
+    val sentenceDataFrame = spark.createDataFrame(Seq(
       (0, "Hi I heard about Spark"),
       (1, "I wish Java could use case classes"),
       (2, "Logistic,regression,models,are,neat")
@@ -48,7 +45,8 @@ object TokenizerExample {
     val regexTokenized = regexTokenizer.transform(sentenceDataFrame)
     regexTokenized.select("words", "label").take(3).foreach(println)
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/VectorAssemblerExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/VectorAssemblerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/VectorAssemblerExample.scala
index 768a8c0..56b7263 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/VectorAssemblerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/VectorAssemblerExample.scala
@@ -18,21 +18,18 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.VectorAssembler
 import org.apache.spark.mllib.linalg.Vectors
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object VectorAssemblerExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("VectorAssemblerExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("VectorAssemblerExample").getOrCreate()
 
     // $example on$
-    val dataset = sqlContext.createDataFrame(
+    val dataset = spark.createDataFrame(
       Seq((0, 18, 1.0, Vectors.dense(0.0, 10.0, 0.5), 1.0))
     ).toDF("id", "hour", "mobile", "userFeatures", "clicked")
 
@@ -43,7 +40,8 @@ object VectorAssemblerExample {
     val output = assembler.transform(dataset)
     println(output.select("features", "clicked").first())
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/VectorIndexerExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/VectorIndexerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/VectorIndexerExample.scala
index 3bef37b..214ad91 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/VectorIndexerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/VectorIndexerExample.scala
@@ -18,20 +18,17 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.VectorIndexer
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object VectorIndexerExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("VectorIndexerExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("VectorIndexerExample").getOrCreate()
 
     // $example on$
-    val data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    val data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     val indexer = new VectorIndexer()
       .setInputCol("features")
@@ -48,7 +45,8 @@ object VectorIndexerExample {
     val indexedData = indexerModel.transform(data)
     indexedData.show()
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/VectorSlicerExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/VectorSlicerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/VectorSlicerExample.scala
index 01377d8..716bf02 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/VectorSlicerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/VectorSlicerExample.scala
@@ -18,31 +18,29 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
+import java.util.Arrays
+
 import org.apache.spark.ml.attribute.{Attribute, AttributeGroup, NumericAttribute}
 import org.apache.spark.ml.feature.VectorSlicer
 import org.apache.spark.mllib.linalg.Vectors
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.types.StructType
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object VectorSlicerExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("VectorSlicerExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("VectorSlicerExample").getOrCreate()
 
     // $example on$
-    val data = Array(Row(Vectors.dense(-2.0, 2.3, 0.0)))
+    val data = Arrays.asList(Row(Vectors.dense(-2.0, 2.3, 0.0)))
 
     val defaultAttr = NumericAttribute.defaultAttr
     val attrs = Array("f1", "f2", "f3").map(defaultAttr.withName)
     val attrGroup = new AttributeGroup("userFeatures", attrs.asInstanceOf[Array[Attribute]])
 
-    val dataRDD = sc.parallelize(data)
-    val dataset = sqlContext.createDataFrame(dataRDD, 
StructType(Array(attrGroup.toStructField())))
+    val dataset = spark.createDataFrame(data, 
StructType(Array(attrGroup.toStructField())))
 
     val slicer = new VectorSlicer().setInputCol("userFeatures").setOutputCol("features")
 
@@ -52,7 +50,8 @@ object VectorSlicerExample {
     val output = slicer.transform(dataset)
     println(output.select("userFeatures", "features").first())
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/ml/Word2VecExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/Word2VecExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/Word2VecExample.scala
index e77aa59..292b6d9 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/Word2VecExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/Word2VecExample.scala
@@ -18,21 +18,18 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.Word2Vec
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object Word2VecExample {
   def main(args: Array[String]) {
-    val conf = new SparkConf().setAppName("Word2Vec example")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("Word2Vec example").getOrCreate()
 
     // $example on$
     // Input data: Each row is a bag of words from a sentence or document.
-    val documentDF = sqlContext.createDataFrame(Seq(
+    val documentDF = spark.createDataFrame(Seq(
       "Hi I heard about Spark".split(" "),
       "I wish Java could use case classes".split(" "),
       "Logistic regression models are neat".split(" ")
@@ -48,6 +45,8 @@ object Word2VecExample {
     val result = model.transform(documentDF)
     result.select("result").take(3).foreach(println)
     // $example off$
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
index e89d555..c2bf154 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
@@ -27,7 +27,7 @@ import org.apache.spark.ml.feature.{CountVectorizer, CountVectorizerModel, Regex
 import org.apache.spark.mllib.clustering.{DistributedLDAModel, EMLDAOptimizer, LDA, OnlineLDAOptimizer}
 import org.apache.spark.mllib.linalg.Vector
 import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.{Row, SQLContext}
+import org.apache.spark.sql.{Row, SparkSession}
 
 /**
  * An example Latent Dirichlet Allocation (LDA) app. Run with
@@ -189,8 +189,8 @@ object LDAExample {
       vocabSize: Int,
       stopwordFile: String): (RDD[(Long, Vector)], Array[String], Long) = {
 
-    val sqlContext = SQLContext.getOrCreate(sc)
-    import sqlContext.implicits._
+    val spark = SparkSession.builder.getOrCreate()
+    import spark.implicits._
 
     // Get dataset of document texts
     // One document per line in each text file. If the input consists of many small files,

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/mllib/RankingMetricsExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/RankingMetricsExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/RankingMetricsExample.scala
index fdb01b8..cd4f0bb 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/RankingMetricsExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/RankingMetricsExample.scala
@@ -18,22 +18,19 @@
 // scalastyle:off println
 package org.apache.spark.examples.mllib
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.mllib.evaluation.{RankingMetrics, RegressionMetrics}
 import org.apache.spark.mllib.recommendation.{ALS, Rating}
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object RankingMetricsExample {
   def main(args: Array[String]) {
-    val conf = new SparkConf().setAppName("RankingMetricsExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
-    import sqlContext.implicits._
+    val spark = SparkSession.builder.appName("RankingMetricsExample").getOrCreate()
+    import spark.implicits._
     // $example on$
     // Read in the ratings data
-    val ratings = sc.textFile("data/mllib/sample_movielens_data.txt").map { line =>
+    val ratings = spark.read.textFile("data/mllib/sample_movielens_data.txt").rdd.map { line =>
       val fields = line.split("::")
       Rating(fields(0).toInt, fields(1).toInt, fields(2).toDouble - 2.5)
     }.cache()

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/mllib/RegressionMetricsExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/RegressionMetricsExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/RegressionMetricsExample.scala
index add634c..22c47a6 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/RegressionMetricsExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/RegressionMetricsExample.scala
@@ -18,22 +18,22 @@
 
 package org.apache.spark.examples.mllib
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.mllib.evaluation.RegressionMetrics
-import org.apache.spark.mllib.regression.LinearRegressionWithSGD
-import org.apache.spark.mllib.util.MLUtils
+import org.apache.spark.mllib.linalg.Vector
+import org.apache.spark.mllib.regression.{LabeledPoint, LinearRegressionWithSGD}
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object RegressionMetricsExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("RegressionMetricsExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("RegressionMetricsExample").getOrCreate()
     // $example on$
     // Load the data
-    val data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_linear_regression_data.txt").cache()
+    val data = spark
+      .read.format("libsvm").load("data/mllib/sample_linear_regression_data.txt")
+      .rdd.map(row => LabeledPoint(row.getDouble(0), row.get(1).asInstanceOf[Vector]))
+      .cache()
 
     // Build the model
     val numIterations = 100
@@ -61,6 +61,8 @@ object RegressionMetricsExample {
     // Explained variance
     println(s"Explained variance = ${metrics.explainedVariance}")
     // $example off$
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
 
b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
index 918e124..2f0fe70 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
@@ -19,9 +19,8 @@
 package org.apache.spark.examples.streaming
 
 import org.apache.spark.SparkConf
-import org.apache.spark.SparkContext
 import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 import org.apache.spark.storage.StorageLevel
 import org.apache.spark.streaming.{Seconds, StreamingContext, Time}
 
@@ -60,9 +59,9 @@ object SqlNetworkWordCount {
 
     // Convert RDDs of the words DStream to DataFrame and run SQL query
     words.foreachRDD { (rdd: RDD[String], time: Time) =>
-      // Get the singleton instance of SQLContext
-      val sqlContext = SQLContextSingleton.getInstance(rdd.sparkContext)
-      import sqlContext.implicits._
+      // Get the singleton instance of SparkSession
+      val spark = SparkSessionSingleton.getInstance(rdd.sparkContext.getConf)
+      import spark.implicits._
 
       // Convert RDD[String] to RDD[case class] to DataFrame
       val wordsDataFrame = rdd.map(w => Record(w)).toDF()
@@ -72,7 +71,7 @@ object SqlNetworkWordCount {
 
       // Do word count on table using SQL and print it
       val wordCountsDataFrame =
-        sqlContext.sql("select word, count(*) as total from words group by word")
+        spark.sql("select word, count(*) as total from words group by word")
       println(s"========= $time =========")
       wordCountsDataFrame.show()
     }
@@ -87,14 +86,14 @@ object SqlNetworkWordCount {
 case class Record(word: String)
 
 
-/** Lazily instantiated singleton instance of SQLContext */
-object SQLContextSingleton {
+/** Lazily instantiated singleton instance of SparkSession */
+object SparkSessionSingleton {
 
-  @transient  private var instance: SQLContext = _
+  @transient  private var instance: SparkSession = _
 
-  def getInstance(sparkContext: SparkContext): SQLContext = {
+  def getInstance(sparkConf: SparkConf): SparkSession = {
     if (instance == null) {
-      instance = new SQLContext(sparkContext)
+      instance = SparkSession.builder.config(sparkConf).getOrCreate()
     }
     instance
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/23789e35/python/pyspark/context.py
----------------------------------------------------------------------
diff --git a/python/pyspark/context.py b/python/pyspark/context.py
index cb15b4b..aec0215 100644
--- a/python/pyspark/context.py
+++ b/python/pyspark/context.py
@@ -952,6 +952,11 @@ class SparkContext(object):
         """
         self.profiler_collector.dump_profiles(path)
 
+    def getConf(self):
+        conf = SparkConf()
+        conf.setAll(self._conf.getAll())
+        return conf
+
 
 def _test():
     import atexit

