http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/naive_bayes_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/naive_bayes_example.py b/examples/src/main/python/ml/naive_bayes_example.py
index db8fbea..e370355 100644
--- a/examples/src/main/python/ml/naive_bayes_example.py
+++ b/examples/src/main/python/ml/naive_bayes_example.py
@@ -17,21 +17,18 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
 # $example on$
 from pyspark.ml.classification import NaiveBayes
 from pyspark.ml.evaluation import MulticlassClassificationEvaluator
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-
-    sc = SparkContext(appName="naive_bayes_example")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("naive_bayes_example").getOrCreate()
 
     # $example on$
     # Load training data
-    data = sqlContext.read.format("libsvm") \
+    data = spark.read.format("libsvm") \
         .load("data/mllib/sample_libsvm_data.txt")
     # Split the data into train and test
     splits = data.randomSplit([0.6, 0.4], 1234)
@@ -50,4 +47,4 @@ if __name__ == "__main__":
     print("Precision:" + str(evaluator.evaluate(predictionAndLabels)))
     # $example off$
 
-    sc.stop()
+    spark.stop()
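
Every Python example in this commit is rewritten the same way: the separate SparkContext/SQLContext pair is replaced by a single SparkSession entry point. A minimal before/after sketch of that pattern, using a placeholder app name and input path rather than any particular example's:

    # Spark 1.x style (what the '-' lines remove): SparkContext plus SQLContext
    from pyspark import SparkContext
    from pyspark.sql import SQLContext

    sc = SparkContext(appName="MigrationSketch")   # placeholder app name
    sqlContext = SQLContext(sc)
    df = sqlContext.read.format("libsvm").load("path/to/data.txt")  # placeholder path
    sc.stop()

    # Spark 2.x style (what the '+' lines add): one SparkSession entry point
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.appName("MigrationSketch").getOrCreate()
    df = spark.read.format("libsvm").load("path/to/data.txt")
    spark.stop()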

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/normalizer_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/normalizer_example.py b/examples/src/main/python/ml/normalizer_example.py
index d490221..ae25537 100644
--- a/examples/src/main/python/ml/normalizer_example.py
+++ b/examples/src/main/python/ml/normalizer_example.py
@@ -17,18 +17,16 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
 # $example on$
 from pyspark.ml.feature import Normalizer
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="NormalizerExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("NormalizerExample").getOrCreate()
 
     # $example on$
-    dataFrame = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    dataFrame = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     # Normalize each Vector using $L^1$ norm.
     normalizer = Normalizer(inputCol="features", outputCol="normFeatures", p=1.0)
@@ -40,4 +38,4 @@ if __name__ == "__main__":
     lInfNormData.show()
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/onehot_encoder_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/onehot_encoder_example.py b/examples/src/main/python/ml/onehot_encoder_example.py
index 0f94c26..9acc363 100644
--- a/examples/src/main/python/ml/onehot_encoder_example.py
+++ b/examples/src/main/python/ml/onehot_encoder_example.py
@@ -17,18 +17,16 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
 # $example on$
 from pyspark.ml.feature import OneHotEncoder, StringIndexer
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="OneHotEncoderExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("OneHotEncoderExample").getOrCreate()
 
     # $example on$
-    df = sqlContext.createDataFrame([
+    df = spark.createDataFrame([
         (0, "a"),
         (1, "b"),
         (2, "c"),
@@ -45,4 +43,4 @@ if __name__ == "__main__":
     encoded.select("id", "categoryVec").show()
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/pca_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/pca_example.py b/examples/src/main/python/ml/pca_example.py
index a17181f..adab151 100644
--- a/examples/src/main/python/ml/pca_example.py
+++ b/examples/src/main/python/ml/pca_example.py
@@ -17,26 +17,24 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
 # $example on$
 from pyspark.ml.feature import PCA
 from pyspark.mllib.linalg import Vectors
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="PCAExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("PCAExample").getOrCreate()
 
     # $example on$
     data = [(Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),),
             (Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),),
             (Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0]),)]
-    df = sqlContext.createDataFrame(data, ["features"])
+    df = spark.createDataFrame(data, ["features"])
     pca = PCA(k=3, inputCol="features", outputCol="pcaFeatures")
     model = pca.fit(df)
     result = model.transform(df).select("pcaFeatures")
     result.show(truncate=False)
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/pipeline_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/pipeline_example.py b/examples/src/main/python/ml/pipeline_example.py
index 3288568..ed9765d 100644
--- a/examples/src/main/python/ml/pipeline_example.py
+++ b/examples/src/main/python/ml/pipeline_example.py
@@ -18,21 +18,20 @@
 """
 Pipeline Example.
 """
-from pyspark import SparkContext, SQLContext
+
 # $example on$
 from pyspark.ml import Pipeline
 from pyspark.ml.classification import LogisticRegression
 from pyspark.ml.feature import HashingTF, Tokenizer
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-
-    sc = SparkContext(appName="PipelineExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("PipelineExample").getOrCreate()
 
     # $example on$
     # Prepare training documents from a list of (id, text, label) tuples.
-    training = sqlContext.createDataFrame([
+    training = spark.createDataFrame([
         (0L, "a b c d e spark", 1.0),
         (1L, "b d", 0.0),
         (2L, "spark f g h", 1.0),
@@ -48,7 +47,7 @@ if __name__ == "__main__":
     model = pipeline.fit(training)
 
     # Prepare test documents, which are unlabeled (id, text) tuples.
-    test = sqlContext.createDataFrame([
+    test = spark.createDataFrame([
         (4L, "spark i j k"),
         (5L, "l m n"),
         (6L, "mapreduce spark"),
@@ -61,4 +60,4 @@ if __name__ == "__main__":
         print(row)
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/polynomial_expansion_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/polynomial_expansion_example.py b/examples/src/main/python/ml/polynomial_expansion_example.py
index 89f5cbe..328b559 100644
--- a/examples/src/main/python/ml/polynomial_expansion_example.py
+++ b/examples/src/main/python/ml/polynomial_expansion_example.py
@@ -17,19 +17,17 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
 # $example on$
 from pyspark.ml.feature import PolynomialExpansion
 from pyspark.mllib.linalg import Vectors
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="PolynomialExpansionExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("PolynomialExpansionExample").getOrCreate()
 
     # $example on$
-    df = sqlContext\
+    df = spark\
         .createDataFrame([(Vectors.dense([-2.0, 2.3]),),
                           (Vectors.dense([0.0, 0.0]),),
                           (Vectors.dense([0.6, -1.1]),)],
@@ -40,4 +38,4 @@ if __name__ == "__main__":
         print(expanded)
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/random_forest_classifier_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/random_forest_classifier_example.py b/examples/src/main/python/ml/random_forest_classifier_example.py
index c357043..b0a93e0 100644
--- a/examples/src/main/python/ml/random_forest_classifier_example.py
+++ b/examples/src/main/python/ml/random_forest_classifier_example.py
@@ -20,21 +20,20 @@ Random Forest Classifier Example.
 """
 from __future__ import print_function
 
-from pyspark import SparkContext, SQLContext
 # $example on$
 from pyspark.ml import Pipeline
 from pyspark.ml.classification import RandomForestClassifier
 from pyspark.ml.feature import StringIndexer, VectorIndexer
 from pyspark.ml.evaluation import MulticlassClassificationEvaluator
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="random_forest_classifier_example")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("random_forest_classifier_example").getOrCreate()
 
     # $example on$
     # Load and parse the data file, converting it to a DataFrame.
-    data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     # Index labels, adding metadata to the label column.
     # Fit on whole dataset to include all labels in index.
@@ -72,4 +71,4 @@ if __name__ == "__main__":
     print(rfModel)  # summary only
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/random_forest_regressor_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/random_forest_regressor_example.py b/examples/src/main/python/ml/random_forest_regressor_example.py
index b77014f..4bb84f0 100644
--- a/examples/src/main/python/ml/random_forest_regressor_example.py
+++ b/examples/src/main/python/ml/random_forest_regressor_example.py
@@ -20,21 +20,20 @@ Random Forest Regressor Example.
 """
 from __future__ import print_function
 
-from pyspark import SparkContext, SQLContext
 # $example on$
 from pyspark.ml import Pipeline
 from pyspark.ml.regression import RandomForestRegressor
 from pyspark.ml.feature import VectorIndexer
 from pyspark.ml.evaluation import RegressionEvaluator
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="random_forest_regressor_example")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("random_forest_regressor_example").getOrCreate()
 
     # $example on$
     # Load and parse the data file, converting it to a DataFrame.
-    data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     # Automatically identify categorical features, and index them.
     # Set maxCategories so features with > 4 distinct values are treated as continuous.
@@ -69,4 +68,4 @@ if __name__ == "__main__":
     print(rfModel)  # summary only
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/rformula_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/rformula_example.py b/examples/src/main/python/ml/rformula_example.py
index b544a14..45cc116 100644
--- a/examples/src/main/python/ml/rformula_example.py
+++ b/examples/src/main/python/ml/rformula_example.py
@@ -17,18 +17,16 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
 # $example on$
 from pyspark.ml.feature import RFormula
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="RFormulaExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("RFormulaExample").getOrCreate()
 
     # $example on$
-    dataset = sqlContext.createDataFrame(
+    dataset = spark.createDataFrame(
         [(7, "US", 18, 1.0),
          (8, "CA", 12, 0.0),
          (9, "NZ", 15, 0.0)],
@@ -41,4 +39,4 @@ if __name__ == "__main__":
     output.select("features", "label").show()
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/simple_text_classification_pipeline.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/simple_text_classification_pipeline.py b/examples/src/main/python/ml/simple_text_classification_pipeline.py
index b4f06bf..3600c12 100644
--- a/examples/src/main/python/ml/simple_text_classification_pipeline.py
+++ b/examples/src/main/python/ml/simple_text_classification_pipeline.py
@@ -17,11 +17,10 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
 from pyspark.ml import Pipeline
 from pyspark.ml.classification import LogisticRegression
 from pyspark.ml.feature import HashingTF, Tokenizer
-from pyspark.sql import Row, SQLContext
+from pyspark.sql import Row, SparkSession
 
 
 """
@@ -34,16 +33,15 @@ pipeline in Python. Run with:
 
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="SimpleTextClassificationPipeline")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("SimpleTextClassificationPipeline").getOrCreate()
 
     # Prepare training documents, which are labeled.
-    LabeledDocument = Row("id", "text", "label")
-    training = sc.parallelize([(0, "a b c d e spark", 1.0),
-                               (1, "b d", 0.0),
-                               (2, "spark f g h", 1.0),
-                               (3, "hadoop mapreduce", 0.0)]) \
-        .map(lambda x: LabeledDocument(*x)).toDF()
+    training = spark.createDataFrame([
+        (0, "a b c d e spark", 1.0),
+        (1, "b d", 0.0),
+        (2, "spark f g h", 1.0),
+        (3, "hadoop mapreduce", 0.0)
+    ], ["id", "text", "label"])
 
     # Configure an ML pipeline, which consists of tree stages: tokenizer, hashingTF, and lr.
     tokenizer = Tokenizer(inputCol="text", outputCol="words")
@@ -55,12 +53,12 @@ if __name__ == "__main__":
     model = pipeline.fit(training)
 
     # Prepare test documents, which are unlabeled.
-    Document = Row("id", "text")
-    test = sc.parallelize([(4, "spark i j k"),
-                           (5, "l m n"),
-                           (6, "spark hadoop spark"),
-                           (7, "apache hadoop")]) \
-        .map(lambda x: Document(*x)).toDF()
+    test = spark.createDataFrame([
+        (4, "spark i j k"),
+        (5, "l m n"),
+        (6, "spark hadoop spark"),
+        (7, "apache hadoop")
+    ], ["id", "text"])
 
     # Make predictions on test documents and print columns of interest.
     prediction = model.transform(test)
@@ -68,4 +66,4 @@ if __name__ == "__main__":
     for row in selected.collect():
         print(row)
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/sql_transformer.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/sql_transformer.py b/examples/src/main/python/ml/sql_transformer.py
index 9575d72..26045db 100644
--- a/examples/src/main/python/ml/sql_transformer.py
+++ b/examples/src/main/python/ml/sql_transformer.py
@@ -17,18 +17,16 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
 # $example on$
 from pyspark.ml.feature import SQLTransformer
 # $example off$
-from pyspark.sql import SQLContext
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="SQLTransformerExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("SQLTransformerExample").getOrCreate()
 
     # $example on$
-    df = sqlContext.createDataFrame([
+    df = spark.createDataFrame([
         (0, 1.0, 3.0),
         (2, 2.0, 5.0)
     ], ["id", "v1", "v2"])
@@ -37,4 +35,4 @@ if __name__ == "__main__":
     sqlTrans.transform(df).show()
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/standard_scaler_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/standard_scaler_example.py b/examples/src/main/python/ml/standard_scaler_example.py
index ae7aa85..c50804f 100644
--- a/examples/src/main/python/ml/standard_scaler_example.py
+++ b/examples/src/main/python/ml/standard_scaler_example.py
@@ -17,18 +17,16 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
 # $example on$
 from pyspark.ml.feature import StandardScaler
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="StandardScalerExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("StandardScalerExample").getOrCreate()
 
     # $example on$
-    dataFrame = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    dataFrame = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
     scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures",
                             withStd=True, withMean=False)
 
@@ -40,4 +38,4 @@ if __name__ == "__main__":
     scaledData.show()
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/stopwords_remover_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/stopwords_remover_example.py b/examples/src/main/python/ml/stopwords_remover_example.py
index 01f94af..5736267 100644
--- a/examples/src/main/python/ml/stopwords_remover_example.py
+++ b/examples/src/main/python/ml/stopwords_remover_example.py
@@ -17,18 +17,16 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
 # $example on$
 from pyspark.ml.feature import StopWordsRemover
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="StopWordsRemoverExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("StopWordsRemoverExample").getOrCreate()
 
     # $example on$
-    sentenceData = sqlContext.createDataFrame([
+    sentenceData = spark.createDataFrame([
         (0, ["I", "saw", "the", "red", "baloon"]),
         (1, ["Mary", "had", "a", "little", "lamb"])
     ], ["label", "raw"])
@@ -37,4 +35,4 @@ if __name__ == "__main__":
     remover.transform(sentenceData).show(truncate=False)
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/string_indexer_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/string_indexer_example.py b/examples/src/main/python/ml/string_indexer_example.py
index 58a8cb5..aacd4f9 100644
--- a/examples/src/main/python/ml/string_indexer_example.py
+++ b/examples/src/main/python/ml/string_indexer_example.py
@@ -17,18 +17,16 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
 # $example on$
 from pyspark.ml.feature import StringIndexer
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="StringIndexerExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("StringIndexerExample").getOrCreate()
 
     # $example on$
-    df = sqlContext.createDataFrame(
+    df = spark.createDataFrame(
         [(0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c")],
         ["id", "category"])
     indexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
@@ -36,4 +34,4 @@ if __name__ == "__main__":
     indexed.show()
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/tf_idf_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/tf_idf_example.py b/examples/src/main/python/ml/tf_idf_example.py
index 141324d..25df816 100644
--- a/examples/src/main/python/ml/tf_idf_example.py
+++ b/examples/src/main/python/ml/tf_idf_example.py
@@ -17,18 +17,16 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
 # $example on$
 from pyspark.ml.feature import HashingTF, IDF, Tokenizer
 # $example off$
-from pyspark.sql import SQLContext
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="TfIdfExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("TfIdfExample").getOrCreate()
 
     # $example on$
-    sentenceData = sqlContext.createDataFrame([
+    sentenceData = spark.createDataFrame([
         (0, "Hi I heard about Spark"),
         (0, "I wish Java could use case classes"),
         (1, "Logistic regression models are neat")
@@ -46,4 +44,4 @@ if __name__ == "__main__":
         print(features_label)
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/tokenizer_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/tokenizer_example.py b/examples/src/main/python/ml/tokenizer_example.py
index ce9b225..5be4b4c 100644
--- a/examples/src/main/python/ml/tokenizer_example.py
+++ b/examples/src/main/python/ml/tokenizer_example.py
@@ -17,18 +17,16 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
 # $example on$
 from pyspark.ml.feature import Tokenizer, RegexTokenizer
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="TokenizerExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("TokenizerExample").getOrCreate()
 
     # $example on$
-    sentenceDataFrame = sqlContext.createDataFrame([
+    sentenceDataFrame = spark.createDataFrame([
         (0, "Hi I heard about Spark"),
         (1, "I wish Java could use case classes"),
         (2, "Logistic,regression,models,are,neat")
@@ -41,4 +39,4 @@ if __name__ == "__main__":
     # alternatively, pattern="\\w+", gaps(False)
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/train_validation_split.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/train_validation_split.py b/examples/src/main/python/ml/train_validation_split.py
index 161a200..2e43a0f 100644
--- a/examples/src/main/python/ml/train_validation_split.py
+++ b/examples/src/main/python/ml/train_validation_split.py
@@ -15,13 +15,12 @@
 # limitations under the License.
 #
 
-from pyspark import SparkContext
 # $example on$
 from pyspark.ml.evaluation import RegressionEvaluator
 from pyspark.ml.regression import LinearRegression
 from pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit
-from pyspark.sql import SQLContext
 # $example off$
+from pyspark.sql import SparkSession
 
 """
 This example demonstrates applying TrainValidationSplit to split data
@@ -32,11 +31,10 @@ Run with:
 """
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="TrainValidationSplit")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("TrainValidationSplit").getOrCreate()
     # $example on$
     # Prepare training and test data.
-    data = sqlContext.read.format("libsvm")\
+    data = spark.read.format("libsvm")\
         .load("data/mllib/sample_linear_regression_data.txt")
     train, test = data.randomSplit([0.7, 0.3])
     lr = LinearRegression(maxIter=10, regParam=0.1)
@@ -65,4 +63,4 @@ if __name__ == "__main__":
     for row in prediction.take(5):
         print(row)
     # $example off$
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/vector_assembler_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/vector_assembler_example.py b/examples/src/main/python/ml/vector_assembler_example.py
index 04f6483..019a9ea 100644
--- a/examples/src/main/python/ml/vector_assembler_example.py
+++ b/examples/src/main/python/ml/vector_assembler_example.py
@@ -17,19 +17,17 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
 # $example on$
 from pyspark.mllib.linalg import Vectors
 from pyspark.ml.feature import VectorAssembler
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="VectorAssemblerExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("VectorAssemblerExample").getOrCreate()
 
     # $example on$
-    dataset = sqlContext.createDataFrame(
+    dataset = spark.createDataFrame(
         [(0, 18, 1.0, Vectors.dense([0.0, 10.0, 0.5]), 1.0)],
         ["id", "hour", "mobile", "userFeatures", "clicked"])
     assembler = VectorAssembler(
@@ -39,4 +37,4 @@ if __name__ == "__main__":
     print(output.select("features", "clicked").first())
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/vector_indexer_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/vector_indexer_example.py b/examples/src/main/python/ml/vector_indexer_example.py
index 146f41c..3cf5b8e 100644
--- a/examples/src/main/python/ml/vector_indexer_example.py
+++ b/examples/src/main/python/ml/vector_indexer_example.py
@@ -17,18 +17,16 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
 # $example on$
 from pyspark.ml.feature import VectorIndexer
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="VectorIndexerExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("VectorIndexerExample").getOrCreate()
 
     # $example on$
-    data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
     indexer = VectorIndexer(inputCol="features", outputCol="indexed", maxCategories=10)
     indexerModel = indexer.fit(data)
 
@@ -37,4 +35,4 @@ if __name__ == "__main__":
     indexedData.show()
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/vector_slicer_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/vector_slicer_example.py b/examples/src/main/python/ml/vector_slicer_example.py
index 31a7530..0531bcd 100644
--- a/examples/src/main/python/ml/vector_slicer_example.py
+++ b/examples/src/main/python/ml/vector_slicer_example.py
@@ -17,20 +17,18 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
 # $example on$
 from pyspark.ml.feature import VectorSlicer
 from pyspark.mllib.linalg import Vectors
 from pyspark.sql.types import Row
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="VectorSlicerExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("VectorSlicerExample").getOrCreate()
 
     # $example on$
-    df = sqlContext.createDataFrame([
+    df = spark.createDataFrame([
         Row(userFeatures=Vectors.sparse(3, {0: -2.0, 1: 2.3}),),
         Row(userFeatures=Vectors.dense([-2.0, 2.3, 0.0]),)])
 
@@ -41,4 +39,4 @@ if __name__ == "__main__":
     output.select("userFeatures", "features").show()
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/ml/word2vec_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/word2vec_example.py b/examples/src/main/python/ml/word2vec_example.py
index 53c77fe..6766a7b 100644
--- a/examples/src/main/python/ml/word2vec_example.py
+++ b/examples/src/main/python/ml/word2vec_example.py
@@ -17,19 +17,17 @@
 
 from __future__ import print_function
 
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
 # $example on$
 from pyspark.ml.feature import Word2Vec
 # $example off$
+from pyspark.sql import SparkSession
 
 if __name__ == "__main__":
-    sc = SparkContext(appName="Word2VecExample")
-    sqlContext = SQLContext(sc)
+    spark = SparkSession.builder.appName("Word2VecExample").getOrCreate()
 
     # $example on$
     # Input data: Each row is a bag of words from a sentence or document.
-    documentDF = sqlContext.createDataFrame([
+    documentDF = spark.createDataFrame([
         ("Hi I heard about Spark".split(" "), ),
         ("I wish Java could use case classes".split(" "), ),
         ("Logistic regression models are neat".split(" "), )
@@ -42,4 +40,4 @@ if __name__ == "__main__":
         print(feature)
     # $example off$
 
-    sc.stop()
+    spark.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/mllib/binary_classification_metrics_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/mllib/binary_classification_metrics_example.py b/examples/src/main/python/mllib/binary_classification_metrics_example.py
index 4e7ea28..8f0fc9d4 100644
--- a/examples/src/main/python/mllib/binary_classification_metrics_example.py
+++ b/examples/src/main/python/mllib/binary_classification_metrics_example.py
@@ -18,7 +18,7 @@
 Binary Classification Metrics Example.
 """
 from __future__ import print_function
-from pyspark import SparkContext, SQLContext
+from pyspark import SparkContext
 # $example on$
 from pyspark.mllib.classification import LogisticRegressionWithLBFGS
 from pyspark.mllib.evaluation import BinaryClassificationMetrics
@@ -27,7 +27,7 @@ from pyspark.mllib.util import MLUtils
 
 if __name__ == "__main__":
     sc = SparkContext(appName="BinaryClassificationMetricsExample")
-    sqlContext = SQLContext(sc)
+
     # $example on$
     # Several of the methods available in scala are currently missing from pyspark
     # Load training data in LIBSVM format
@@ -52,3 +52,5 @@ if __name__ == "__main__":
     # Area under ROC curve
     print("Area under ROC = %s" % metrics.areaUnderROC)
     # $example off$
+
+    sc.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/sql.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/sql.py b/examples/src/main/python/sql.py
index ea6a22d..59a46cb 100644
--- a/examples/src/main/python/sql.py
+++ b/examples/src/main/python/sql.py
@@ -63,7 +63,7 @@ if __name__ == "__main__":
     #  |-- age: long (nullable = true)
     #  |-- name: string (nullable = true)
 
-    # Register this DataFrame as a table.
+    # Register this DataFrame as a temporary table.
     people.registerTempTable("people")
 
     # SQL statements can be run by using the sql methods provided by sqlContext

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/python/streaming/sql_network_wordcount.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/streaming/sql_network_wordcount.py b/examples/src/main/python/streaming/sql_network_wordcount.py
index 1ba5e9f..588cbfe 100644
--- a/examples/src/main/python/streaming/sql_network_wordcount.py
+++ b/examples/src/main/python/streaming/sql_network_wordcount.py
@@ -33,13 +33,14 @@ import sys
 
 from pyspark import SparkContext
 from pyspark.streaming import StreamingContext
-from pyspark.sql import SQLContext, Row
+from pyspark.sql import Row, SparkSession
 
 
-def getSqlContextInstance(sparkContext):
-    if ('sqlContextSingletonInstance' not in globals()):
-        globals()['sqlContextSingletonInstance'] = SQLContext(sparkContext)
-    return globals()['sqlContextSingletonInstance']
+def getSparkSessionInstance(sparkConf):
+    if ('sparkSessionSingletonInstance' not in globals()):
+        globals()['sparkSessionSingletonInstance'] =\
+            SparkSession.builder.config(conf=sparkConf).getOrCreate()
+    return globals()['sparkSessionSingletonInstance']
 
 
 if __name__ == "__main__":
@@ -60,19 +61,19 @@ if __name__ == "__main__":
         print("========= %s =========" % str(time))
 
         try:
-            # Get the singleton instance of SQLContext
-            sqlContext = getSqlContextInstance(rdd.context)
+            # Get the singleton instance of SparkSession
+            spark = getSparkSessionInstance(rdd.context.getConf())
 
             # Convert RDD[String] to RDD[Row] to DataFrame
             rowRdd = rdd.map(lambda w: Row(word=w))
-            wordsDataFrame = sqlContext.createDataFrame(rowRdd)
+            wordsDataFrame = spark.createDataFrame(rowRdd)
 
             # Register as table
             wordsDataFrame.registerTempTable("words")
 
             # Do word count on table using SQL and print it
             wordCountsDataFrame = \
-                sqlContext.sql("select word, count(*) as total from words group by word")
+                spark.sql("select word, count(*) as total from words group by word")
             wordCountsDataFrame.show()
         except:
             pass
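
The getSparkSessionInstance helper added above is intended to be called from inside a DStream callback, where each batch arrives as an RDD. A minimal sketch of that call site, with the host, port, and batch interval assumed rather than taken from this diff:

    from pyspark import SparkContext
    from pyspark.sql import Row, SparkSession
    from pyspark.streaming import StreamingContext

    def getSparkSessionInstance(sparkConf):
        # Lazily create (or reuse) a single SparkSession on the driver
        if 'sparkSessionSingletonInstance' not in globals():
            globals()['sparkSessionSingletonInstance'] = \
                SparkSession.builder.config(conf=sparkConf).getOrCreate()
        return globals()['sparkSessionSingletonInstance']

    sc = SparkContext(appName="SqlNetworkWordCountSketch")   # placeholder app name
    ssc = StreamingContext(sc, 1)                            # assumed 1-second batches
    words = ssc.socketTextStream("localhost", 9999).flatMap(lambda line: line.split(" "))

    def process(time, rdd):
        spark = getSparkSessionInstance(rdd.context.getConf())
        wordsDataFrame = spark.createDataFrame(rdd.map(lambda w: Row(word=w)))
        wordsDataFrame.registerTempTable("words")
        spark.sql("select word, count(*) as total from words group by word").show()

    words.foreachRDD(process)
    ssc.start()
    ssc.awaitTermination()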

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/AFTSurvivalRegressionExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/AFTSurvivalRegressionExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/AFTSurvivalRegressionExample.scala
index 21f58dd..3795af8 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/AFTSurvivalRegressionExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/AFTSurvivalRegressionExample.scala
@@ -18,12 +18,11 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.regression.AFTSurvivalRegression
 import org.apache.spark.mllib.linalg.Vectors
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 /**
  * An example for AFTSurvivalRegression.
@@ -31,12 +30,10 @@ import org.apache.spark.sql.SQLContext
 object AFTSurvivalRegressionExample {
 
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("AFTSurvivalRegressionExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("AFTSurvivalRegressionExample").getOrCreate()
 
     // $example on$
-    val training = sqlContext.createDataFrame(Seq(
+    val training = spark.createDataFrame(Seq(
       (1.218, 1.0, Vectors.dense(1.560, -0.605)),
       (2.949, 0.0, Vectors.dense(0.346, 2.158)),
       (3.627, 0.0, Vectors.dense(1.380, 0.231)),
@@ -56,7 +53,7 @@ object AFTSurvivalRegressionExample {
     model.transform(training).show(false)
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/ALSExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/ALSExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/ALSExample.scala
index a79e15c..41750ca 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/ALSExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/ALSExample.scala
@@ -18,12 +18,11 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.evaluation.RegressionEvaluator
 import org.apache.spark.ml.recommendation.ALS
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 // $example on$
 import org.apache.spark.sql.functions._
 import org.apache.spark.sql.types.DoubleType
@@ -43,13 +42,11 @@ object ALSExample {
   // $example off$
 
   def main(args: Array[String]) {
-    val conf = new SparkConf().setAppName("ALSExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
-    import sqlContext.implicits._
+    val spark = SparkSession.builder.appName("ALSExample").getOrCreate()
+    import spark.implicits._
 
     // $example on$
-    val ratings = sc.textFile("data/mllib/als/sample_movielens_ratings.txt")
+    val ratings = spark.read.text("data/mllib/als/sample_movielens_ratings.txt")
       .map(Rating.parseRating)
       .toDF()
     val Array(training, test) = ratings.randomSplit(Array(0.8, 0.2))
@@ -75,7 +72,8 @@ object ALSExample {
     val rmse = evaluator.evaluate(predictions)
     println(s"Root-mean-square error = $rmse")
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/BinarizerExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/BinarizerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/BinarizerExample.scala
index 2ed8101..93c153f 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/BinarizerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/BinarizerExample.scala
@@ -18,20 +18,17 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.Binarizer
 // $example off$
-import org.apache.spark.sql.{DataFrame, SQLContext}
+import org.apache.spark.sql.{DataFrame, SparkSession}
 
 object BinarizerExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("BinarizerExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("BinarizerExample").getOrCreate()
     // $example on$
     val data = Array((0, 0.1), (1, 0.8), (2, 0.2))
-    val dataFrame: DataFrame = sqlContext.createDataFrame(data).toDF("label", "feature")
+    val dataFrame: DataFrame = spark.createDataFrame(data).toDF("label", "feature")
 
     val binarizer: Binarizer = new Binarizer()
       .setInputCol("feature")
@@ -42,7 +39,8 @@ object BinarizerExample {
     val binarizedFeatures = binarizedDataFrame.select("binarized_feature")
     binarizedFeatures.collect().foreach(println)
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/BucketizerExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/BucketizerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/BucketizerExample.scala
index 6f6236a..779ad33 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/BucketizerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/BucketizerExample.scala
@@ -18,23 +18,20 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.Bucketizer
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object BucketizerExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("BucketizerExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("BucketizerExample").getOrCreate()
 
     // $example on$
     val splits = Array(Double.NegativeInfinity, -0.5, 0.0, 0.5, Double.PositiveInfinity)
 
     val data = Array(-0.5, -0.3, 0.0, 0.2)
-    val dataFrame = sqlContext.createDataFrame(data.map(Tuple1.apply)).toDF("features")
+    val dataFrame = spark.createDataFrame(data.map(Tuple1.apply)).toDF("features")
 
     val bucketizer = new Bucketizer()
       .setInputCol("features")
@@ -45,7 +42,7 @@ object BucketizerExample {
     val bucketedData = bucketizer.transform(dataFrame)
     bucketedData.show()
     // $example off$
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/ChiSqSelectorExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/ChiSqSelectorExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/ChiSqSelectorExample.scala
index 2be6153..84ca1f0 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/ChiSqSelectorExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/ChiSqSelectorExample.scala
@@ -18,20 +18,16 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.ChiSqSelector
 import org.apache.spark.mllib.linalg.Vectors
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object ChiSqSelectorExample {
   def main(args: Array[String]) {
-    val conf = new SparkConf().setAppName("ChiSqSelectorExample")
-    val sc = new SparkContext(conf)
-
-    val sqlContext = SQLContext.getOrCreate(sc)
-    import sqlContext.implicits._
+    val spark = SparkSession.builder.appName("ChiSqSelectorExample").getOrCreate()
+    import spark.implicits._
 
     // $example on$
     val data = Seq(
@@ -40,7 +36,7 @@ object ChiSqSelectorExample {
       (9, Vectors.dense(1.0, 0.0, 15.0, 0.1), 0.0)
     )
 
-    val df = sc.parallelize(data).toDF("id", "features", "clicked")
+    val df = spark.createDataset(data).toDF("id", "features", "clicked")
 
     val selector = new ChiSqSelector()
       .setNumTopFeatures(1)
@@ -51,7 +47,7 @@ object ChiSqSelectorExample {
     val result = selector.fit(df).transform(df)
     result.show()
     // $example off$
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/CountVectorizerExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/CountVectorizerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/CountVectorizerExample.scala
index 7d07fc7..9ab43a4 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/CountVectorizerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/CountVectorizerExample.scala
@@ -18,20 +18,17 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.{CountVectorizer, CountVectorizerModel}
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object CountVectorizerExample {
   def main(args: Array[String]) {
-    val conf = new SparkConf().setAppName("CounterVectorizerExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("CounterVectorizerExample").getOrCreate()
 
     // $example on$
-    val df = sqlContext.createDataFrame(Seq(
+    val df = spark.createDataFrame(Seq(
       (0, Array("a", "b", "c")),
       (1, Array("a", "b", "b", "c", "a"))
     )).toDF("id", "words")
@@ -51,6 +48,8 @@ object CountVectorizerExample {
 
     cvModel.transform(df).select("features").show()
     // $example off$
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/DCTExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DCTExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DCTExample.scala
index dc26b55..b415333 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DCTExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DCTExample.scala
@@ -18,18 +18,15 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.DCT
 import org.apache.spark.mllib.linalg.Vectors
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object DCTExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("DCTExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("DCTExample").getOrCreate()
 
     // $example on$
     val data = Seq(
@@ -37,7 +34,7 @@ object DCTExample {
       Vectors.dense(-1.0, 2.0, 4.0, -7.0),
       Vectors.dense(14.0, -2.0, -5.0, 1.0))
 
-    val df = sqlContext.createDataFrame(data.map(Tuple1.apply)).toDF("features")
+    val df = spark.createDataFrame(data.map(Tuple1.apply)).toDF("features")
 
     val dct = new DCT()
       .setInputCol("features")
@@ -47,7 +44,8 @@ object DCTExample {
     val dctDf = dct.transform(df)
     dctDf.select("featuresDCT").show(3)
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
index 7e608a2..2f892f8 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
@@ -23,11 +23,10 @@ import java.io.File
 import com.google.common.io.Files
 import scopt.OptionParser
 
-import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.examples.mllib.AbstractParams
 import org.apache.spark.mllib.linalg.Vector
 import org.apache.spark.mllib.stat.MultivariateOnlineSummarizer
-import org.apache.spark.sql.{DataFrame, Row, SQLContext}
+import org.apache.spark.sql.{DataFrame, Row, SparkSession}
 
 /**
  * An example of how to use [[org.apache.spark.sql.DataFrame]] for ML. Run with
@@ -62,14 +61,11 @@ object DataFrameExample {
   }
 
   def run(params: Params) {
-
-    val conf = new SparkConf().setAppName(s"DataFrameExample with $params")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName(s"DataFrameExample with $params").getOrCreate()
 
     // Load input data
     println(s"Loading LIBSVM file with UDT from ${params.input}.")
-    val df: DataFrame = sqlContext.read.format("libsvm").load(params.input).cache()
+    val df: DataFrame = spark.read.format("libsvm").load(params.input).cache()
     println("Schema from LIBSVM:")
     df.printSchema()
     println(s"Loaded training data as a DataFrame with ${df.count()} records.")
@@ -94,11 +90,11 @@ object DataFrameExample {
 
     // Load the records back.
     println(s"Loading Parquet file with UDT from $outputDir.")
-    val newDF = sqlContext.read.parquet(outputDir)
+    val newDF = spark.read.parquet(outputDir)
     println(s"Schema from Parquet:")
     newDF.printSchema()
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeClassificationExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeClassificationExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeClassificationExample.scala
index 224d8da..a0a2e1f 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeClassificationExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeClassificationExample.scala
@@ -18,7 +18,6 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.Pipeline
 import org.apache.spark.ml.classification.DecisionTreeClassificationModel
@@ -26,16 +25,14 @@ import org.apache.spark.ml.classification.DecisionTreeClassifier
 import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
 import org.apache.spark.ml.feature.{IndexToString, StringIndexer, VectorIndexer}
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object DecisionTreeClassificationExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("DecisionTreeClassificationExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("DecisionTreeClassificationExample").getOrCreate()
     // $example on$
     // Load the data stored in LIBSVM format as a DataFrame.
-    val data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    val data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     // Index labels, adding metadata to the label column.
     // Fit on whole dataset to include all labels in index.
@@ -88,6 +85,8 @@ object DecisionTreeClassificationExample {
     val treeModel = model.stages(2).asInstanceOf[DecisionTreeClassificationModel]
     println("Learned classification tree model:\n" + treeModel.toDebugString)
     // $example off$
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
index d2560cc..cea1d80 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
@@ -33,7 +33,7 @@ import org.apache.spark.ml.util.MetadataUtils
 import org.apache.spark.mllib.evaluation.{MulticlassMetrics, RegressionMetrics}
 import org.apache.spark.mllib.linalg.Vector
 import org.apache.spark.mllib.util.MLUtils
-import org.apache.spark.sql.{DataFrame, SQLContext}
+import org.apache.spark.sql.{DataFrame, SparkSession}
 
 /**
  * An example runner for decision trees. Run with
@@ -134,18 +134,18 @@ object DecisionTreeExample {
 
   /** Load a dataset from the given path, using the given format */
   private[ml] def loadData(
-      sqlContext: SQLContext,
+      spark: SparkSession,
       path: String,
       format: String,
       expectedNumFeatures: Option[Int] = None): DataFrame = {
-    import sqlContext.implicits._
+    import spark.implicits._
 
     format match {
-      case "dense" => MLUtils.loadLabeledPoints(sqlContext.sparkContext, 
path).toDF()
+      case "dense" => MLUtils.loadLabeledPoints(spark.sparkContext, 
path).toDF()
       case "libsvm" => expectedNumFeatures match {
-        case Some(numFeatures) => sqlContext.read.option("numFeatures", numFeatures.toString)
+        case Some(numFeatures) => spark.read.option("numFeatures", numFeatures.toString)
           .format("libsvm").load(path)
-        case None => sqlContext.read.format("libsvm").load(path)
+        case None => spark.read.format("libsvm").load(path)
       }
       case _ => throw new IllegalArgumentException(s"Bad data format: $format")
     }
@@ -167,17 +167,17 @@ object DecisionTreeExample {
       testInput: String,
       algo: String,
       fracTest: Double): (DataFrame, DataFrame) = {
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.getOrCreate()
 
     // Load training data
-    val origExamples: DataFrame = loadData(sqlContext, input, dataFormat)
+    val origExamples: DataFrame = loadData(spark, input, dataFormat)
 
     // Load or create test set
     val dataframes: Array[DataFrame] = if (testInput != "") {
       // Load testInput.
       val numFeatures = origExamples.first().getAs[Vector](1).size
       val origTestExamples: DataFrame =
-        loadData(sqlContext, testInput, dataFormat, Some(numFeatures))
+        loadData(spark, testInput, dataFormat, Some(numFeatures))
       Array(origExamples, origTestExamples)
     } else {
       // Split input into training, test.

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeRegressionExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeRegressionExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeRegressionExample.scala
index ad32e56..26b52d0 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeRegressionExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeRegressionExample.scala
@@ -18,7 +18,6 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.Pipeline
 import org.apache.spark.ml.evaluation.RegressionEvaluator
@@ -26,17 +25,15 @@ import org.apache.spark.ml.feature.VectorIndexer
 import org.apache.spark.ml.regression.DecisionTreeRegressionModel
 import org.apache.spark.ml.regression.DecisionTreeRegressor
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object DecisionTreeRegressionExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("DecisionTreeRegressionExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession.builder.appName("DecisionTreeRegressionExample").getOrCreate()
 
     // $example on$
     // Load the data stored in LIBSVM format as a DataFrame.
-    val data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    val data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     // Automatically identify categorical features, and index them.
     // Here, we treat features with > 4 distinct values as continuous.
@@ -78,6 +75,8 @@ object DecisionTreeRegressionExample {
     val treeModel = model.stages(1).asInstanceOf[DecisionTreeRegressionModel]
     println("Learned regression tree model:\n" + treeModel.toDebugString)
     // $example off$
+
+    spark.stop()
   }
 }
 // scalastyle:on println
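The same mechanical change is applied throughout the Scala examples: the SparkConf/SparkContext/SQLContext triple collapses into a single SparkSession, reads move to spark.read, and sc.stop() becomes spark.stop(). A minimal, self-contained skeleton of the target shape (the app name is illustrative; the data path matches the examples above):

    import org.apache.spark.sql.SparkSession

    object ExampleSkeleton {
      def main(args: Array[String]): Unit = {
        // One builder call replaces SparkConf + SparkContext + SQLContext.
        val spark = SparkSession.builder.appName("ExampleSkeleton").getOrCreate()

        // DataFrame reads hang directly off the session.
        val data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
        data.show()

        // Stopping the session also stops the underlying SparkContext.
        spark.stop()
      }
    }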

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala
index 8d127f9..2aa1ab1 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala
@@ -18,13 +18,12 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.ml.classification.{ClassificationModel, Classifier, 
ClassifierParams}
 import org.apache.spark.ml.param.{IntParam, ParamMap}
 import org.apache.spark.ml.util.Identifiable
 import org.apache.spark.mllib.linalg.{BLAS, Vector, Vectors}
 import org.apache.spark.mllib.regression.LabeledPoint
-import org.apache.spark.sql.{DataFrame, Dataset, Row, SQLContext}
+import org.apache.spark.sql.{Dataset, Row, SparkSession}
 
 /**
  * A simple example demonstrating how to write your own learning algorithm 
using Estimator,
@@ -38,13 +37,11 @@ import org.apache.spark.sql.{DataFrame, Dataset, Row, 
SQLContext}
 object DeveloperApiExample {
 
   def main(args: Array[String]) {
-    val conf = new SparkConf().setAppName("DeveloperApiExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
-    import sqlContext.implicits._
+    val spark = 
SparkSession.builder.appName("DeveloperApiExample").getOrCreate()
+    import spark.implicits._
 
     // Prepare training data.
-    val training = sc.parallelize(Seq(
+    val training = spark.createDataFrame(Seq(
       LabeledPoint(1.0, Vectors.dense(0.0, 1.1, 0.1)),
       LabeledPoint(0.0, Vectors.dense(2.0, 1.0, -1.0)),
       LabeledPoint(0.0, Vectors.dense(2.0, 1.3, 1.0)),
@@ -62,13 +59,13 @@ object DeveloperApiExample {
     val model = lr.fit(training.toDF())
 
     // Prepare test data.
-    val test = sc.parallelize(Seq(
+    val test = spark.createDataFrame(Seq(
       LabeledPoint(1.0, Vectors.dense(-1.0, 1.5, 1.3)),
       LabeledPoint(0.0, Vectors.dense(3.0, 2.0, -0.1)),
       LabeledPoint(1.0, Vectors.dense(0.0, 2.2, -1.5))))
 
     // Make predictions on test data.
-    val sumPredictions: Double = model.transform(test.toDF())
+    val sumPredictions: Double = model.transform(test)
       .select("features", "label", "prediction")
       .collect()
       .map { case Row(features: Vector, label: Double, prediction: Double) =>
@@ -77,7 +74,7 @@ object DeveloperApiExample {
     assert(sumPredictions == 0.0,
       "MyLogisticRegression predicted something other than 0, even though all 
coefficients are 0!")
 
-    sc.stop()
+    spark.stop()
   }
 }
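One detail worth noting in DeveloperApiExample: spark.createDataFrame(Seq(...)) yields a DataFrame directly, which is why the explicit .toDF() on the test set disappears from the model.transform call. A spark-shell-style sketch of the difference, reusing LabeledPoint values from the diff (the session name is illustrative):

    import org.apache.spark.mllib.linalg.Vectors
    import org.apache.spark.mllib.regression.LabeledPoint
    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder.appName("CreateDataFrameSketch").getOrCreate()
    import spark.implicits._

    // Before: an RDD of LabeledPoint that still needed .toDF() before Pipeline use.
    val rdd = spark.sparkContext.parallelize(Seq(
      LabeledPoint(1.0, Vectors.dense(-1.0, 1.5, 1.3))))
    val fromRdd = rdd.toDF()

    // After: a DataFrame in one step; case classes such as LabeledPoint carry their schema.
    val test = spark.createDataFrame(Seq(
      LabeledPoint(1.0, Vectors.dense(-1.0, 1.5, 1.3)),
      LabeledPoint(0.0, Vectors.dense(3.0, 2.0, -0.1))))
    test.show()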
 

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/ElementwiseProductExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/ElementwiseProductExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/ElementwiseProductExample.scala
index 629d322..f289c28 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/ElementwiseProductExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/ElementwiseProductExample.scala
@@ -18,22 +18,19 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.ElementwiseProduct
 import org.apache.spark.mllib.linalg.Vectors
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object ElementwiseProductExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("ElementwiseProductExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = 
SparkSession.builder.appName("ElementwiseProductExample").getOrCreate()
 
     // $example on$
     // Create some vector data; also works for sparse vectors
-    val dataFrame = sqlContext.createDataFrame(Seq(
+    val dataFrame = spark.createDataFrame(Seq(
       ("a", Vectors.dense(1.0, 2.0, 3.0)),
       ("b", Vectors.dense(4.0, 5.0, 6.0)))).toDF("id", "vector")
 
@@ -46,7 +43,8 @@ object ElementwiseProductExample {
     // Batch transform the vectors to create new column:
     transformer.transform(dataFrame).show()
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/EstimatorTransformerParamExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/EstimatorTransformerParamExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/EstimatorTransformerParamExample.scala
index 65e3c36..91076cc 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/EstimatorTransformerParamExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/EstimatorTransformerParamExample.scala
@@ -18,25 +18,22 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.classification.LogisticRegression
 import org.apache.spark.ml.param.ParamMap
 import org.apache.spark.mllib.linalg.{Vector, Vectors}
 import org.apache.spark.sql.Row
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object EstimatorTransformerParamExample {
 
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("EstimatorTransformerParamExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = 
SparkSession.builder.appName("EstimatorTransformerParamExample").getOrCreate()
 
     // $example on$
     // Prepare training data from a list of (label, features) tuples.
-    val training = sqlContext.createDataFrame(Seq(
+    val training = spark.createDataFrame(Seq(
       (1.0, Vectors.dense(0.0, 1.1, 0.1)),
       (0.0, Vectors.dense(2.0, 1.0, -1.0)),
       (0.0, Vectors.dense(2.0, 1.3, 1.0)),
@@ -76,7 +73,7 @@ object EstimatorTransformerParamExample {
     println("Model 2 was fit using parameters: " + 
model2.parent.extractParamMap)
 
     // Prepare test data.
-    val test = sqlContext.createDataFrame(Seq(
+    val test = spark.createDataFrame(Seq(
       (1.0, Vectors.dense(-1.0, 1.5, 1.3)),
       (0.0, Vectors.dense(3.0, 2.0, -0.1)),
       (1.0, Vectors.dense(0.0, 2.2, -1.5))
@@ -94,7 +91,7 @@ object EstimatorTransformerParamExample {
       }
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeClassifierExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeClassifierExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeClassifierExample.scala
index cd62a80..412c54d 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeClassifierExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeClassifierExample.scala
@@ -18,24 +18,21 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.Pipeline
 import org.apache.spark.ml.classification.{GBTClassificationModel, 
GBTClassifier}
 import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
 import org.apache.spark.ml.feature.{IndexToString, StringIndexer, 
VectorIndexer}
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object GradientBoostedTreeClassifierExample {
   def main(args: Array[String]): Unit = {
-    val conf = new 
SparkConf().setAppName("GradientBoostedTreeClassifierExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = 
SparkSession.builder.appName("GradientBoostedTreeClassifierExample").getOrCreate()
 
     // $example on$
     // Load and parse the data file, converting it to a DataFrame.
-    val data = 
sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    val data = 
spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     // Index labels, adding metadata to the label column.
     // Fit on whole dataset to include all labels in index.
@@ -91,7 +88,7 @@ object GradientBoostedTreeClassifierExample {
     println("Learned classification GBT model:\n" + gbtModel.toDebugString)
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeRegressorExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeRegressorExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeRegressorExample.scala
index b8cf962..fd43553 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeRegressorExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeRegressorExample.scala
@@ -18,24 +18,21 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.Pipeline
 import org.apache.spark.ml.evaluation.RegressionEvaluator
 import org.apache.spark.ml.feature.VectorIndexer
 import org.apache.spark.ml.regression.{GBTRegressionModel, GBTRegressor}
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object GradientBoostedTreeRegressorExample {
   def main(args: Array[String]): Unit = {
-    val conf = new 
SparkConf().setAppName("GradientBoostedTreeRegressorExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = 
SparkSession.builder.appName("GradientBoostedTreeRegressorExample").getOrCreate()
 
     // $example on$
     // Load and parse the data file, converting it to a DataFrame.
-    val data = 
sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    val data = 
spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     // Automatically identify categorical features, and index them.
     // Set maxCategories so features with > 4 distinct values are treated as 
continuous.
@@ -79,7 +76,7 @@ object GradientBoostedTreeRegressorExample {
     println("Learned regression GBT model:\n" + gbtModel.toDebugString)
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/IndexToStringExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/IndexToStringExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/IndexToStringExample.scala
index 4cea09b..d873618 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/IndexToStringExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/IndexToStringExample.scala
@@ -18,21 +18,17 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.{IndexToString, StringIndexer}
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object IndexToStringExample {
   def main(args: Array[String]) {
-    val conf = new SparkConf().setAppName("IndexToStringExample")
-    val sc = new SparkContext(conf)
-
-    val sqlContext = SQLContext.getOrCreate(sc)
+    val spark = 
SparkSession.builder.appName("IndexToStringExample").getOrCreate()
 
     // $example on$
-    val df = sqlContext.createDataFrame(Seq(
+    val df = spark.createDataFrame(Seq(
       (0, "a"),
       (1, "b"),
       (2, "c"),
@@ -54,7 +50,8 @@ object IndexToStringExample {
     val converted = converter.transform(indexed)
     converted.select("id", "originalCategory").show()
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println
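IndexToStringExample previously obtained its context through SQLContext.getOrCreate(sc); SparkSession.builder.getOrCreate() keeps the same reuse-or-create contract. A tiny spark-shell-style sketch of that behavior (the assertion holds in the simple single-session case; the app name is illustrative):

    import org.apache.spark.sql.SparkSession

    // getOrCreate() returns the already-active session if there is one,
    // otherwise it builds a new one -- mirroring SQLContext.getOrCreate(sc).
    val spark = SparkSession.builder.appName("GetOrCreateSketch").getOrCreate()
    val again = SparkSession.builder.getOrCreate()
    assert(spark eq again)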

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala 
b/examples/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala
index 7af0115..d2573fa 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala
@@ -19,11 +19,10 @@ package org.apache.spark.examples.ml
 
 // scalastyle:off println
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.clustering.KMeans
 import org.apache.spark.mllib.linalg.Vectors
-import org.apache.spark.sql.{DataFrame, SQLContext}
+import org.apache.spark.sql.{DataFrame, SparkSession}
 // $example off$
 
 /**
@@ -37,13 +36,11 @@ object KMeansExample {
 
   def main(args: Array[String]): Unit = {
     // Creates a Spark context and a SQL context
-    val conf = new SparkConf().setAppName(s"${this.getClass.getSimpleName}")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = 
SparkSession.builder.appName(s"${this.getClass.getSimpleName}").getOrCreate()
 
     // $example on$
     // Creates a DataFrame
-    val dataset: DataFrame = sqlContext.createDataFrame(Seq(
+    val dataset: DataFrame = spark.createDataFrame(Seq(
       (1, Vectors.dense(0.0, 0.0, 0.0)),
       (2, Vectors.dense(0.1, 0.1, 0.1)),
       (3, Vectors.dense(0.2, 0.2, 0.2)),
@@ -64,7 +61,7 @@ object KMeansExample {
     model.clusterCenters.foreach(println)
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala 
b/examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala
index f9ddac7..c23adee 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala
@@ -18,11 +18,10 @@
 package org.apache.spark.examples.ml
 
 // scalastyle:off println
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.clustering.LDA
 import org.apache.spark.mllib.linalg.{Vectors, VectorUDT}
-import org.apache.spark.sql.{Row, SQLContext}
+import org.apache.spark.sql.{Row, SparkSession}
 import org.apache.spark.sql.types.{StructField, StructType}
 // $example off$
 
@@ -41,16 +40,14 @@ object LDAExample {
 
     val input = "data/mllib/sample_lda_data.txt"
     // Creates a Spark context and a SQL context
-    val conf = new SparkConf().setAppName(s"${this.getClass.getSimpleName}")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = 
SparkSession.builder.appName(s"${this.getClass.getSimpleName}").getOrCreate()
 
     // $example on$
     // Loads data
-    val rowRDD = sc.textFile(input).filter(_.nonEmpty)
+    val rowRDD = spark.read.text(input).rdd.filter(_.nonEmpty)
       .map(_.split(" ").map(_.toDouble)).map(Vectors.dense).map(Row(_))
     val schema = StructType(Array(StructField(FEATURES_COL, new VectorUDT, 
false)))
-    val dataset = sqlContext.createDataFrame(rowRDD, schema)
+    val dataset = spark.createDataFrame(rowRDD, schema)
 
     // Trains an LDA model
     val lda = new LDA()
@@ -71,7 +68,7 @@ object LDAExample {
     transformed.show(false)
 
     // $example off$
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println
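LDAExample also exercises the RDD[Row]-plus-schema overload of createDataFrame, which carries over to SparkSession unchanged. A spark-shell-style sketch of that overload using the same mllib VectorUDT as the diff (column name and vector values are illustrative):

    import org.apache.spark.mllib.linalg.{Vectors, VectorUDT}
    import org.apache.spark.sql.{Row, SparkSession}
    import org.apache.spark.sql.types.{StructField, StructType}

    val spark = SparkSession.builder.appName("RowRddSchemaSketch").getOrCreate()

    // Build an RDD[Row] of feature vectors and pair it with an explicit schema.
    val rowRDD = spark.sparkContext.parallelize(Seq(
      Row(Vectors.dense(1.0, 2.0)),
      Row(Vectors.dense(3.0, 4.0))))
    val schema = StructType(Array(StructField("features", new VectorUDT, false)))

    // createDataFrame(RDD[Row], StructType) works on SparkSession exactly as it did on SQLContext.
    val dataset = spark.createDataFrame(rowRDD, schema)
    dataset.show()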

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionWithElasticNetExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionWithElasticNetExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionWithElasticNetExample.scala
index f68aef7..cb6e249 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionWithElasticNetExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionWithElasticNetExample.scala
@@ -18,22 +18,19 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.regression.LinearRegression
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object LinearRegressionWithElasticNetExample {
 
   def main(args: Array[String]): Unit = {
-    val conf = new 
SparkConf().setAppName("LinearRegressionWithElasticNetExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = 
SparkSession.builder.appName("LinearRegressionWithElasticNetExample").getOrCreate()
 
     // $example on$
     // Load training data
-    val training = sqlContext.read.format("libsvm")
+    val training = spark.read.format("libsvm")
       .load("data/mllib/sample_linear_regression_data.txt")
 
     val lr = new LinearRegression()
@@ -56,7 +53,7 @@ object LinearRegressionWithElasticNetExample {
     println(s"r2: ${trainingSummary.r2}")
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionSummaryExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionSummaryExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionSummaryExample.scala
index 89c5edf..50670d7 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionSummaryExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionSummaryExample.scala
@@ -18,23 +18,20 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.classification.{BinaryLogisticRegressionSummary, 
LogisticRegression}
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.functions.max
 
 object LogisticRegressionSummaryExample {
 
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("LogisticRegressionSummaryExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
-    import sqlContext.implicits._
+    val spark = 
SparkSession.builder.appName("LogisticRegressionSummaryExample").getOrCreate()
+    import spark.implicits._
 
     // Load training data
-    val training = 
sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    val training = 
spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     val lr = new LogisticRegression()
       .setMaxIter(10)
@@ -71,7 +68,7 @@ object LogisticRegressionSummaryExample {
     lrModel.setThreshold(bestThreshold)
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println
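LogisticRegressionSummaryExample keeps its implicits import, now sourced from the session rather than a SQLContext. A spark-shell-style sketch of what that import provides (the column names and values are illustrative):

    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder.appName("ImplicitsSketch").getOrCreate()
    // spark.implicits._ supplies the $"col" syntax and Seq/RDD-to-Dataset conversions,
    // the same role sqlContext.implicits._ played before.
    import spark.implicits._

    val df = Seq((0.1, "a"), (0.9, "b")).toDF("threshold", "label")
    df.where($"threshold" > 0.5).show()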

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionWithElasticNetExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionWithElasticNetExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionWithElasticNetExample.scala
index 6e27571..fcba813 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionWithElasticNetExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionWithElasticNetExample.scala
@@ -18,22 +18,20 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.classification.LogisticRegression
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object LogisticRegressionWithElasticNetExample {
 
   def main(args: Array[String]): Unit = {
-    val conf = new 
SparkConf().setAppName("LogisticRegressionWithElasticNetExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession
+      .builder.appName("LogisticRegressionWithElasticNetExample").getOrCreate()
 
     // $example on$
     // Load training data
-    val training = 
sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    val training = 
spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     val lr = new LogisticRegression()
       .setMaxIter(10)
@@ -47,7 +45,7 @@ object LogisticRegressionWithElasticNetExample {
     println(s"Coefficients: ${lrModel.coefficients} Intercept: 
${lrModel.intercept}")
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/MaxAbsScalerExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/MaxAbsScalerExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/MaxAbsScalerExample.scala
index aafb5ef..896d8fa 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/MaxAbsScalerExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/MaxAbsScalerExample.scala
@@ -15,23 +15,19 @@
  * limitations under the License.
  */
 
-// scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.MaxAbsScaler
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object MaxAbsScalerExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("MaxAbsScalerExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = 
SparkSession.builder.appName("MaxAbsScalerExample").getOrCreate()
 
     // $example on$
-    val dataFrame = 
sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    val dataFrame = 
spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
     val scaler = new MaxAbsScaler()
       .setInputCol("features")
       .setOutputCol("scaledFeatures")
@@ -43,7 +39,7 @@ object MaxAbsScalerExample {
     val scaledData = scalerModel.transform(dataFrame)
     scaledData.show()
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
-// scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/MinMaxScalerExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/MinMaxScalerExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/MinMaxScalerExample.scala
index 9a03f69..bcdca0f 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/MinMaxScalerExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/MinMaxScalerExample.scala
@@ -18,20 +18,17 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.feature.MinMaxScaler
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 object MinMaxScalerExample {
   def main(args: Array[String]): Unit = {
-    val conf = new SparkConf().setAppName("MinMaxScalerExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = 
SparkSession.builder.appName("MinMaxScalerExample").getOrCreate()
 
     // $example on$
-    val dataFrame = 
sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+    val dataFrame = 
spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
 
     val scaler = new MinMaxScaler()
       .setInputCol("features")
@@ -44,7 +41,8 @@ object MinMaxScalerExample {
     val scaledData = scalerModel.transform(dataFrame)
     scaledData.show()
     // $example off$
-    sc.stop()
+
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/ModelSelectionViaCrossValidationExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/ModelSelectionViaCrossValidationExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/ModelSelectionViaCrossValidationExample.scala
index d1441b5..5fb3536 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/ModelSelectionViaCrossValidationExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/ModelSelectionViaCrossValidationExample.scala
@@ -18,7 +18,6 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.Pipeline
 import org.apache.spark.ml.classification.LogisticRegression
@@ -28,7 +27,7 @@ import org.apache.spark.ml.tuning.{CrossValidator, 
ParamGridBuilder}
 import org.apache.spark.mllib.linalg.Vector
 import org.apache.spark.sql.Row
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 /**
  * A simple example demonstrating model selection using CrossValidator.
@@ -42,13 +41,12 @@ import org.apache.spark.sql.SQLContext
 object ModelSelectionViaCrossValidationExample {
 
   def main(args: Array[String]): Unit = {
-    val conf = new 
SparkConf().setAppName("ModelSelectionViaCrossValidationExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession
+      .builder.appName("ModelSelectionViaCrossValidationExample").getOrCreate()
 
     // $example on$
     // Prepare training data from a list of (id, text, label) tuples.
-    val training = sqlContext.createDataFrame(Seq(
+    val training = spark.createDataFrame(Seq(
       (0L, "a b c d e spark", 1.0),
       (1L, "b d", 0.0),
       (2L, "spark f g h", 1.0),
@@ -98,7 +96,7 @@ object ModelSelectionViaCrossValidationExample {
     val cvModel = cv.fit(training)
 
     // Prepare test documents, which are unlabeled (id, text) tuples.
-    val test = sqlContext.createDataFrame(Seq(
+    val test = spark.createDataFrame(Seq(
       (4L, "spark i j k"),
       (5L, "l m n"),
       (6L, "mapreduce spark"),
@@ -114,7 +112,7 @@ object ModelSelectionViaCrossValidationExample {
       }
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/ModelSelectionViaTrainValidationSplitExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/ModelSelectionViaTrainValidationSplitExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/ModelSelectionViaTrainValidationSplitExample.scala
index fcad17a..6bc0829 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/ModelSelectionViaTrainValidationSplitExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/ModelSelectionViaTrainValidationSplitExample.scala
@@ -17,13 +17,12 @@
 
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.evaluation.RegressionEvaluator
 import org.apache.spark.ml.regression.LinearRegression
 import org.apache.spark.ml.tuning.{ParamGridBuilder, TrainValidationSplit}
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 /**
  * A simple example demonstrating model selection using TrainValidationSplit.
@@ -36,13 +35,12 @@ import org.apache.spark.sql.SQLContext
 object ModelSelectionViaTrainValidationSplitExample {
 
   def main(args: Array[String]): Unit = {
-    val conf = new 
SparkConf().setAppName("ModelSelectionViaTrainValidationSplitExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = SparkSession
+      
.builder.appName("ModelSelectionViaTrainValidationSplitExample").getOrCreate()
 
     // $example on$
     // Prepare training and test data.
-    val data = 
sqlContext.read.format("libsvm").load("data/mllib/sample_linear_regression_data.txt")
+    val data = 
spark.read.format("libsvm").load("data/mllib/sample_linear_regression_data.txt")
     val Array(training, test) = data.randomSplit(Array(0.9, 0.1), seed = 12345)
 
     val lr = new LinearRegression()
@@ -75,6 +73,6 @@ object ModelSelectionViaTrainValidationSplitExample {
       .show()
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/cdce4e62/examples/src/main/scala/org/apache/spark/examples/ml/MultilayerPerceptronClassifierExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/MultilayerPerceptronClassifierExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/MultilayerPerceptronClassifierExample.scala
index d7d1e82..a11fe1b 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/MultilayerPerceptronClassifierExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/MultilayerPerceptronClassifierExample.scala
@@ -18,12 +18,11 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
-import org.apache.spark.{SparkConf, SparkContext}
 // $example on$
 import org.apache.spark.ml.classification.MultilayerPerceptronClassifier
 import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
 // $example off$
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
 
 /**
  * An example for Multilayer Perceptron Classification.
@@ -31,13 +30,11 @@ import org.apache.spark.sql.SQLContext
 object MultilayerPerceptronClassifierExample {
 
   def main(args: Array[String]): Unit = {
-    val conf = new 
SparkConf().setAppName("MultilayerPerceptronClassifierExample")
-    val sc = new SparkContext(conf)
-    val sqlContext = new SQLContext(sc)
+    val spark = 
SparkSession.builder.appName("MultilayerPerceptronClassifierExample").getOrCreate()
 
     // $example on$
     // Load the data stored in LIBSVM format as a DataFrame.
-    val data = sqlContext.read.format("libsvm")
+    val data = spark.read.format("libsvm")
       .load("data/mllib/sample_multiclass_classification_data.txt")
     // Split the data into train and test
     val splits = data.randomSplit(Array(0.6, 0.4), seed = 1234L)
@@ -63,7 +60,7 @@ object MultilayerPerceptronClassifierExample {
     println("Precision:" + evaluator.evaluate(predictionAndLabels))
     // $example off$
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println

