Repository: spark
Updated Branches:
  refs/heads/master e5aaae6e1 -> 56a0aba0a


[SPARK-11952][ML] Remove duplicate ml examples

Remove duplicate ml examples (only for ml). cc: mengxr

Author: Yanbo Liang <yblia...@gmail.com>

Closes #9933 from yanboliang/SPARK-11685.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/56a0aba0
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/56a0aba0
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/56a0aba0

Branch: refs/heads/master
Commit: 56a0aba0a60326ba026056c9a23f3f6ec7258c19
Parents: e5aaae6
Author: Yanbo Liang <yblia...@gmail.com>
Authored: Tue Nov 24 09:52:53 2015 -0800
Committer: Xiangrui Meng <m...@databricks.com>
Committed: Tue Nov 24 09:52:53 2015 -0800

----------------------------------------------------------------------
 .../main/python/ml/gradient_boosted_trees.py    | 82 ------------------
 .../src/main/python/ml/logistic_regression.py   | 66 ---------------
 .../src/main/python/ml/random_forest_example.py | 87 --------------------
 3 files changed, 235 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/56a0aba0/examples/src/main/python/ml/gradient_boosted_trees.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/gradient_boosted_trees.py 
b/examples/src/main/python/ml/gradient_boosted_trees.py
deleted file mode 100644
index c3bf8aa..0000000
--- a/examples/src/main/python/ml/gradient_boosted_trees.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-from __future__ import print_function
-
-import sys
-
-from pyspark import SparkContext
-from pyspark.ml.classification import GBTClassifier
-from pyspark.ml.feature import StringIndexer
-from pyspark.ml.regression import GBTRegressor
-from pyspark.mllib.evaluation import BinaryClassificationMetrics, 
RegressionMetrics
-from pyspark.sql import Row, SQLContext
-
-"""
-A simple example demonstrating a Gradient Boosted Trees 
Classification/Regression Pipeline.
-Note: GBTClassifier only supports binary classification currently
-Run with:
-  bin/spark-submit examples/src/main/python/ml/gradient_boosted_trees.py
-"""
-
-
-def testClassification(train, test):
-    # Train a GradientBoostedTrees model.
-
-    rf = GBTClassifier(maxIter=30, maxDepth=4, labelCol="indexedLabel")
-
-    model = rf.fit(train)
-    predictionAndLabels = model.transform(test).select("prediction", 
"indexedLabel") \
-        .map(lambda x: (x.prediction, x.indexedLabel))
-
-    metrics = BinaryClassificationMetrics(predictionAndLabels)
-    print("AUC %.3f" % metrics.areaUnderROC)
-
-
-def testRegression(train, test):
-    # Train a GradientBoostedTrees model.
-
-    rf = GBTRegressor(maxIter=30, maxDepth=4, labelCol="indexedLabel")
-
-    model = rf.fit(train)
-    predictionAndLabels = model.transform(test).select("prediction", 
"indexedLabel") \
-        .map(lambda x: (x.prediction, x.indexedLabel))
-
-    metrics = RegressionMetrics(predictionAndLabels)
-    print("rmse %.3f" % metrics.rootMeanSquaredError)
-    print("r2 %.3f" % metrics.r2)
-    print("mae %.3f" % metrics.meanAbsoluteError)
-
-
-if __name__ == "__main__":
-    if len(sys.argv) > 1:
-        print("Usage: gradient_boosted_trees", file=sys.stderr)
-        exit(1)
-    sc = SparkContext(appName="PythonGBTExample")
-    sqlContext = SQLContext(sc)
-
-    # Load the data stored in LIBSVM format as a DataFrame.
-    df = 
sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
-
-    # Map labels into an indexed column of labels in [0, numLabels)
-    stringIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel")
-    si_model = stringIndexer.fit(df)
-    td = si_model.transform(df)
-    [train, test] = td.randomSplit([0.7, 0.3])
-    testClassification(train, test)
-    testRegression(train, test)
-    sc.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/56a0aba0/examples/src/main/python/ml/logistic_regression.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/logistic_regression.py 
b/examples/src/main/python/ml/logistic_regression.py
deleted file mode 100644
index 4cd027f..0000000
--- a/examples/src/main/python/ml/logistic_regression.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-from __future__ import print_function
-
-import sys
-
-from pyspark import SparkContext
-from pyspark.ml.classification import LogisticRegression
-from pyspark.mllib.evaluation import MulticlassMetrics
-from pyspark.ml.feature import StringIndexer
-from pyspark.sql import SQLContext
-
-"""
-A simple example demonstrating a logistic regression with elastic net 
regularization Pipeline.
-Run with:
-  bin/spark-submit examples/src/main/python/ml/logistic_regression.py
-"""
-
-if __name__ == "__main__":
-
-    if len(sys.argv) > 1:
-        print("Usage: logistic_regression", file=sys.stderr)
-        exit(-1)
-
-    sc = SparkContext(appName="PythonLogisticRegressionExample")
-    sqlContext = SQLContext(sc)
-
-    # Load the data stored in LIBSVM format as a DataFrame.
-    df = 
sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
-
-    # Map labels into an indexed column of labels in [0, numLabels)
-    stringIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel")
-    si_model = stringIndexer.fit(df)
-    td = si_model.transform(df)
-    [training, test] = td.randomSplit([0.7, 0.3])
-
-    lr = LogisticRegression(maxIter=100, 
regParam=0.3).setLabelCol("indexedLabel")
-    lr.setElasticNetParam(0.8)
-
-    # Fit the model
-    lrModel = lr.fit(training)
-
-    predictionAndLabels = lrModel.transform(test).select("prediction", 
"indexedLabel") \
-        .map(lambda x: (x.prediction, x.indexedLabel))
-
-    metrics = MulticlassMetrics(predictionAndLabels)
-    print("weighted f-measure %.3f" % metrics.weightedFMeasure())
-    print("precision %s" % metrics.precision())
-    print("recall %s" % metrics.recall())
-
-    sc.stop()

http://git-wip-us.apache.org/repos/asf/spark/blob/56a0aba0/examples/src/main/python/ml/random_forest_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/ml/random_forest_example.py 
b/examples/src/main/python/ml/random_forest_example.py
deleted file mode 100644
index dc6a778..0000000
--- a/examples/src/main/python/ml/random_forest_example.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-from __future__ import print_function
-
-import sys
-
-from pyspark import SparkContext
-from pyspark.ml.classification import RandomForestClassifier
-from pyspark.ml.feature import StringIndexer
-from pyspark.ml.regression import RandomForestRegressor
-from pyspark.mllib.evaluation import MulticlassMetrics, RegressionMetrics
-from pyspark.mllib.util import MLUtils
-from pyspark.sql import Row, SQLContext
-
-"""
-A simple example demonstrating a RandomForest Classification/Regression 
Pipeline.
-Run with:
-  bin/spark-submit examples/src/main/python/ml/random_forest_example.py
-"""
-
-
-def testClassification(train, test):
-    # Train a RandomForest model.
-    # Setting featureSubsetStrategy="auto" lets the algorithm choose.
-    # Note: Use larger numTrees in practice.
-
-    rf = RandomForestClassifier(labelCol="indexedLabel", numTrees=3, 
maxDepth=4)
-
-    model = rf.fit(train)
-    predictionAndLabels = model.transform(test).select("prediction", 
"indexedLabel") \
-        .map(lambda x: (x.prediction, x.indexedLabel))
-
-    metrics = MulticlassMetrics(predictionAndLabels)
-    print("weighted f-measure %.3f" % metrics.weightedFMeasure())
-    print("precision %s" % metrics.precision())
-    print("recall %s" % metrics.recall())
-
-
-def testRegression(train, test):
-    # Train a RandomForest model.
-    # Note: Use larger numTrees in practice.
-
-    rf = RandomForestRegressor(labelCol="indexedLabel", numTrees=3, maxDepth=4)
-
-    model = rf.fit(train)
-    predictionAndLabels = model.transform(test).select("prediction", 
"indexedLabel") \
-        .map(lambda x: (x.prediction, x.indexedLabel))
-
-    metrics = RegressionMetrics(predictionAndLabels)
-    print("rmse %.3f" % metrics.rootMeanSquaredError)
-    print("r2 %.3f" % metrics.r2)
-    print("mae %.3f" % metrics.meanAbsoluteError)
-
-
-if __name__ == "__main__":
-    if len(sys.argv) > 1:
-        print("Usage: random_forest_example", file=sys.stderr)
-        exit(1)
-    sc = SparkContext(appName="PythonRandomForestExample")
-    sqlContext = SQLContext(sc)
-
-    # Load the data stored in LIBSVM format as a DataFrame.
-    df = 
sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
-
-    # Map labels into an indexed column of labels in [0, numLabels)
-    stringIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel")
-    si_model = stringIndexer.fit(df)
-    td = si_model.transform(df)
-    [train, test] = td.randomSplit([0.7, 0.3])
-    testClassification(train, test)
-    testRegression(train, test)
-    sc.stop()


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to