Github user sethah commented on a diff in the pull request:

    https://github.com/apache/spark/pull/12925#discussion_r62252859
  
    --- Diff: examples/src/main/python/ml/kmeans_example.py ---
    @@ -17,52 +17,39 @@
     
     from __future__ import print_function
     
    -import sys
    -
    -import numpy as np
    +# $example on$
     from pyspark.ml.clustering import KMeans, KMeansModel
    -from pyspark.mllib.linalg import VectorUDT, _convert_to_vector
    -from pyspark.sql import SparkSession
    +from pyspark.mllib.linalg import Vectors
     from pyspark.sql.types import Row, StructField, StructType
    +# $example off$
    +
    +from pyspark.sql import SparkSession
    +
     
     """
     A simple example demonstrating a k-means clustering.
     Run with:
    -  bin/spark-submit examples/src/main/python/ml/kmeans_example.py <input> <k>
    -
    -This example requires NumPy (http://www.numpy.org/).
    +  bin/spark-submit examples/src/main/python/ml/kmeans_example.py
     """
     
     
    -def parseVector(row):
    -    array = np.array([float(x) for x in row.value.split(' ')])
    -    return _convert_to_vector(array)
    -
    -
     if __name__ == "__main__":
     
    -    FEATURES_COL = "features"
    -
    -    if len(sys.argv) != 3:
    -        print("Usage: kmeans_example.py <file> <k>", file=sys.stderr)
    -        exit(-1)
    -    path = sys.argv[1]
    -    k = sys.argv[2]
    -
         spark = SparkSession.builder.appName("PythonKMeansExample").getOrCreate()
     
    -    lines = spark.read.text(path).rdd
    -    data = lines.map(parseVector)
    -    row_rdd = data.map(lambda x: Row(x))
    -    schema = StructType([StructField(FEATURES_COL, VectorUDT(), False)])
    -    df = spark.createDataFrame(row_rdd, schema)
    +    # $example on$
    +    data = spark.read.text("data/mllib/kmeans_data.txt").rdd
    +    parsed = data \
    +        .map(lambda row: Row(features=Vectors.dense([float(x) for x in row.value.split(' ')])))
    +    dataset = spark.createDataFrame(parsed)
     
    -    kmeans = KMeans().setK(2).setSeed(1).setFeaturesCol(FEATURES_COL)
    -    model = kmeans.fit(df)
    +    kmeans = KMeans().setK(2).setSeed(1).setFeaturesCol("features")
    +    model = kmeans.fit(dataset)
         centers = model.clusterCenters()
     
         print("Cluster Centers: ")
    --- End diff ---
    
    Can we also print "Within Set Sum of Squared Errors = ..." like the mllib example does?
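    
    For reference, a rough sketch of how that could look in this example, assuming computeCost on the ml KMeansModel (the ml-side counterpart of the mllib WSSSE computation) is the right tool here:
    
        # Hypothetical addition after fitting the model above; `model` and
        # `dataset` are the variables from the example. computeCost returns
        # the within-set sum of squared errors of the points against their
        # assigned cluster centers.
        wssse = model.computeCost(dataset)
        print("Within Set Sum of Squared Errors = " + str(wssse))
    
    That would mirror the output line in the existing mllib kmeans example.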


