Github user jkbradley commented on a diff in the pull request:

    https://github.com/apache/spark/pull/10150#discussion_r49252449

--- Diff: python/pyspark/mllib/clustering.py ---
@@ -38,13 +38,120 @@
 from pyspark.mllib.util import Saveable, Loader, inherit_doc, JavaLoader, JavaSaveable
 from pyspark.streaming import DStream

-__all__ = ['KMeansModel', 'KMeans', 'GaussianMixtureModel', 'GaussianMixture',
-           'PowerIterationClusteringModel', 'PowerIterationClustering',
-           'StreamingKMeans', 'StreamingKMeansModel',
+__all__ = ['BisectingKMeansModel', 'BisectingKMeans', 'KMeansModel', 'KMeans',
+           'GaussianMixtureModel', 'GaussianMixture', 'PowerIterationClusteringModel',
+           'PowerIterationClustering', 'StreamingKMeans', 'StreamingKMeansModel',
            'LDA', 'LDAModel']


 @inherit_doc
+class BisectingKMeansModel(JavaModelWrapper):
+    """
+    .. note:: Experimental
+
+    A clustering model derived from the bisecting k-means method.
+
+    >>> data = array([0.0,0.0, 1.0,1.0, 9.0,8.0, 8.0,9.0]).reshape(4, 2)
+    >>> bskm = BisectingKMeans()
+    >>> model = bskm.train(sc.parallelize(data), k=4)
+    >>> p = array([0.0, 0.0])
+    >>> model.predict(p) == model.predict(p)
+    True
+    >>> model.predict(sc.parallelize([p])).first() == model.predict(p)
+    True
+    >>> model.k
+    4
+    >>> model.computeCost(array([0.0, 0.0]))
+    0.0
+    >>> model.k == len(model.clusterCenters)
+    True
+    >>> model = bskm.train(sc.parallelize(data), k=2)
+    >>> model.predict(array([0.0, 0.0])) == model.predict(array([1.0, 1.0]))
+    True
+    >>> model.k
+    2
+
+    .. versionadded:: 2.0.0
+    """
+
+    @property
+    @since('2.0.0')
+    def clusterCenters(self):
+        """Get the cluster centers, represented as a list of NumPy arrays."""
+        return [c.toArray() for c in self.call("clusterCenters")]
+
+    @property
+    @since('2.0.0')
+    def k(self):
+        """Get the number of clusters"""
+        return self.call("k")
+
+    @since('2.0.0')
+    def predict(self, x):
+        """
+        Find the cluster to which x belongs in this model.
+
+        :param x: Either the point to determine the cluster for or an RDD of points to determine
+          the clusters for.
+        """
+        if isinstance(x, RDD):
+            vecs = x.map(_convert_to_vector)
+            return self.call("predict", vecs)
+
+        x = _convert_to_vector(x)
+        return self.call("predict", x)
+
+    @since('2.0.0')
+    def computeCost(self, point):
+        """
+        Return the Bisecting K-means cost (sum of squared distances of points to
+        their nearest center) for this model on the given data.
+
+        :param point: the point to compute the cost to
+        """
+        return self.call("computeCost", _convert_to_vector(point))
+
+
+class BisectingKMeans:
+    """
+    .. note:: Experimental
+
+    A bisecting k-means algorithm based on the paper "A comparison of document clustering
--- End diff --

Update: It should actually be 74 chars. You can check by running ```pydoc pyspark``` from the spark/python directory after changing the terminal size to 80 chars wide.
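For context, here is a minimal sketch of how the new API in this diff is exercised, mirroring the doctests above. It assumes the patch is applied and that a SparkContext named `sc` is already available (e.g. in a pyspark shell); it is illustrative, not part of the patch.

```python
# Minimal usage sketch of the BisectingKMeans API added in this diff.
# Assumes the patch is applied and a SparkContext `sc` already exists.
from numpy import array
from pyspark.mllib.clustering import BisectingKMeans

# Two well-separated pairs of points.
data = array([0.0, 0.0, 1.0, 1.0, 9.0, 8.0, 8.0, 9.0]).reshape(4, 2)

bskm = BisectingKMeans()
model = bskm.train(sc.parallelize(data), k=2)

print(model.k)                        # 2
print(model.clusterCenters)           # list of NumPy arrays, one per cluster
print(model.predict([0.0, 0.0]))      # cluster index for a single point
print(model.computeCost([0.0, 0.0]))  # squared distance to its nearest center
```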