Github user wangmiao1981 commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16566#discussion_r96165457
  
    --- Diff: R/pkg/R/mllib_clustering.R ---
    @@ -38,6 +45,146 @@ setClass("KMeansModel", representation(jobj = "jobj"))
     #' @note LDAModel since 2.1.0
     setClass("LDAModel", representation(jobj = "jobj"))
     
    +#' Bisecting K-Means Clustering Model
    +#'
    +#' Fits a bisecting k-means clustering model against a Spark DataFrame.
    +#' Users can call \code{summary} to print a summary of the fitted model, 
\code{predict} to make
    +#' predictions on new data, and \code{write.ml}/\code{read.ml} to 
save/load fitted models.
    +#'
    +#' @param data a SparkDataFrame for training.
    +#' @param formula a symbolic description of the model to be fitted. 
Currently only a few formula
    +#'                operators are supported, including '~', '.', ':', '+', 
and '-'.
    +#'                Note that the response variable of formula is empty in 
spark.bisectingKmeans.
    +#' @param k the desired number of leaf clusters. Must be > 1.
    +#'          The actual number could be smaller if there are no divisible 
leaf clusters.
    +#' @param maxIter maximum iteration number.
    +#' @param minDivisibleClusterSize The minimum number of points (if greater 
than or equal to 1.0)
    +#'                                or the minimum proportion of points (if 
less than 1.0) of a divisible cluster.
    +#' @param seed the random seed.
    +#' @param ... additional argument(s) passed to the method.
    +#' @return \code{spark.bisectingKmeans} returns a fitted bisecting k-means 
model.
    +#' @rdname spark.bisectingKmeans
    +#' @aliases spark.bisectingKmeans,SparkDataFrame,formula-method
    +#' @name spark.bisectingKmeans
    +#' @export
    +#' @examples
    +#' \dontrun{
    +#' sparkR.session()
    +#' data(iris)
    +#' df <- createDataFrame(iris)
    +#' model <- spark.bisectingKmeans(df, Sepal_Length ~ Sepal_Width, k = 4)
    +#' summary(model)
    +#'
    +#' # fitted values on training data
    +#' fitted <- predict(model, df)
    +#' head(select(fitted, "Sepal_Length", "prediction"))
    +#'
    +#' # save fitted model to input path
    +#' path <- "path/to/model"
    +#' write.ml(model, path)
    +#'
    +#' # can also read back the saved model and print
    +#' savedModel <- read.ml(path)
    +#' summary(savedModel)
    +#' }
    +#' @note spark.bisectingKmeans since 2.2.0
    +#' @seealso \link{predict}, \link{read.ml}, \link{write.ml}
    +setMethod("spark.bisectingKmeans", signature(data = "SparkDataFrame", 
formula = "formula"),
    +          function(data, formula, k = 4, maxIter = 20, 
minDivisibleClusterSize = 1.0, seed = NULL) {
    +            formula <- paste0(deparse(formula), collapse = "")
    +            if (!is.null(seed)) {
    +              seed <- as.character(as.integer(seed))
    +            }
    +            jobj <- 
callJStatic("org.apache.spark.ml.r.BisectingKMeansWrapper", "fit",
    +                                data@sdf, formula, as.integer(k), 
as.integer(maxIter),
    +                                as.numeric(minDivisibleClusterSize), seed)
    +            new("BisectingKMeansModel", jobj = jobj)
    +          })
    +
    +#  Get the summary of a bisecting k-means model
    +
    +#' @param object a fitted bisecting k-means model.
    +#' @return \code{summary} returns summary information of the fitted model, 
which is a list.
    +#'         The list includes the model's \code{k} (number of cluster 
centers),
    +#'         \code{coefficients} (model cluster centers),
    +#'         \code{size} (number of data points in each cluster), and 
\code{cluster}
    +#'         (cluster centers of the transformed data).
    +#' @rdname spark.bisectingKmeans
    +#' @export
    +#' @note summary(BisectingKMeansModel) since 2.2.0
    +setMethod("summary", signature(object = "BisectingKMeansModel"),
    +          function(object) {
    +            jobj <- object@jobj
    +            is.loaded <- callJMethod(jobj, "isLoaded")
    +            features <- callJMethod(jobj, "features")
    +            coefficients <- callJMethod(jobj, "coefficients")
    +            k <- callJMethod(jobj, "k")
    +            size <- callJMethod(jobj, "size")
    +            coefficients <- t(matrix(coefficients, ncol = k))
    +            colnames(coefficients) <- unlist(features)
    +            rownames(coefficients) <- 1:k
    +            cluster <- if (is.loaded) {
    +              NULL
    +            } else {
    +              dataFrame(callJMethod(jobj, "cluster"))
    +            }
    +            list(k = k, coefficients = coefficients, size = size,
    +            cluster = cluster, is.loaded = is.loaded)
    +          })
    +
    +#  Predicted values based on a bisecting k-means model
    +
    +#' @param newData a SparkDataFrame for testing.
    +#' @return \code{predict} returns the predicted values based on a 
bisecting k-means model.
    +#' @rdname spark.bisectingKmeans
    +#' @export
    +#' @note predict(BisectingKMeansModel) since 2.2.0
    +setMethod("predict", signature(object = "BisectingKMeansModel"),
    +          function(object, newData) {
    +            predict_internal(object, newData)
    +          })
    +
    +#' Get fitted result from a bisecting k-means model
    +#'
    +#' Get fitted result from a bisecting k-means model.
    +#' Note: A saved-loaded model does not support this method.
    +#'
    +#' @return \code{fitted} returns a SparkDataFrame containing fitted values.
    +#' @rdname fitted
    +#' @export
    +#' @examples
    +#' \dontrun{
    +#' model <- spark.bisectingKmeans(trainingData, ~ ., 2)
    +#' fitted.model <- fitted(model)
    +#' showDF(fitted.model)
    +#'}
    +#' @note fitted since 2.2.0
    +setMethod("fitted", signature(object = "BisectingKMeansModel"),
    --- End diff ---
    
    `spark.kmeans` has a `fitted` method. Since the two models are similar, I 
added it to bisecting k-means as well.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to