Github user felixcheung commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16566#discussion_r97192608
  
    --- Diff: R/pkg/R/mllib_clustering.R ---
    @@ -38,6 +45,149 @@ setClass("KMeansModel", representation(jobj = "jobj"))
     #' @note LDAModel since 2.1.0
     setClass("LDAModel", representation(jobj = "jobj"))
     
    +#' Bisecting K-Means Clustering Model
    +#'
    +#' Fits a bisecting k-means clustering model against a Spark DataFrame.
    +#' Users can call \code{summary} to print a summary of the fitted model, \code{predict} to make
    +#' predictions on new data, and \code{write.ml}/\code{read.ml} to save/load fitted models.
    +#'
    +#' @param data a SparkDataFrame for training.
    +#' @param formula a symbolic description of the model to be fitted. Currently only a few formula
    +#'                operators are supported, including '~', '.', ':', '+', and '-'.
    +#'                Note that the response variable of formula is empty in spark.bisectingKmeans.
    +#' @param k the desired number of leaf clusters. Must be > 1.
    +#'          The actual number could be smaller if there are no divisible leaf clusters.
    +#' @param maxIter maximum iteration number.
    +#' @param seed the random seed.
    +#' @param minDivisibleClusterSize The minimum number of points (if greater than or equal to 1.0)
    +#'                                or the minimum proportion of points (if less than 1.0) of a divisible cluster.
    +#'                                Note that it is an expert parameter. The default value should be good enough
    +#'                                for most cases.
    +#' @param ... additional argument(s) passed to the method.
    +#' @return \code{spark.bisectingKmeans} returns a fitted bisecting k-means model.
    +#' @rdname spark.bisectingKmeans
    +#' @aliases spark.bisectingKmeans,SparkDataFrame,formula-method
    +#' @name spark.bisectingKmeans
    +#' @export
    +#' @examples
    +#' \dontrun{
    +#' sparkR.session()
    +#' df <- createDataFrame(iris)
    +#' model <- spark.bisectingKmeans(df, Sepal_Length ~ Sepal_Width, k = 4)
    +#' summary(model)
    +#'
    +#' # fitted values on training data
    +#' fitted <- predict(model, df)
    +#' head(select(fitted, "Sepal_Length", "prediction"))
    +#'
    +#' # save fitted model to input path
    +#' path <- "path/to/model"
    +#' write.ml(model, path)
    +#'
    +#' # can also read back the saved model and print
    +#' savedModel <- read.ml(path)
    +#' summary(savedModel)
    +#' }
    +#' @note spark.bisectingKmeans since 2.2.0
    +#' @seealso \link{predict}, \link{read.ml}, \link{write.ml}
    +setMethod("spark.bisectingKmeans", signature(data = "SparkDataFrame", formula = "formula"),
    +          function(data, formula, k = 4, maxIter = 20, seed = NULL, minDivisibleClusterSize = 1.0) {
    +            formula <- paste0(deparse(formula), collapse = "")
    +            if (!is.null(seed)) {
    +              seed <- as.character(as.integer(seed))
    +            }
    +            jobj <- callJStatic("org.apache.spark.ml.r.BisectingKMeansWrapper", "fit",
    +                                data@sdf, formula, as.integer(k), as.integer(maxIter),
    +                                seed, as.numeric(minDivisibleClusterSize))
    +            new("BisectingKMeansModel", jobj = jobj)
    +          })
    +
    +#  Get the summary of a bisecting k-means model
    +
    +#' @param object a fitted bisecting k-means model.
    +#' @return \code{summary} returns summary information of the fitted model, which is a list.
    +#'         The list includes the model's \code{k} (number of cluster centers),
    +#'         \code{coefficients} (model cluster centers),
    +#'         \code{size} (number of data points in each cluster), and \code{cluster}
    +#'         (cluster centers of the transformed data).
    --- End diff ---
    
    also clarify that `cluster` is NULL if `is.loaded` is TRUE
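    
    roughly what I have in mind for the doc, as a quick sketch (not tested; this assumes the
    `summary()` list exposes `cluster` and an `is.loaded` flag as in the other SparkR ML
    wrappers, and the path below is just a placeholder):
    
    ```r
    sparkR.session()
    df <- createDataFrame(iris)
    model <- spark.bisectingKmeans(df, Sepal_Length ~ Sepal_Width, k = 4)
    
    # on the in-memory model, summary()$cluster is populated from the
    # transformed training data
    head(summary(model)$cluster)
    
    # after a save/load round trip the transformed data is not recomputed,
    # so the loaded model reports cluster = NULL (and is.loaded = TRUE)
    path <- tempfile(pattern = "spark-bisectingkmeans")
    write.ml(model, path)
    loaded <- read.ml(path)
    summary(loaded)$is.loaded   # TRUE
    summary(loaded)$cluster     # NULL
    ```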

