Github user felixcheung commented on a diff in the pull request:
https://github.com/apache/spark/pull/14229#discussion_r74675724
--- Diff: R/pkg/R/mllib.R ---
@@ -299,6 +307,92 @@ setMethod("summary", signature(object = "NaiveBayesModel"),
             return(list(apriori = apriori, tables = tables))
           })
+# Returns posterior probabilities from a Latent Dirichlet Allocation model produced by spark.lda()
+
+#' @param newData A SparkDataFrame for testing
+#' @return \code{spark.posterior} returns a SparkDataFrame containing posterior probability
+#'         vectors named "topicDistribution"
+#' @rdname spark.lda
+#' @aliases spark.lda,spark.posterior,LDAModel-method,SparkDataFrame
+#' @export
+#' @note spark.posterior(LDAModel) since 2.1.0
+setMethod("spark.posterior", signature(object = "LDAModel", newData =
"SparkDataFrame"),
+ function(object, newData) {
+ return(dataFrame(callJMethod(object@jobj, "transform",
newData@sdf)))
+ })
+
+# Returns the summary of a Latent Dirichlet Allocation model produced by \code{spark.lda}
+
+#' @param object A Latent Dirichlet Allocation model fitted by \code{spark.lda}
+#' @return \code{summary} returns a list containing
+#'         \code{docConcentration}, concentration parameter commonly named \code{alpha} for
+#'         the prior placed on documents' distributions over topics \code{theta};
+#'         \code{topicConcentration}, concentration parameter commonly named \code{beta} or
+#'         \code{eta} for the prior placed on topic distributions over terms;
+#'         \code{logLikelihood}, log likelihood of the entire corpus;
+#'         \code{logPerplexity}, log perplexity;
+#'         \code{isDistributed}, TRUE for a distributed model, FALSE for a local model;
+#'         \code{vocabSize}, number of terms in the corpus;
+#'         \code{topics}, top 10 terms and their weights for all topics;
+#'         \code{vocabulary}, all terms of the training corpus, NULL if a libsvm-format file
+#'         was used as the training set.
+#' @rdname spark.lda
+#' @aliases summary,spark.lda,LDAModel-method
+#' @export
+#' @note summary(LDAModel) since 2.1.0
+setMethod("summary", signature(object = "LDAModel"),
+ function(object, ...) {
+ jobj <- object@jobj
+ docConcentration <- callJMethod(jobj, "docConcentration")
+ topicConcentration <- callJMethod(jobj, "topicConcentration")
+ logLikelihood <- callJMethod(jobj, "logLikelihood")
+ logPerplexity <- callJMethod(jobj, "logPerplexity")
+ isDistributed <- callJMethod(jobj, "isDistributed")
+ vocabSize <- callJMethod(jobj, "vocabSize")
+ topics <- dataFrame(callJMethod(jobj, "topics"))
+ vocabulary <- callJMethod(jobj, "vocabulary")
+ return(list(docConcentration = unlist(docConcentration),
+ topicConcentration = topicConcentration,
+ logLikelihood = logLikelihood, logPerplexity =
logPerplexity,
+ isDistributed = isDistributed, vocabSize =
vocabSize,
+ topics = topics,
+ vocabulary = unlist(vocabulary)))
+ })
+
+# Returns the log perplexity of a Latent Dirichlet Allocation model produced by \code{spark.lda}
+
+#' @return \code{spark.perplexity} returns the log perplexity of the given SparkDataFrame, or
+#'         the log perplexity of the training data if the argument \code{newData} is missing.
+#' @rdname spark.lda
+#" @aliases spark.perplexity,spark.lda,LDAModel-method
+#' @export
+#' @note spark.perplexity(LDAModel) since 2.1.0
+setMethod("spark.perplexity", signature(object = "LDAModel"),
+ function(object, newData) {
+ return(ifelse(missing(newData), callJMethod(object@jobj,
"logPerplexity"),
+ callJMethod(object@jobj, "computeLogPerplexity",
newData@sdf)))
+ })
+
+# Saves the Latent Dirichlet Allocation model to the input path.
+
+#' @param path The directory where the model is saved
+#' @param overwrite Overwrites or not if the output path already exists. Default is FALSE,
+#'        which means an exception is thrown if the output path exists.
+#'
+#' @rdname spark.lda
+#' @aliases write.ml,LDAModel-method,character-method
--- End diff ---
only one `-method`
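That is, the Rd alias for an S4 method carries a single `-method` suffix on the full signature, e.g. `write.ml,LDAModel,character-method`.

For anyone trying out this hunk, here is a minimal usage sketch. It assumes a running SparkSession; `train_df`, `test_df`, and the `k`/`maxIter` arguments are hypothetical, since the `spark.lda()` fit itself is defined earlier in mllib.R and not shown in this diff.

```r
# Hypothetical inputs: SparkDataFrames in the shape spark.lda() expects.
model <- spark.lda(train_df, k = 10, maxIter = 20)

# List with docConcentration, topicConcentration, logLikelihood,
# logPerplexity, isDistributed, vocabSize, topics, and vocabulary.
stats <- summary(model)

# SparkDataFrame with posterior vectors named "topicDistribution" appended.
posterior <- spark.posterior(model, test_df)

# Log perplexity of test_df; omit the second argument to get the
# log perplexity of the training data instead.
lp <- spark.perplexity(model, test_df)

# Persist the fitted model, overwriting any existing output at the path.
write.ml(model, path = "/tmp/lda_model", overwrite = TRUE)
```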