Github user yanboliang commented on a diff in the pull request:
https://github.com/apache/spark/pull/12723#discussion_r61255115
--- Diff: python/pyspark/ml/clustering.py ---
@@ -453,6 +454,447 @@ def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
+class LDAModel(JavaModel):
+ """
+ Latent Dirichlet Allocation (LDA) model.
+ This abstraction allows for different underlying representations,
+ including local and distributed data structures.
+
+ .. versionadded:: 2.0.0
+ """
+
+ @since("2.0.0")
+ def isDistributed(self):
+ """
+ Indicates whether this instance is of type DistributedLDAModel
+ """
+ return self._call_java("isDistributed")
+
+ @since("2.0.0")
+ def vocabSize(self):
+ """Vocabulary size (number of terms or words in the vocabulary)"""
+ return self._call_java("vocabSize")
+
+ @since("2.0.0")
+ def topicsMatrix(self):
+ """
+ Inferred topics, where each topic is represented by a distribution over terms.
+ This is a matrix of size vocabSize x k, where each column is a topic.
+ No guarantees are given about the ordering of the topics.
+
+ WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by
+ the Expectation-Maximization ("em") `optimizer`, then this method could involve
+ collecting a large amount of data to the driver (on the order of vocabSize x k).
+ """
+ return self._call_java("topicsMatrix")
+
+ @since("2.0.0")
+ def logLikelihood(self, dataset):
+ """
+ Calculates a lower bound on the log likelihood of the entire corpus.
+ See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
+
+ WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
+ :py:attr:`optimizer` is set to "em"), this involves collecting a large
+ :py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
+ """
+ return self._call_java("logLikelihood", dataset)
+
+ @since("2.0.0")
+ def logPerplexity(self, dataset):
+ """
+ Calculate an upper bound on perplexity. (Lower is better.)
+ See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
+
+ WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
+ :py:attr:`optimizer` is set to "em"), this involves collecting a large
+ :py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
+ """
+ return self._call_java("logPerplexity", dataset)
+
+ @since("2.0.0")
+ def describeTopics(self, maxTermsPerTopic=10):
+ """
+ Return the topics described by their top-weighted terms.
+ """
+ return self._call_java("describeTopics", maxTermsPerTopic)
+
+ @since("2.0.0")
+ def estimatedDocConcentration(self):
+ """
+ Value for :py:attr:`LDA.docConcentration` estimated from data.
+ If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
+ then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
+ """
+ return self._call_java("estimatedDocConcentration")
+
+
+class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
+ """
+ Distributed model fitted by :py:class:`LDA`.
+ This type of model is currently only produced by Expectation-Maximization (EM).
+
+ This model stores the inferred topics, the full training dataset, and the topic distribution
+ for each training document.
+
+ .. versionadded:: 2.0.0
+ """
+
+ @since("2.0.0")
+ def toLocal(self):
+ """
+ Convert this distributed model to a local representation. This discards info about the
+ training dataset.
+
+ WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.
+ """
+ return LocalLDAModel(self._call_java("toLocal"))
+
+ @since("2.0.0")
+ def trainingLogLikelihood(self):
+ """
+ Log likelihood of the observed tokens in the training set,
+ given the current parameter estimates:
+ log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
+
+ Notes:
+ - This excludes the prior; for that, use :py:func:`logPrior`.
+ - Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
+ the hyperparameters.
+ - This is computed from the topic distributions computed during training. If you call
+ :py:func:`logLikelihood` on the same training dataset, the topic distributions
+ will be computed again, possibly giving different results.
+ """
+ return self._call_java("trainingLogLikelihood")
+
+ @since("2.0.0")
+ def logPrior(self):
+ """
+ Log probability of the current parameter estimate:
+ log P(topics, topic distributions for docs | alpha, eta)
+ """
+ return self._call_java("logPrior")
+
+ @since("2.0.0")
+ def getCheckpointFiles(self):
+ """
+ If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
+ be saved checkpoint files. This method is provided so that users can manage those files.
+
+ Note that removing the checkpoints can cause failures if a partition is lost and is needed
+ by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up the
+ checkpoints when this model and derivative data go out of scope.
+
+ :return: List of checkpoint files from training
+ """
+ return self._call_java("getCheckpointFiles")
+
+
+class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
+ """
+ Local (non-distributed) model fitted by :py:class:`LDA`.
+ This model stores the inferred topics only; it does not store info about the training dataset.
+
+ .. versionadded:: 2.0.0
+ """
+ pass
+
+
+class LDA(JavaEstimator, HasFeaturesCol, HasMaxIter, HasSeed, HasCheckpointInterval,
+ JavaMLReadable, JavaMLWritable):
+ """
+ Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
+
+ Terminology:
+
+ - "term" = "word": an el
+ - "token": instance of a term appearing in a document
+ - "topic": multinomial distribution over terms representing some
concept
+ - "document": one piece of text, corresponding to one row in the
input data
+
+ Original LDA paper (journal version):
+ Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
+
+ Input data (featuresCol):
+ LDA is given a collection of documents as input data, via the featuresCol parameter.
+ Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
+ count for the corresponding term (word) in the document. Feature transformers such as
+ :py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
+ can be useful for converting text to word count vectors.
+
+ >>> from pyspark.mllib.linalg import Vectors, SparseVector
+ >>> from pyspark.ml.clustering import LDA
+ >>> df = sqlContext.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
+ ... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
+ >>> lda = LDA(k=2, seed=1, optimizer="em")
+ >>> model = lda.fit(df)
+ >>> model.isDistributed()
+ True
+ >>> localModel = model.toLocal()
+ >>> localModel.isDistributed()
+ False
+ >>> model.vocabSize()
+ 2
+ >>> model.describeTopics().show()
+ +-----+-----------+--------------------+
+ |topic|termIndices| termWeights|
+ +-----+-----------+--------------------+
+ | 0| [1, 0]|[0.50401530077160...|
+ | 1| [0, 1]|[0.50401530077160...|
+ +-----+-----------+--------------------+
+ ...
+ >>> model.topicsMatrix()
+ DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
+
+ .. versionadded:: 2.0.0
+ """
+
+ k = Param(Params._dummy(), "k", "number of topics (clusters) to infer",
+ typeConverter=TypeConverters.toInt)
+ optimizer = Param(Params._dummy(), "optimizer",
+ "Optimizer or inference algorithm used to estimate
the LDA model. "
--- End diff ---
Usually we have ```+``` at the end of each Param doc line. Can you check
whether it produces correct Python docs without the ```+```?
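
For reference, a minimal sketch (not from the PR) of the two continuation styles being compared, using only the `Param`, `Params._dummy()`, and `TypeConverters` API already shown in the diff. Python concatenates adjacent string literals inside parentheses, so both forms should yield the same doc text; the question above is mainly about keeping the codebase convention consistent.

```python
from pyspark.ml.param import Param, Params, TypeConverters

# Convention referred to above: an explicit "+" at the end of each doc line.
with_plus = Param(Params._dummy(), "optimizer",
                  "Optimizer or inference algorithm used to estimate " +
                  "the LDA model.",
                  typeConverter=TypeConverters.toString)

# Without "+": adjacent string literals are concatenated implicitly by Python.
without_plus = Param(Params._dummy(), "optimizer",
                     "Optimizer or inference algorithm used to estimate "
                     "the LDA model.",
                     typeConverter=TypeConverters.toString)

# Either style produces identical rendered doc text.
assert with_plus.doc == without_plus.doc
```

In either style, the thing to watch is the trailing space before the line break in the doc string; dropping it glues words together whether or not the ```+``` is present.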