Github user zjffdu commented on a diff in the pull request:
https://github.com/apache/spark/pull/10242#discussion_r57837880
--- Diff: python/pyspark/ml/clustering.py ---
@@ -292,6 +293,376 @@ def _create_model(self, java_model):
         return BisectingKMeansModel(java_model)
+class LDAModel(JavaModel):
+    """
+    A clustering model derived from the LDA method.
+
+    .. versionadded:: 2.0.0
+    """
+
+    @since("2.0.0")
+    def isDistributed(self):
+        """Indicates whether this instance is of type DistributedLDAModel."""
+        return self._call_java("isDistributed")
+
+    @since("2.0.0")
+    def vocabSize(self):
+        """Vocabulary size (number of terms or words in the vocabulary)."""
+        return self._call_java("vocabSize")
+
+ @since("2.0.0")
+ def topicsMatrix(self):
+ """ Inferred topics, where each topic is represented by a
distribution over terms.
+ This is a matrix of size vocabSize x k, where each column is a
topic.
+ No guarantees are given about the ordering of the topics.
+
+ WARNING: If this model is actually a [[DistributedLDAModel]]
instance produced by
+ the Expectation-Maximization ("em") [[optimizer]], then this
method could involve
+ collecting a large amount of data to the driver (on the order of
vocabSize x k).
+ """
+ return self._call_java("topicsMatrix")
+
+ @since("2.0.0")
+ def logLikelihood(self, dataset):
+ """Calculates a lower bound on the log likelihood of the entire
corpus.
+ See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
+
+ WARNING: If this model is an instance of [[DistributedLDAModel]]
(produced when
+ [[optimizer]] is set to "em"), this involves collecting a large
[[topicsMatrix]] to the
+ driver. This implementation may be changed in the future.
+ """
+ return self._call_java("logLikelihood", dataset)
+
+ @since("2.0.0")
+ def logPerplexity(self, dataset):
+ """Calculate an upper bound bound on perplexity. (Lower is
better.)
+ See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
+
+ WARNING: If this model is an instance of [[DistributedLDAModel]]
(produced when
+ [[optimizer]] is set to "em"), this involves collecting a large
[[topicsMatrix]] to the
+ driver. This implementation may be changed in the future.
+ """
+ return self._call_java("logPerplexity", dataset)
+
+ @since("2.0.0")
+ def describeTopics(self, maxTermsPerTopic=10):
+ """ Return the topics described by their top-weighted terms.
+
+ WARNING: If vocabSize and k are large, this can return a large
object!
+
+ :param maxTermsPerTopic: Maximum number of terms to collect for
each topic.
+ Default value of 10.
+ :return: Local DataFrame with one topic per Row, with columns:
+ - "topic": IntegerType: topic index
+ - "termIndices": ArrayType(IntegerType): term indices,
sorted in order of decreasing
+ term importance
+ - "termWeights": ArrayType(DoubleType): corresponding
sorted term weights
+ """
+ return self._call_java("describeTopics", maxTermsPerTopic)
+
+ @since("2.0.0")
+ def estimatedDocConcentration(self):
+ """Value for [[docConcentration]] estimated from data.
+ If Online LDA was used and [[optimizeDocConcentration]] was set to
false,
+ then this returns the fixed (given) value for the
[[docConcentration]] parameter.
+ """
+ return self._call_java("estimatedDocConcentration")
+
+
+class DistributedLDAModel(LDAModel):
+    """
+    Distributed model fitted by LDA.
+
+    .. versionadded:: 2.0.0
+    """
+
+    @since("2.0.0")
+    def toLocal(self):
+        """Convert this distributed model to a local representation."""
+        return self._call_java("toLocal")
+
+
+class LocalLDAModel(LDAModel):
+    """
+    Local (non-distributed) model fitted by LDA.
+
+    .. versionadded:: 2.0.0
+    """
+    pass
+
+
+class LDA(JavaEstimator, HasFeaturesCol, HasMaxIter, HasSeed, HasCheckpointInterval):
+    """
+    Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
+
+    Terminology:
+    - "word" = "term": an element of the vocabulary
+    - "token": instance of a term appearing in a document
+    - "topic": multinomial distribution over words representing some concept
+
+    References:
+    - Original LDA paper (journal version):
+      Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
+
+    >>> from pyspark.mllib.linalg import Vectors, SparseVector
+    >>> from pyspark.ml.clustering import LDA
+    >>> df = sqlContext.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
+    ...     [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
+    >>> lda = LDA(k=2, seed=1, optimizer="em")
+    >>> model = lda.fit(df)
+    >>> model.isDistributed()
+    True
+    >>> localModel = model.toLocal()
+    >>> localModel.isDistributed()
+    False
+    >>> model.vocabSize()
+    2
+    >>> model.describeTopics().show()
+    +-----+-----------+--------------------+
+    |topic|termIndices|         termWeights|
+    +-----+-----------+--------------------+
+    |    0|     [1, 0]|[0.50401530077160...|
+    |    1|     [0, 1]|[0.50401530077160...|
+    +-----+-----------+--------------------+
+    ...
+    >>> model.topicsMatrix()
+    DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
+    >>> model.estimatedDocConcentration()
+    DenseVector([26.0, 26.0])
+
+    .. versionadded:: 2.0.0
+    """
+
+    k = Param(Params._dummy(), "k", "number of topics (clusters) to infer")
+    optimizer = Param(Params._dummy(), "optimizer",
+                      "Optimizer or inference algorithm used to estimate the LDA model. "
+                      "Supported: online, em")
+    learningOffset = Param(Params._dummy(), "learningOffset",
+                           "A (positive) learning parameter that downweights early iterations. "
+                           "Larger values make early iterations count less.")
+    learningDecay = Param(Params._dummy(), "learningDecay",
+                          "Learning rate, set as an exponential decay rate. "
+                          "This should be between (0.5, 1.0] to guarantee asymptotic convergence.")
+    subsamplingRate = Param(Params._dummy(), "subsamplingRate",
+                            "Fraction of the corpus to be sampled and used in each iteration "
+                            "of mini-batch gradient descent, in range (0, 1].")
+    optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
+                                     "Indicates whether the docConcentration (Dirichlet parameter "
+                                     "for document-topic distribution) will be optimized during "
+                                     "training.")
+    docConcentration = Param(Params._dummy(), "docConcentration",
+                             "Concentration parameter (commonly named \"alpha\") for the "
+                             "prior placed on documents' distributions over topics (\"theta\").")
+    topicConcentration = Param(Params._dummy(), "topicConcentration",
+                               "Concentration parameter (commonly named \"beta\" or \"eta\") for "
+                               "the prior placed on topics' distributions over terms.")
+    topicDistribution = Param(Params._dummy(), "topicDistribution",
--- End diff ---
A Param's declared attribute name must be the same as the name string passed to
its constructor, otherwise we will get an error.
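To illustrate, here is a minimal, self-contained sketch of the failure mode,
using simplified stand-ins for Param/Params rather than the actual
pyspark.ml.param internals: the framework resolves a Param by attribute lookup
on its name string, so a mismatched declaration breaks that lookup.

    class Param(object):
        def __init__(self, name, doc):
            self.name = name  # the name string the framework uses for lookup
            self.doc = doc

    class Params(object):
        def getParam(self, paramName):
            # Resolution is done by attribute lookup on the name string.
            return getattr(self, paramName)

    class Good(Params):
        k = Param("k", "number of topics")          # attribute name == name string

    class Bad(Params):
        numTopics = Param("k", "number of topics")  # attribute name != name string

    Good().getParam("k")  # resolves fine
    try:
        Bad().getParam("k")
    except AttributeError as e:
        print("lookup failed:", e)  # 'Bad' object has no attribute 'k'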