Github user yanboliang commented on a diff in the pull request:

    https://github.com/apache/spark/pull/12723#discussion_r61254593
  
    --- Diff: python/pyspark/ml/clustering.py ---
    @@ -453,6 +454,447 @@ def _create_model(self, java_model):
             return BisectingKMeansModel(java_model)
     
     
    +class LDAModel(JavaModel):
    +    """
    +    Latent Dirichlet Allocation (LDA) model.
    +    This abstraction permits for different underlying representations,
    +    including local and distributed data structures.
    +
    +    .. versionadded:: 2.0.0
    +    """
    +
    +    @since("2.0.0")
    +    def isDistributed(self):
    +        """
    +        Indicates whether this instance is of type DistributedLDAModel
    +        """
    +        return self._call_java("isDistributed")
    +
    +    @since("2.0.0")
    +    def vocabSize(self):
    +        """Vocabulary size (number of terms or words in the vocabulary)"""
    +        return self._call_java("vocabSize")
    +
    +    @since("2.0.0")
    +    def topicsMatrix(self):
    +        """
    +        Inferred topics, where each topic is represented by a distribution 
over terms.
    +        This is a matrix of size vocabSize x k, where each column is a 
topic.
    +        No guarantees are given about the ordering of the topics.
    +
    +        WARNING: If this model is actually a 
:py:class:`DistributedLDAModel` instance produced by
    +        the Expectation-Maximization ("em") `optimizer`, then this method 
could involve
    +        collecting a large amount of data to the driver (on the order of 
vocabSize x k).
    +        """
    +        return self._call_java("topicsMatrix")
    +
    +    @since("2.0.0")
    +    def logLikelihood(self, dataset):
    +        """
    +        Calculates a lower bound on the log likelihood of the entire 
corpus.
    +        See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
    +
    +        WARNING: If this model is an instance of 
:py:class:`DistributedLDAModel` (produced when
    +        :py:attr:`optimizer` is set to "em"), this involves collecting a 
large
    +        :py:func:`topicsMatrix` to the driver. This implementation may be 
changed in the future.
    +        """
    +        return self._call_java("logLikelihood", dataset)
    +
    +    @since("2.0.0")
    +    def logPerplexity(self, dataset):
    +        """
     +        Calculate an upper bound on perplexity.  (Lower is better.)
    +        See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
    +
    +        WARNING: If this model is an instance of 
:py:class:`DistributedLDAModel` (produced when
    +        :py:attr:`optimizer` is set to "em"), this involves collecting a 
large
    +        :py:func:`topicsMatrix` to the driver. This implementation may be 
changed in the future.
    +        """
    +        return self._call_java("logPerplexity", dataset)
    +
    +    @since("2.0.0")
    +    def describeTopics(self, maxTermsPerTopic=10):
    +        """
    +        Return the topics described by their top-weighted terms.
    +        """
    +        return self._call_java("describeTopics", maxTermsPerTopic)
    +
    +    @since("2.0.0")
    +    def estimatedDocConcentration(self):
    +        """
    +        Value for :py:attr:`LDA.docConcentration` estimated from data.
    +        If Online LDA was used and 
:py:attr:`LDA.optimizeDocConcentration` was set to false,
    +        then this returns the fixed (given) value for the 
:py:attr:`LDA.docConcentration` parameter.
    +        """
    +        return self._call_java("estimatedDocConcentration")
    +
    +
    +class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
    +    """
    --- End diff --
    
    ```.. note:: Experimental```


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to