Github user EntilZha commented on a diff in the pull request:
https://github.com/apache/spark/pull/4047#discussion_r23501086
--- Diff: mllib/src/main/scala/org/apache/spark/mllib/clustering/LDA.scala ---
@@ -0,0 +1,472 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.mllib.clustering
+
+import java.util.Random
+
+import breeze.linalg.{DenseVector => BDV, sum => brzSum, normalize, axpy => brzAxpy}
+
+import org.apache.spark.Logging
+import org.apache.spark.annotation.DeveloperApi
+import org.apache.spark.graphx._
+import org.apache.spark.mllib.linalg.Vector
+import org.apache.spark.rdd.RDD
+import org.apache.spark.util.Utils
+
+
+/**
+ * :: DeveloperApi ::
+ *
+ * Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
+ *
+ * Terminology:
+ * - "word" = "term": an element of the vocabulary
+ * - "token": instance of a term appearing in a document
+ * - "topic": multinomial distribution over words representing some
concept
+ *
+ * Currently, the underlying implementation uses Expectation-Maximization (EM), implemented
+ * according to the Asuncion et al. (2009) paper referenced below.
+ *
+ * References:
+ * - Original LDA paper (journal version):
+ * Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
+ * - This class implements their "smoothed" LDA model.
+ * - Paper which clearly explains several algorithms, including EM:
+ * Asuncion, Welling, Smyth, and Teh.
+ * "On Smoothing and Inference for Topic Models." UAI, 2009.
+ *
+ * NOTE: This is currently marked DeveloperApi since it is under active development and may undergo
+ * API changes.
+ */
+@DeveloperApi
+class LDA private (
+ private var k: Int,
+ private var maxIterations: Int,
+ private var topicSmoothing: Double,
+ private var termSmoothing: Double,
+ private var seed: Long) extends Logging {
+
+ import LDA._
+
+ def this() = this(k = 10, maxIterations = 20, topicSmoothing = -1, termSmoothing = -1,
+ seed = Utils.random.nextLong())
+
+ /**
+ * Number of topics to infer. I.e., the number of soft cluster centers.
+ * (default = 10)
+ */
+ def getK: Int = k
+
+ def setK(k: Int): this.type = {
+ require(k > 0, s"LDA k (number of clusters) must be > 0, but was set to $k")
+ this.k = k
+ this
+ }
+
+ /**
+ * Topic smoothing parameter (commonly named "alpha").
+ *
+ * This is the parameter to the Dirichlet prior placed on the per-document topic distributions
+ * ("theta"). We use a symmetric Dirichlet prior.
+ *
+ * This value should be > 0.0, where larger values mean more smoothing (more regularization).
+ * If set to -1, then topicSmoothing is set automatically.
+ * (default = -1 = automatic)
+ *
+ * Automatic setting of parameter:
+ * - For EM: default = (50 / k) + 1.
+ * - The 50/k is common in LDA libraries.
+ * - The +1 follows Asuncion et al. (2009), who recommend a +1 adjustment for EM.
+ */
+ def getTopicSmoothing: Double = topicSmoothing
+
+ def setTopicSmoothing(topicSmoothing: Double): this.type = {
+ require(topicSmoothing > 0.0 || topicSmoothing == -1.0,
+ s"LDA topicSmoothing must be > 0 (or -1 for auto), but was set to
$topicSmoothing")
+ if (topicSmoothing > 0.0 && topicSmoothing <= 1.0) {
+ logWarning(s"LDA.topicSmoothing was set to $topicSmoothing, but for
EM, we recommend > 1.0")
+ }
+ this.topicSmoothing = topicSmoothing
+ this
+ }
+
+ /**
+ * Term smoothing parameter (commonly named "eta").
+ *
+ * This is the parameter to the Dirichlet prior placed on the per-topic word distributions
+ * (which are called "beta" in the original LDA paper by Blei et al., but are called "phi" in many
+ * later papers such as Asuncion et al., 2009.)
+ *
+ * This value should be > 0.0.
+ * If set to -1, then termSmoothing is set automatically.
+ * (default = -1 = automatic)
+ *
+ * Automatic setting of parameter:
+ * - For EM: default = 0.1 + 1.
+ * - The 0.1 gives a small amount of smoothing.
+ * - The +1 follows Asuncion et al. (2009), who recommend a +1 adjustment for EM.
+ */
+ def getTermSmoothing: Double = termSmoothing
+
+ def setTermSmoothing(termSmoothing: Double): this.type = {
+ require(termSmoothing > 0.0 || termSmoothing == -1.0,
+ s"LDA termSmoothing must be > 0 (or -1 for auto), but was set to
$termSmoothing")
+ if (termSmoothing > 0.0 && termSmoothing <= 1.0) {
+ logWarning(s"LDA.termSmoothing was set to $termSmoothing, but for
EM, we recommend > 1.0")
+ }
+ this.termSmoothing = termSmoothing
+ this
+ }
+
+ /**
+ * Maximum number of iterations for learning.
+ * (default = 20)
+ */
+ def getMaxIterations: Int = maxIterations
+
+ def setMaxIterations(maxIterations: Int): this.type = {
+ this.maxIterations = maxIterations
+ this
+ }
+
+ /** Random seed */
+ def getSeed: Long = seed
+
+ def setSeed(seed: Long): this.type = {
+ this.seed = seed
+ this
+ }
+
+ /**
+ * Learn an LDA model using the given dataset.
+ *
+ * @param documents RDD of documents, where each document is represented as a vector of term
+ * counts plus an ID. Document IDs must be >= 0.
+ * @return Inferred LDA model
+ */
+ def run(documents: RDD[Document]): DistributedLDAModel = {
--- End diff --
This is what the run methods might look like with the LearningState trait, etc. I added a description
of what each method runs from. (The snippet assumes the class also gains an `algorithm` member that
selects between `LearningAlgorithm.Gibbs` and `LearningAlgorithm.EM`.)
```scala
/**
 * Learn an LDA model using the given dataset.
 *
 * @param documents RDD of documents, where each document is represented as a vector of term
 *                  counts plus an ID. Document IDs must be >= 0.
 * @return Inferred LDA model
 */
def runFromBagOfWords(documents: RDD[Document]): DistributedLDAModel = {
  val topicSmoothing = if (this.topicSmoothing > 0) {
    this.topicSmoothing
  } else {
    (50.0 / k) + 1.0 // automatic setting for EM: (50 / k) + 1
  }
  val termSmoothing = if (this.termSmoothing > 0) {
    this.termSmoothing
  } else {
    1.1 // automatic setting for EM: 0.1 + 1
  }
  var state = algorithm match {
    case LearningAlgorithm.Gibbs =>
      LDA.GibbsLearningState.initialStateFromBagOfWords(documents, k, topicSmoothing,
        termSmoothing, seed)
    case LearningAlgorithm.EM =>
      // EMLearningState is assumed here as the EM counterpart of GibbsLearningState.
      LDA.EMLearningState.initialStateFromBagOfWords(documents, k, topicSmoothing,
        termSmoothing, seed)
  }
  var iter = 0
  while (iter < maxIterations) {
    state = state.next()
    iter += 1
  }
  new DistributedLDAModel(state)
}

/**
 * Learn an LDA model from an edge list, where each (word ID, document ID) pair represents
 * one token occurrence.
 *
 * @return Inferred LDA model
 */
def runFromEdges(edges: RDD[(WordId, DocId)]): DistributedLDAModel = {
  val topicSmoothing = if (this.topicSmoothing > 0) {
    this.topicSmoothing
  } else {
    (50.0 / k) + 1.0
  }
  val termSmoothing = if (this.termSmoothing > 0) {
    this.termSmoothing
  } else {
    1.1
  }
  var state = algorithm match {
    case LearningAlgorithm.Gibbs =>
      LDA.GibbsLearningState.initialStateFromEdges(edges, k, topicSmoothing,
        termSmoothing, seed)
    case LearningAlgorithm.EM =>
      LDA.EMLearningState.initialStateFromEdges(edges, k, topicSmoothing,
        termSmoothing, seed)
  }
  var iter = 0
  while (iter < maxIterations) {
    state = state.next()
    iter += 1
  }
  new DistributedLDAModel(state)
}
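
// For reference, a minimal sketch of the LearningState trait the two methods
// above rely on. This is inferred from their usage (initialState* builds the
// starting state, next() runs one learning iteration) and is an assumption,
// not the PR's actual API.
trait LearningState {
  def next(): LearningState
}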
```
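For context, calling code might then look roughly like the sketch below. The `setK`/`setMaxIterations` setters come from the diff above; treating `WordId`/`DocId` as `Long` aliases on the `LDA` companion object, and having a `SparkContext` named `sc` in scope, are assumptions on my part.
```scala
// Hypothetical usage sketch; WordId/DocId as Long aliases and `sc` in scope
// are assumptions. Each (word, doc) pair stands for one token occurrence.
val edges: RDD[(LDA.WordId, LDA.DocId)] = sc.parallelize(Seq(
  (0L, 0L), (1L, 0L), (1L, 1L), (2L, 1L)))
val model: DistributedLDAModel = new LDA()
  .setK(2)
  .setMaxIterations(10)
  .runFromEdges(edges)
```
Keeping the iteration loop behind the LearningState interface means the Gibbs and EM paths share the same driver code and differ only in how the initial state is built.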