GitHub user jkbradley commented on a diff in the pull request:
https://github.com/apache/spark/pull/7705#discussion_r35726236
--- Diff: mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala ---
@@ -385,59 +387,52 @@ final class OnlineLDAOptimizer extends LDAOptimizer {
     iteration += 1
     val k = this.k
     val vocabSize = this.vocabSize
-    val Elogbeta = dirichletExpectation(lambda).t
-    val expElogbeta = exp(Elogbeta)
+    val expElogbeta = exp(LDAUtils.dirichletExpectation(lambda)).t
     val alpha = this.alpha.toBreeze
     val gammaShape = this.gammaShape
-    val stats: RDD[BDM[Double]] = batch.mapPartitions { docs =>
+    val stats: RDD[(BDM[Double], List[BDV[Double]])] = batch.mapPartitions { docs =>
       val stat = BDM.zeros[Double](k, vocabSize)
-      docs.foreach { doc =>
-        val termCounts = doc._2
-        val (ids: List[Int], cts: Array[Double]) = termCounts match {
-          case v: DenseVector => ((0 until v.size).toList, v.values)
-          case v: SparseVector => (v.indices.toList, v.values)
-          case v => throw new IllegalArgumentException("Online LDA does not support vector type "
-            + v.getClass)
+      var gammaPart = List[BDV[Double]]()
+      docs.foreach { case (_, termCounts: Vector) =>
+        val ids: List[Int] = termCounts match {
+          case v: DenseVector => (0 until v.size).toList
+          case v: SparseVector => v.indices.toList
         }
         if (!ids.isEmpty) {
-
-          // Initialize the variational distribution q(theta|gamma) for the mini-batch
-          val gammad: BDV[Double] =
-            new Gamma(gammaShape, 1.0 / gammaShape).samplesVector(k) // K
-          val expElogthetad: BDV[Double] = exp(digamma(gammad) - digamma(sum(gammad))) // K
-          val expElogbetad: BDM[Double] = expElogbeta(ids, ::).toDenseMatrix // ids * K
-
-          val phinorm: BDV[Double] = expElogbetad * expElogthetad :+ 1e-100 // ids
-          var meanchange = 1D
-          val ctsVector = new BDV[Double](cts) // ids
-
-          // Iterate between gamma and phi until convergence
-          while (meanchange > 1e-3) {
-            val lastgamma = gammad.copy
-            // K                  K * ids               ids
-            gammad := (expElogthetad :* (expElogbetad.t * (ctsVector :/ phinorm))) :+ alpha
-            expElogthetad := exp(digamma(gammad) - digamma(sum(gammad)))
-            phinorm := expElogbetad * expElogthetad :+ 1e-100
-            meanchange = sum(abs(gammad - lastgamma)) / k
-          }
-
-          stat(::, ids) := expElogthetad.asDenseMatrix.t * (ctsVector :/ phinorm).asDenseMatrix
+          val (gammad, sstats) = OnlineLDAOptimizer.variationalTopicInference(
+            termCounts, expElogbeta, alpha, gammaShape, k)
+          stat(::, ids) := sstats
+          gammaPart = gammad :: gammaPart
         }
       }
-      Iterator(stat)
+      Iterator((stat, gammaPart))
     }
-
-    val statsSum: BDM[Double] = stats.reduce(_ += _)
+    val statsSum: BDM[Double] = stats.map(_._1).reduce(_ += _)
+    val gammat: BDM[Double] = breeze.linalg.DenseMatrix.vertcat(
--- End diff ---
Is this being kept around for some later PR to use?
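
For anyone reading along: the deleted inline code above is essentially what moved into the new OnlineLDAOptimizer.variationalTopicInference helper. A minimal sketch of that helper, reconstructed from the deleted lines (the exact signature, visibility, and return shape are assumptions from this diff, not confirmed against the PR):

// Sketch only: reconstructed from the deleted inline code above.
// Assumes the Breeze imports (BDV, BDM, exp, digamma, sum, abs) and the
// Gamma sampler already used in this file; the real helper may differ.
private[clustering] def variationalTopicInference(
    termCounts: Vector,
    expElogbeta: BDM[Double],
    alpha: BDV[Double],
    gammaShape: Double,
    k: Int): (BDV[Double], BDM[Double]) = {
  val (ids: List[Int], cts: Array[Double]) = termCounts match {
    case v: DenseVector => ((0 until v.size).toList, v.values)
    case v: SparseVector => (v.indices.toList, v.values)
  }
  // Initialize the variational distribution q(theta|gamma) for this document
  val gammad: BDV[Double] =
    new Gamma(gammaShape, 1.0 / gammaShape).samplesVector(k)  // K
  val expElogthetad: BDV[Double] = exp(digamma(gammad) - digamma(sum(gammad)))  // K
  val expElogbetad: BDM[Double] = expElogbeta(ids, ::).toDenseMatrix  // ids x K
  val phinorm: BDV[Double] = expElogbetad * expElogthetad :+ 1e-100  // ids
  val ctsVector = new BDV[Double](cts)  // ids
  var meanchange = 1D
  // Alternate between the gamma and phi updates until gamma converges
  while (meanchange > 1e-3) {
    val lastgamma = gammad.copy
    gammad := (expElogthetad :* (expElogbetad.t * (ctsVector :/ phinorm))) :+ alpha
    expElogthetad := exp(digamma(gammad) - digamma(sum(gammad)))
    phinorm := expElogbetad * expElogthetad :+ 1e-100
    meanchange = sum(abs(gammad - lastgamma)) / k
  }
  // K x ids sufficient statistics; the caller writes these into stat(::, ids)
  val sstatsd = expElogthetad.asDenseMatrix.t * (ctsVector :/ phinorm).asDenseMatrix
  (gammad, sstatsd)
}

Collecting gammaPart per partition and vertcat-ing the rows into gammat only earns its keep if something downstream consumes the per-document gammas, which is presumably what the question above is getting at.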