Github user jkbradley commented on a diff in the pull request:

    https://github.com/apache/spark/pull/7705#discussion_r35726237
  
    --- Diff: mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala ---
    @@ -385,59 +387,52 @@ final class OnlineLDAOptimizer extends LDAOptimizer {
         iteration += 1
         val k = this.k
         val vocabSize = this.vocabSize
    -    val Elogbeta = dirichletExpectation(lambda).t
    -    val expElogbeta = exp(Elogbeta)
    +    val expElogbeta = exp(LDAUtils.dirichletExpectation(lambda)).t
         val alpha = this.alpha.toBreeze
         val gammaShape = this.gammaShape
     
    -    val stats: RDD[BDM[Double]] = batch.mapPartitions { docs =>
    +    val stats: RDD[(BDM[Double], List[BDV[Double]])] = batch.mapPartitions { docs =>
           val stat = BDM.zeros[Double](k, vocabSize)
    -      docs.foreach { doc =>
    -        val termCounts = doc._2
    -        val (ids: List[Int], cts: Array[Double]) = termCounts match {
    -          case v: DenseVector => ((0 until v.size).toList, v.values)
    -          case v: SparseVector => (v.indices.toList, v.values)
    -          case v => throw new IllegalArgumentException("Online LDA does not support vector type "
    -            + v.getClass)
    +      var gammaPart = List[BDV[Double]]()
    +      docs.foreach { case (_, termCounts: Vector) =>
    +        val ids: List[Int] = termCounts match {
    +          case v: DenseVector => (0 until v.size).toList
    +          case v: SparseVector => v.indices.toList
             }
             if (!ids.isEmpty) {
    -
    -          // Initialize the variational distribution q(theta|gamma) for the mini-batch
    -          val gammad: BDV[Double] =
    -            new Gamma(gammaShape, 1.0 / gammaShape).samplesVector(k) // K
    -          val expElogthetad: BDV[Double] = exp(digamma(gammad) - digamma(sum(gammad))) // K
    -          val expElogbetad: BDM[Double] = expElogbeta(ids, ::).toDenseMatrix // ids * K
    -
    -          val phinorm: BDV[Double] = expElogbetad * expElogthetad :+ 1e-100 // ids
    -          var meanchange = 1D
    -          val ctsVector = new BDV[Double](cts) // ids
    -
    -          // Iterate between gamma and phi until convergence
    -          while (meanchange > 1e-3) {
    -            val lastgamma = gammad.copy
    -            //        K                  K * ids               ids
    -            gammad := (expElogthetad :* (expElogbetad.t * (ctsVector :/ phinorm))) :+ alpha
    -            expElogthetad := exp(digamma(gammad) - digamma(sum(gammad)))
    -            phinorm := expElogbetad * expElogthetad :+ 1e-100
    -            meanchange = sum(abs(gammad - lastgamma)) / k
    -          }
    -
    -          stat(::, ids) := expElogthetad.asDenseMatrix.t * (ctsVector :/ phinorm).asDenseMatrix
    +          val (gammad, sstats) = OnlineLDAOptimizer.variationalTopicInference(
    +            termCounts, expElogbeta, alpha, gammaShape, k)
    +          stat(::, ids) := sstats
    +          gammaPart = gammad :: gammaPart
             }
           }
    -      Iterator(stat)
    +      Iterator((stat, gammaPart))
         }
    -
    -    val statsSum: BDM[Double] = stats.reduce(_ += _)
    +    val statsSum: BDM[Double] = stats.map(_._1).reduce(_ += _)
    +    val gammat: BDM[Double] = breeze.linalg.DenseMatrix.vertcat(
    +      stats.map(_._2).reduce(_ ++ _).map(_.toDenseMatrix): _*)
         val batchResult = statsSum :* expElogbeta.t
     
         // Note that this is an optimization to avoid batch.count
    -    update(batchResult, iteration, (miniBatchFraction * corpusSize).ceil.toInt)
    +    updateLambda(batchResult, (miniBatchFraction * corpusSize).ceil.toInt)
         this
       }
     
    -  override private[clustering] def getLDAModel(iterationTimes: Array[Double]): LDAModel = {
    -    new LocalLDAModel(Matrices.fromBreeze(lambda).transpose)
    +  /**
    +   * Update lambda based on the batch submitted. batchSize can be different for each iteration.
    +   */
    +  private def updateLambda(stat: BDM[Double], batchSize: Int): Unit = {
    --- End diff ---
    
    Should update() be removed?  It seems like this method replaces the old update().
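    
    For reference, if update() is indeed superseded, updateLambda() would presumably perform the standard online variational Bayes step for lambda from Hoffman, Blei, and Bach (2010), which is what OnlineLDAOptimizer implements. The sketch below only illustrates that step; the parameter names (tau0, kappa, eta, corpusSize, iteration) are assumptions for the example and are not taken from this diff:
    
        import breeze.linalg.{DenseMatrix => BDM}
    
        // Sketch only: the standard online variational Bayes update of lambda
        // (Hoffman, Blei, Bach 2010). Every parameter name here is an assumption
        // for illustration, not an identifier confirmed by the diff above.
        def onlineLambdaUpdate(
            lambda: BDM[Double],   // current topic-word parameters, k x vocabSize
            stat: BDM[Double],     // sufficient statistics from the mini-batch, k x vocabSize
            batchSize: Int,        // number of documents in the mini-batch
            corpusSize: Long,      // total number of documents in the corpus
            iteration: Int,        // mini-batches processed so far
            tau0: Double,          // learning-rate delay
            kappa: Double,         // learning-rate decay exponent
            eta: Double): BDM[Double] = {
          // Learning rate rho_t = (tau0 + t)^(-kappa), decaying over iterations.
          val weight = math.pow(tau0 + iteration, -kappa)
          // Rescale the mini-batch statistics from batchSize documents to the full corpus.
          val scaled = stat :* (corpusSize.toDouble / batchSize)
          // Blend the previous lambda with the batch estimate (a natural-gradient step).
          (lambda :* (1.0 - weight)) :+ ((scaled :+ eta) :* weight)
        }
    
    In the diff above the method is invoked as updateLambda(batchResult, (miniBatchFraction * corpusSize).ceil.toInt), i.e. with the expected mini-batch size rather than batch.count, which is why batchSize can vary between iterations.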

