Repository: spark
Updated Branches:
  refs/heads/master b37f0cc1b -> 881208a8e


[SPARK-10230] [MLLIB] Rename optimizeAlpha to optimizeDocConcentration

See [discussion](https://github.com/apache/spark/pull/8254#discussion_r37837770)

CC jkbradley

Author: Feynman Liang <fli...@databricks.com>

Closes #8422 from feynmanliang/SPARK-10230.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/881208a8
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/881208a8
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/881208a8

Branch: refs/heads/master
Commit: 881208a8e849facf54166bdd69d3634407f952e7
Parents: b37f0cc
Author: Feynman Liang <fli...@databricks.com>
Authored: Tue Aug 25 11:58:47 2015 -0700
Committer: Joseph K. Bradley <jos...@databricks.com>
Committed: Tue Aug 25 11:58:47 2015 -0700

----------------------------------------------------------------------
 .../spark/mllib/clustering/LDAOptimizer.scala       | 16 ++++++++--------
 .../apache/spark/mllib/clustering/LDASuite.scala    |  2 +-
 2 files changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/881208a8/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala
index 5c2aae6..38486e9 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala
@@ -258,7 +258,7 @@ final class OnlineLDAOptimizer extends LDAOptimizer {
   private var tau0: Double = 1024
   private var kappa: Double = 0.51
   private var miniBatchFraction: Double = 0.05
-  private var optimizeAlpha: Boolean = false
+  private var optimizeDocConcentration: Boolean = false
 
   // internal data structure
   private var docs: RDD[(Long, Vector)] = null
@@ -335,20 +335,20 @@ final class OnlineLDAOptimizer extends LDAOptimizer {
   }
 
   /**
-   * Optimize alpha, indicates whether alpha (Dirichlet parameter for document-topic distribution)
-   * will be optimized during training.
+   * Optimize docConcentration, indicates whether docConcentration (Dirichlet parameter for
+   * document-topic distribution) will be optimized during training.
    */
   @Since("1.5.0")
-  def getOptimzeAlpha: Boolean = this.optimizeAlpha
+  def getOptimizeDocConcentration: Boolean = this.optimizeDocConcentration
 
   /**
-   * Sets whether to optimize alpha parameter during training.
+   * Sets whether to optimize docConcentration parameter during training.
    *
    * Default: false
    */
   @Since("1.5.0")
-  def setOptimzeAlpha(optimizeAlpha: Boolean): this.type = {
-    this.optimizeAlpha = optimizeAlpha
+  def setOptimizeDocConcentration(optimizeDocConcentration: Boolean): this.type = {
+    this.optimizeDocConcentration = optimizeDocConcentration
     this
   }
 
@@ -458,7 +458,7 @@ final class OnlineLDAOptimizer extends LDAOptimizer {
 
     // Note that this is an optimization to avoid batch.count
     updateLambda(batchResult, (miniBatchFraction * corpusSize).ceil.toInt)
-    if (optimizeAlpha) updateAlpha(gammat)
+    if (optimizeDocConcentration) updateAlpha(gammat)
     this
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/881208a8/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala
----------------------------------------------------------------------
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala
index 8a714f9..746a76a 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala
@@ -423,7 +423,7 @@ class LDASuite extends SparkFunSuite with MLlibTestSparkContext {
     val k = 2
     val docs = sc.parallelize(toyData)
     val op = new OnlineLDAOptimizer().setMiniBatchFraction(1).setTau0(1024).setKappa(0.51)
-      .setGammaShape(100).setOptimzeAlpha(true).setSampleWithReplacement(false)
+      .setGammaShape(100).setOptimizeDocConcentration(true).setSampleWithReplacement(false)
     val lda = new LDA().setK(k)
       .setDocConcentration(1D / k)
       .setTopicConcentration(0.01)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to