Github user jkbradley commented on a diff in the pull request:

    https://github.com/apache/spark/pull/7760#discussion_r35842202
  
    --- Diff: 
mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala ---
    @@ -281,6 +282,66 @@ class LDASuite extends SparkFunSuite with 
MLlibTestSparkContext {
         assert(ldaModel.logPerplexity(docs) ~== -3.690D relTol 1E-3D)
       }
     
    +  test("LocalLDAModel predict") {
    +    val k = 2
    +    val vocabSize = 6
    +    val alpha = 0.01
    +    val eta = 0.01
    +    val gammaShape = 100
    +    // obtained from LDA model trained in gensim, see below
    +    val topics = new DenseMatrix(numRows = vocabSize, numCols = k, values 
= Array(
    +      1.86738052, 1.94056535, 1.89981687, 0.0833265, 0.07405918, 
0.07940597,
    +      0.15081551, 0.08637973, 0.12428538, 1.9474897, 1.94615165, 
1.95204124))
    +
    +    def toydata: Array[(Long, Vector)] = Array(
    +      Vectors.sparse(6, Array(0, 1), Array(1, 1)),
    +      Vectors.sparse(6, Array(1, 2), Array(1, 1)),
    +      Vectors.sparse(6, Array(0, 2), Array(1, 1)),
    +      Vectors.sparse(6, Array(3, 4), Array(1, 1)),
    +      Vectors.sparse(6, Array(3, 5), Array(1, 1)),
    +      Vectors.sparse(6, Array(4, 5), Array(1, 1))
    +    ).zipWithIndex.map { case (wordCounts, docId) => (docId.toLong, 
wordCounts) }
    +    val docs = sc.parallelize(toydata)
    +
    +    val ldaModel: LocalLDAModel = new LocalLDAModel(
    +      topics, Vectors.dense(Array.fill(k)(alpha)), eta, gammaShape)
    +
    +    /* Verify results using gensim:
    +       import numpy as np
    +       from gensim import models
    +       corpus = [
    +          [(0, 1.0), (1, 1.0)],
    +          [(1, 1.0), (2, 1.0)],
    +          [(0, 1.0), (2, 1.0)],
    +          [(3, 1.0), (4, 1.0)],
    +          [(3, 1.0), (5, 1.0)],
    +          [(4, 1.0), (5, 1.0)]]
    +       np.random.seed(2345)
    +       lda = models.ldamodel.LdaModel(
    +          corpus=corpus, alpha=0.01, eta=0.01, num_topics=2, 
update_every=0, passes=100,
    +          decay=0.51, offset=1024)
    +       print(list(lda.get_document_topics(corpus)))
    +       > [[(0, 0.99504950495049516)], [(0, 0.99504950495049516)],
    +       > [(0, 0.99504950495049516)], [(1, 0.99504950495049516)],
    +       > [(1, 0.99504950495049516)], [(1, 0.99504950495049516)]]
    +     */
    +
    +    val expectedPredictions = List(
    +      (0, 0.99504), (0, 0.99504),
    +      (0, 0.99504), (1, 0.99504),
    +      (1, 0.99504), (1, 0.99504))
    +
    +    expectedPredictions.zip(
    +      ldaModel.topicDistributions(docs).map { case (_, topics) =>
    --- End diff --
    
    This would be easier to read if you extracted the result of 
`topicDistributions(docs)` into a named val and then used that val in the zip.
    
    Also, it'd be good to keep the doc index and sort by it — just to make the 
matching between predictions and expected values explicit.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to