Github user Krimit commented on a diff in the pull request:

    https://github.com/apache/spark/pull/17673#discussion_r113602341
  
    --- Diff: mllib/src/main/scala/org/apache/spark/ml/feature/Word2Vec.scala 
---
    @@ -194,6 +232,285 @@ final class Word2Vec @Since("1.4.0") (
     
       @Since("1.4.1")
       override def copy(extra: ParamMap): Word2Vec = defaultCopy(extra)
    +
    +  /**
    +   * Similar to InitUnigramTable in the original code. Instead of using an 
array of size 100 million
    +   * like the original, we size it to be 20 times the vocabulary size.
    +   * We sacrifice memory here, to get constant time lookups into this 
array when generating
    +   * negative samples.
    +   */
    +  private def generateUnigramTable(normalizedWeights: Array[Double], 
tableSize: Int): Array[Int] = {
    +    val table = Array.fill(tableSize)(0)
    +    var a = 0
    +    var i = 0
    +    while (a < table.length) {
    +      table.update(a, i)
    +      if (a.toFloat / table.length >= normalizedWeights(i)) {
    +        i = math.min(normalizedWeights.length - 1, i + 1)
    +      }
    +      a += 1
    +    }
    +    table
    +  }
    +
    +  private def generateVocab[S <: Iterable[String]](input: RDD[S]):
    +      (Int, Long, Map[String, Int], Array[Int]) = {
    +    val sc = input.context
    +
    +    val words = input.flatMap(x => x)
    +
    +    val vocab = words.map(w => (w, 1L))
    +      .reduceByKey(_ + _)
    +      .filter{case (w, c) => c >= $(minCount)}
    +      .collect()
    +      .sortWith{case ((w1, c1), (w2, c2)) => c1 > c2}
    +
    +    val totalWordCount = vocab.map(_._2).sum
    +
    +    val vocabMap = vocab.zipWithIndex.map{case ((w, c), i) =>
    +      w -> i
    +    }.toMap
    +
    +    // We create a cumulative distribution array, unlike the original 
implementation
    +    // and use binary search to get insertion points. This should 
replicate the same
    +    // behavior as the table in original implementation.
    +    val weights = vocab.map(x => scala.math.pow(x._2, power))
    +    val totalWeight = weights.sum
    +
    +    val normalizedCumWeights = weights.scanLeft(0.0)(_ + _).tail.map(x => 
(x / totalWeight))
    +
    +    val unigramTableSize =
    +      math.min(maxUnigramTableSize, unigramTableSizeFactor * 
normalizedCumWeights.length)
    +    val unigramTable = generateUnigramTable(normalizedCumWeights, 
unigramTableSize)
    +
    +    (vocabMap.size, totalWordCount, vocabMap, unigramTable)
    +  }
    +
    +  /**
    +   * This method implements Word2Vec Continuous Bag Of Words based 
implementation using
    +   * negative sampling optimization, using BLAS for vectorizing operations 
where applicable.
    +   * The algorithm is parallelized in the same way as the skip-gram based 
estimation.
    +   * @param input
    +   * @return
    +   */
    +  private def fitCBOW[S <: Iterable[String]](input: RDD[S]): 
feature.Word2VecModel = {
    --- End diff --
    
    I think the CBOW logic should definitely live outside of this class. I'm 
even wondering if it's worthwhile to update the mllib Word2Vec to have access 
to this as well. @MLnick @srowen thoughts?


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to