GitHub user myui commented on a diff in the pull request:

    https://github.com/apache/incubator-hivemall/pull/66#discussion_r111728398
  
    --- Diff: core/src/main/java/hivemall/topicmodel/OnlineLDAModel.java ---
    @@ -350,66 +348,80 @@ private float computeApproxBoundForMiniBatch() {
             float score = 0.f;
     
             // prepare
    -        final float[] gammaSum = new float[_miniBatchSize];
    -        for (int d = 0; d < gammaSum.length; d++) {
    -            gammaSum[d] += MathUtils.sum(_gamma[d], _K);
    +        float[] gammaSum = new float[_miniBatchSize];
    +        Arrays.fill(gammaSum, 0.f);
    +        for (int d = 0; d < _miniBatchSize; d++) {
    +            for (int k = 0; k < _K; k++) {
    +                gammaSum[d] += _gamma[d][k];
    +            }
             }
    -        final float[] lambdaSum = new float[_K];
    -        for (float[] lambda_l : _lambda.values()) {
    -            MathUtils.add(lambdaSum, lambda_l, _K);
    +        float[] lambdaSum = new float[_K];
    +        Arrays.fill(lambdaSum, 0.f);
    +        for (int k = 0; k < _K; k++) {
    +            for (String label : _lambda.keySet()) {
    +                lambdaSum[k] += _lambda.get(label)[k];
    +            }
             }
     
    -        // E[log p(docs | theta, beta)]
             for (int d = 0; d < _miniBatchSize; d++) {
    -            // for each word in the document
    -            for (String label : _miniBatchMap.get(d).keySet()) {
    -                float wordCount = _miniBatchMap.get(d).get(label);
    -
    -                float tmp = 0.f;
    +            // E[log p(doc | theta, beta)]
    +            Map<String, Float> doc = _miniBatchMap.get(d);
    +            for (String label : doc.keySet()) { // for each word in the document
    +                float[] temp = new float[_K];
    +                float max = Float.NEGATIVE_INFINITY; // NOT Float.MIN_VALUE, which is the smallest POSITIVE float
                     for (int k = 0; k < _K; k++) {
                         float eLogTheta_dk = (float) (Gamma.digamma(_gamma[d][k]) - Gamma.digamma(gammaSum[d]));
    -                    float eLogBeta_kw = 0.f;
    -                    if (_lambda.containsKey(d)) {
    -                        eLogBeta_kw = (float) (Gamma.digamma(_lambda.get(d)[k]
    -                                - Gamma.digamma(lambdaSum[k])));
    +                    float eLogBeta_kw = (float) (Gamma.digamma(_lambda.get(label)[k])
    +                            - Gamma.digamma(lambdaSum[k]));
    +
    +                    temp[k] = eLogTheta_dk + eLogBeta_kw;
    +                    if (temp[k] > max) {
    +                        max = temp[k];
                         }
    +                }
     
    -                    tmp += _phi.get(d).get(label)[k]
    -                            * (eLogTheta_dk + eLogBeta_kw - Math.log(_phi.get(d).get(label)[k]));
    +                float logsumexp = 0.f;
    +                for (int k = 0; k < _K; k++) {
    +                    logsumexp += (float) Math.exp(temp[k] - max);
                     }
    -                score += wordCount * tmp;
    +                logsumexp = max + (float) Math.log(logsumexp);
    +
    +                // sum( wordCount * logsumexp )
    +                score += doc.get(label) * logsumexp;
                 }
     
                 // E[log p(theta | alpha) - log q(theta | gamma)]
    -            score -= Gamma.logGamma(gammaSum[d]);
    -            float tmp = 0.f;
                 for (int k = 0; k < _K; k++) {
    -                float gamma_dk = _gamma[d][k];
    -                tmp += (_alpha - gamma_dk) * (Gamma.digamma(gamma_dk) - Gamma.digamma(gammaSum[d]))
    -                        + Gamma.logGamma(gamma_dk);
    -                tmp /= _docCount;
    +                // sum( (alpha - gammad) * Elogthetad )
    +                score += (_alpha - _gamma[d][k])
    +                        * (float) (Gamma.digamma(_gamma[d][k]) - Gamma.digamma(gammaSum[d]));
    +
    +                // sum( gammaln(gammad) - gammaln(alpha) )
    +                score += (float) Gamma.logGamma(_gamma[d][k]) - (float) Gamma.logGamma(_alpha);
                 }
    -            score += tmp;
    +            score += (float) Gamma.logGamma(_K * _alpha); // gammaln(sum(alpha))
    +            score -= Gamma.logGamma(gammaSum[d]); // gammaln(sum(gammad))
    +        }
     
    -            // E[log p(beta | eta) - log q (beta | lambda)]
    -            tmp = 0.f;
    -            for (int k = 0; k < _K; k++) {
    -                float tmpPartial = 0.f;
    -                for (String label : _lambda.keySet()) {
    -                    tmpPartial += (_eta - _lambda.get(label)[k])
    -                            * (float) (Gamma.digamma(_lambda.get(label)[k]) - Gamma.digamma(lambdaSum[k]))
    -                            * (float) (Gamma.logGamma(_lambda.get(label)[k]));
    -                }
    +        // compensate the likelihood for when the mini-batch is only a subset of the whole corpus
    +        // (i.e., online setting);
    +        // this keeps the likelihood always roughly on the same scale
    +        score *= _docRatio;
     
    -                tmp += (-1.f * (float) Gamma.logGamma(lambdaSum[k]) - tmpPartial);
    +        // E[log p(beta | eta) - log q (beta | lambda)]
    +        float etaSum = _eta * _lambda.size(); // vocabSize * eta
    +        for (int k = 0; k < _K; k++) {
    +            for (String label : _lambda.keySet()) {
    --- End diff ---
    
    ```java
    for (String label : _lambda.keySet()) {
        _lambda.get(label)
    ```

    is better replaced with

    ```java
    for (float[] lambda : _lambda.values())
    ```

    This avoids a hash table lookup on every iteration.
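
    For illustration, here is a minimal stand-alone sketch of the two patterns; the class name and toy data are hypothetical, not the actual OnlineLDAModel code. The second loop also hoists the map traversal out of the topic loop, so each `float[]` is fetched exactly once:

    ```java
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical illustration only; K and the toy lambda map stand in for
    // the _K and _lambda fields of OnlineLDAModel.
    public class LambdaSumSketch {
        public static void main(String[] args) {
            final int K = 3; // number of topics
            final Map<String, float[]> lambda = new HashMap<>();
            lambda.put("apple", new float[] {0.25f, 0.5f, 0.75f});
            lambda.put("banana", new float[] {1.0f, 1.25f, 1.5f});

            // Before: keySet() + get() pays one hash lookup per (topic, word) pair.
            final float[] lambdaSum1 = new float[K];
            for (int k = 0; k < K; k++) {
                for (String label : lambda.keySet()) {
                    lambdaSum1[k] += lambda.get(label)[k];
                }
            }

            // After: values() hands over each float[] directly, no lookups at all.
            final float[] lambdaSum2 = new float[K];
            for (float[] lambda_l : lambda.values()) {
                for (int k = 0; k < K; k++) {
                    lambdaSum2[k] += lambda_l[k];
                }
            }

            System.out.println(Arrays.toString(lambdaSum1)); // [1.25, 1.75, 2.25]
            System.out.println(Arrays.toString(lambdaSum2)); // [1.25, 1.75, 2.25]
        }
    }
    ```

    The same change applies to the `lambdaSum` preparation at the top of this method, which follows the identical keySet()/get() pattern.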

