GitHub user dbtsai commented on a diff in the pull request:

    https://github.com/apache/spark/pull/10743#discussion_r50032693
  
    --- Diff: mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala ---
    @@ -276,113 +276,123 @@ class LogisticRegression @Since("1.2.0") (
         val numClasses = histogram.length
         val numFeatures = summarizer.mean.size
     
    -    if (numInvalid != 0) {
    -      val msg = s"Classification labels should be in {0 to ${numClasses - 
1} " +
    -        s"Found $numInvalid invalid labels."
    -      logError(msg)
    -      throw new SparkException(msg)
    -    }
    -
    -    if (numClasses > 2) {
    -      val msg = s"Currently, LogisticRegression with ElasticNet in ML 
package only supports " +
    -        s"binary classification. Found $numClasses in the input dataset."
    -      logError(msg)
    -      throw new SparkException(msg)
    -    }
    +    val (coefficients, intercept, objectiveHistory) = {
    +      if (numInvalid != 0) {
    +        val msg = s"Classification labels should be in {0 to ${numClasses 
- 1} " +
    +          s"Found $numInvalid invalid labels."
    +        logError(msg)
    +        throw new SparkException(msg)
    +      }
     
    -    val featuresMean = summarizer.mean.toArray
    -    val featuresStd = summarizer.variance.toArray.map(math.sqrt)
    +      if (numClasses > 2) {
    +        val msg = s"Currently, LogisticRegression with ElasticNet in ML 
package only supports " +
    +          s"binary classification. Found $numClasses in the input dataset."
    +        logError(msg)
    +        throw new SparkException(msg)
    +      } else if ($(fitIntercept) && numClasses == 2 && histogram(0) == 
0.0) {
    +        logWarning(s"All labels are one and fitIntercept=true, so the 
coefficients will be " +
    +          s"zeros and the intercept will be positive infinity; as a 
result, " +
    +          s"training is not needed.")
    +        (Vectors.sparse(numFeatures, Seq()), Double.PositiveInfinity, 
Array.empty[Double])
    +      } else if ($(fitIntercept) && numClasses == 1) {
    +        logWarning(s"All labels are one and fitIntercept=true, so the 
coefficients will be " +
    +          s"zeros and the intercept will be negative infinity; as a 
result, " +
    +          s"training is not needed.")
    +        (Vectors.sparse(numFeatures, Seq()), Double.NegativeInfinity, 
Array.empty[Double])
    +      } else {
    +        val featuresMean = summarizer.mean.toArray
    +        val featuresStd = summarizer.variance.toArray.map(math.sqrt)
     
    -    val regParamL1 = $(elasticNetParam) * $(regParam)
    -    val regParamL2 = (1.0 - $(elasticNetParam)) * $(regParam)
    +        val regParamL1 = $(elasticNetParam) * $(regParam)
    +        val regParamL2 = (1.0 - $(elasticNetParam)) * $(regParam)
     
    -    val costFun = new LogisticCostFun(instances, numClasses, $(fitIntercept), $(standardization),
    -      featuresStd, featuresMean, regParamL2)
    +        val costFun = new LogisticCostFun(instances, numClasses, $(fitIntercept),
    +          $(standardization), featuresStd, featuresMean, regParamL2)
     
    -    val optimizer = if ($(elasticNetParam) == 0.0 || $(regParam) == 0.0) {
    -      new BreezeLBFGS[BDV[Double]]($(maxIter), 10, $(tol))
    -    } else {
    -      def regParamL1Fun = (index: Int) => {
    -        // Remove the L1 penalization on the intercept
    -        if (index == numFeatures) {
    -          0.0
    +        val optimizer = if ($(elasticNetParam) == 0.0 || $(regParam) == 0.0) {
    +          new BreezeLBFGS[BDV[Double]]($(maxIter), 10, $(tol))
             } else {
    -          if ($(standardization)) {
    -            regParamL1
    -          } else {
    -            // If `standardization` is false, we still standardize the data
    -            // to improve the rate of convergence; as a result, we have to
    -            // perform this reverse standardization by penalizing each component
    -            // differently to get effectively the same objective function when
    -            // the training dataset is not standardized.
    -            if (featuresStd(index) != 0.0) regParamL1 / featuresStd(index) else 0.0
    +          def regParamL1Fun = (index: Int) => {
    +            // Remove the L1 penalization on the intercept
    +            if (index == numFeatures) {
    +              0.0
    +            } else {
    +              if ($(standardization)) {
    +                regParamL1
    +              } else {
    +                // If `standardization` is false, we still standardize the data
    +                // to improve the rate of convergence; as a result, we have to
    +                // perform this reverse standardization by penalizing each component
    +                // differently to get effectively the same objective function when
    +                // the training dataset is not standardized.
    +                if (featuresStd(index) != 0.0) regParamL1 / featuresStd(index) else 0.0
    +              }
    +            }
               }
    +          new BreezeOWLQN[Int, BDV[Double]]($(maxIter), 10, regParamL1Fun, $(tol))
             }
    -      }
    -      new BreezeOWLQN[Int, BDV[Double]]($(maxIter), 10, regParamL1Fun, $(tol))
    -    }
    -
    -    val initialCoefficientsWithIntercept =
    -      Vectors.zeros(if ($(fitIntercept)) numFeatures + 1 else numFeatures)
    -
    -    if ($(fitIntercept)) {
    -      /*
    -         For binary logistic regression, when we initialize the coefficients as zeros,
    -         it will converge faster if we initialize the intercept such that
    -         it follows the distribution of the labels.
    -
    -         {{{
    -         P(0) = 1 / (1 + \exp(b)), and
    -         P(1) = \exp(b) / (1 + \exp(b))
    -         }}}, hence
    -         {{{
    -         b = \log{P(1) / P(0)} = \log{count_1 / count_0}
    -         }}}
    -       */
    -      initialCoefficientsWithIntercept.toArray(numFeatures)
    -        = math.log(histogram(1) / histogram(0))
    -    }
     
    -    val states = optimizer.iterations(new CachedDiffFunction(costFun),
    -      initialCoefficientsWithIntercept.toBreeze.toDenseVector)
    +        val initialCoefficientsWithIntercept =
    +          Vectors.zeros(if ($(fitIntercept)) numFeatures + 1 else numFeatures)
    +
    +        if ($(fitIntercept)) {
    +          /*
    --- End diff ---
    
    The indentation is still off; add one character to the following line.
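    For context, the intercept initialization being re-indented here rests on the algebra quoted in the diff: with all coefficients at zero, P(1) = exp(b) / (1 + exp(b)), so b = log(P(1) / P(0)) = log(count_1 / count_0). A minimal standalone Scala sketch of that formula (illustrative only; the object and method names are not from the PR):

        object InterceptInitSketch {
          // histogram(i) holds the count of label i, as in the diff above.
          // With zero coefficients, P(1) = exp(b) / (1 + exp(b)), hence
          // b = log(P(1) / P(0)) = log(count_1 / count_0).
          def initialIntercept(histogram: Array[Double]): Double =
            math.log(histogram(1) / histogram(0))

          def main(args: Array[String]): Unit = {
            // 30 zero labels and 70 one labels: sigmoid(b) recovers the
            // empirical positive rate, 0.7.
            val b = initialIntercept(Array(30.0, 70.0))
            println(s"b = $b, sigmoid(b) = ${1.0 / (1.0 + math.exp(-b))}")

            // With no zero labels the formula diverges to +Infinity, which is
            // why the new branches in the diff skip training and set the
            // intercept to positive infinity directly.
            println(initialIntercept(Array(0.0, 100.0))) // Infinity
          }
        }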

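    Similarly, the regParamL1Fun in the diff encodes two decisions: the intercept (index == numFeatures) is never L1-penalized, and when standardization is off the per-coefficient penalty is divided by that feature's standard deviation, so optimizing on standardized data still yields the unstandardized objective. A standalone sketch of the same logic (illustrative names, not the PR's code):

        object RegParamL1FunSketch {
          // Per-index L1 penalty for OWL-QN, mirroring the diff above.
          def regParamL1Fun(
              regParamL1: Double,
              featuresStd: Array[Double],
              numFeatures: Int,
              standardization: Boolean)(index: Int): Double = {
            if (index == numFeatures) {
              0.0 // never penalize the intercept
            } else if (standardization) {
              regParamL1 // uniform penalty on standardized features
            } else if (featuresStd(index) != 0.0) {
              // Reverse standardization: dividing by the std makes the penalty
              // on standardized data match the one on the original
              // (unstandardized) data.
              regParamL1 / featuresStd(index)
            } else {
              0.0 // constant feature: nothing to penalize
            }
          }

          def main(args: Array[String]): Unit = {
            val f = regParamL1Fun(0.1, Array(2.0, 0.5, 0.0), 3, standardization = false) _
            (0 to 3).foreach(i => println(s"index $i -> ${f(i)}"))
            // index 0 -> 0.05, index 1 -> 0.2,
            // index 2 -> 0.0 (constant), index 3 -> 0.0 (intercept)
          }
        }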

