Github user yanboliang commented on a diff in the pull request:

    https://github.com/apache/spark/pull/12819#discussion_r79561754
  
    --- Diff: mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala ---
    @@ -109,10 +119,88 @@ class NaiveBayes @Since("1.5.0") (
             s" numClasses=$numClasses, but thresholds has length ${$(thresholds).length}")
         }
     
    -    val oldDataset: RDD[OldLabeledPoint] =
    -      extractLabeledPoints(dataset).map(OldLabeledPoint.fromML)
    -    val oldModel = OldNaiveBayes.train(oldDataset, $(smoothing), $(modelType))
    -    NaiveBayesModel.fromOld(oldModel, this)
    +    val numFeatures = dataset.select(col($(featuresCol))).head().getAs[Vector](0).size
    +
    +    val requireNonnegativeValues: Vector => Unit = (v: Vector) => {
    +      val values = v match {
    +        case sv: SparseVector => sv.values
    +        case dv: DenseVector => dv.values
    +      }
    +      if (!values.forall(_ >= 0.0)) {
    +        throw new SparkException(s"Naive Bayes requires nonnegative feature values but found $v.")
    +      }
    +    }
    +
    +    val requireZeroOneBernoulliValues: Vector => Unit = (v: Vector) => {
    +      val values = v match {
    +        case sv: SparseVector => sv.values
    +        case dv: DenseVector => dv.values
    +      }
    +      if (!values.forall(v => v == 0.0 || v == 1.0)) {
    +        throw new SparkException(
    +          s"Bernoulli naive Bayes requires 0 or 1 feature values but found $v.")
    +      }
    +    }
    +
    +    val requireValues: Vector => Unit = {
    +      $(modelType) match {
    +        case Multinomial =>
    +          requireNonnegativeValues
    +        case Bernoulli =>
    +          requireZeroOneBernoulliValues
    +        case _ =>
    +          // This should never happen.
    +          throw new UnknownError(s"Invalid modelType: ${$(modelType)}.")
    +      }
    +    }
    +
    +    val w = if (!isDefined(weightCol) || $(weightCol).isEmpty) lit(1.0) else col($(weightCol))
    +
    +    val aggregated = dataset.select(col($(labelCol)).cast(DoubleType), w, col($(featuresCol))).rdd
    --- End diff ---
    
    It's better to keep the original comments and TODOs here; they can help developers and users keep improving the code.
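    
    For context, the selected (label, weight, features) rows above typically feed a per-label aggregation of weight sums and weighted feature sums. The sketch below is only an illustration of that kind of step, not the PR's exact code: `aggregateByLabel` is a hypothetical helper, and plain arrays stand in for whatever accumulation the actual implementation uses. It also shows the sort of inline comments worth preserving.
    
        import org.apache.spark.ml.linalg.Vector
        import org.apache.spark.rdd.RDD
    
        // Hypothetical helper, for illustration only: per label, accumulate the total
        // weight and the weighted sum of feature values from (label, (weight, features)).
        def aggregateByLabel(
            data: RDD[(Double, (Double, Vector))],
            numFeatures: Int): Array[(Double, (Double, Array[Double]))] = {
          data.aggregateByKey((0.0, Array.fill(numFeatures)(0.0)))(
            seqOp = { case ((weightSum, featureSum), (weight, features)) =>
              // accumulate this instance's weight and its weighted feature values
              features.foreachActive { (i, v) => featureSum(i) += weight * v }
              (weightSum + weight, featureSum)
            },
            combOp = { case ((ws1, fs1), (ws2, fs2)) =>
              // merge partial sums computed on different partitions
              var i = 0
              while (i < numFeatures) { fs1(i) += fs2(i); i += 1 }
              (ws1 + ws2, fs1)
            }
          ).collect().sortBy(_._1)  // one entry per label, sorted by label
        }
    
    Carrying over the original notes around a step like this keeps that context visible for whoever touches the code next.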

