srowen commented on a change in pull request #35893:
URL: https://github.com/apache/spark/pull/35893#discussion_r829082655
##########
File path: mllib/src/main/scala/org/apache/spark/ml/util/DatasetUtils.scala
##########
@@ -17,16 +17,57 @@
package org.apache.spark.ml.util
-import org.apache.spark.ml.linalg.{Vector, Vectors, VectorUDT}
+import org.apache.spark.ml.linalg._
import org.apache.spark.mllib.linalg.{Vector => OldVector, Vectors => OldVectors}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Column, Dataset, Row}
-import org.apache.spark.sql.functions.{col, udf}
+import org.apache.spark.sql.functions.{col, lit, udf}
import org.apache.spark.sql.types.{ArrayType, DoubleType, FloatType}
private[spark] object DatasetUtils {
+  private[ml] def getBinaryLabelCol(labelCol: String) = {
+    checkBinaryLabel(col(labelCol).cast(DoubleType))
+  }
+
+  private[ml] def getNonNegativeWeightCol(weightCol: Option[String]) = weightCol match {
+    case Some(w) if w.nonEmpty => checkNonNegativeWeight(col(w).cast(DoubleType))
+    case _ => lit(1.0)
+  }
+
+  private[ml] def getNonNanVectorCol(featuresCol: String) = {
+    checkNonNanVector(col(featuresCol))
+  }
+
+  private def checkBinaryLabel = udf {
Review comment:
Rather than using UDFs, I wonder if it's faster to just check whether isNaN() is true for any values using Spark SQL functions? Likewise for the 0/1 check, etc. Built-in expressions stay inside Catalyst's optimized, codegen'd plan, whereas a Scala UDF is opaque to the optimizer and adds serialization overhead per row.
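
As a rough illustration (a sketch only, not this PR's implementation; it assumes `raise_error`, which is available since Spark 3.1, and the error messages are made up here), the binary-label check could be expressed with built-in functions like this:

```scala
import org.apache.spark.sql.Column
import org.apache.spark.sql.functions.{concat, isnan, lit, raise_error, when}

// Sketch: same validation as the UDF, but expressed entirely with
// built-in SQL expressions so it stays in the Catalyst-generated plan.
private def checkBinaryLabel(labelCol: Column): Column = {
  when(labelCol.isNull, raise_error(lit("Labels MUST NOT be Null")))
    .when(isnan(labelCol), raise_error(lit("Labels MUST NOT be NaN")))
    .when(labelCol =!= 0.0 && labelCol =!= 1.0,
      raise_error(concat(lit("Labels MUST be in {0, 1}, but got "),
        labelCol.cast("string"))))
    .otherwise(labelCol)
}
```

One caveat with this approach: the exception surfaced by `raise_error` would differ from the `SparkException` a UDF throws, so any tests asserting on the error type/message would need adjusting.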