mob-ai commented on a change in pull request #26124: [SPARK-29224][ML]Implement Factorization Machines as a ml-pipeline component
URL: https://github.com/apache/spark/pull/26124#discussion_r356381039
########## File path: mllib/src/main/scala/org/apache/spark/ml/regression/FMRegressor.scala ##########
@@ -0,0 +1,839 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.ml.regression
+
+import scala.util.Random
+
+import breeze.linalg.{axpy => brzAxpy, norm => brzNorm, Vector => BV}
+import breeze.numerics.{sqrt => brzSqrt}
+import org.apache.hadoop.fs.Path
+
+import org.apache.spark.annotation.Since
+import org.apache.spark.internal.Logging
+import org.apache.spark.ml.{PredictionModel, Predictor, PredictorParams}
+import org.apache.spark.ml.linalg._
+import org.apache.spark.ml.linalg.BLAS._
+import org.apache.spark.ml.param._
+import org.apache.spark.ml.param.shared._
+import org.apache.spark.ml.regression.FactorizationMachines._
+import org.apache.spark.ml.util._
+import org.apache.spark.ml.util.Instrumentation.instrumented
+import org.apache.spark.mllib.{linalg => OldLinalg}
+import org.apache.spark.mllib.linalg.{Vector => OldVector, Vectors => OldVectors}
+import org.apache.spark.mllib.linalg.VectorImplicits._
+import org.apache.spark.mllib.optimization.{Gradient, GradientDescent, SquaredL2Updater, Updater}
+import org.apache.spark.mllib.util.MLUtils
+import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.{Dataset, Row}
+import org.apache.spark.sql.functions.col
+import org.apache.spark.storage.StorageLevel
+
+/**
+ * Params for Factorization Machines
+ */
+private[ml] trait FactorizationMachinesParams
+  extends PredictorParams
+  with HasMaxIter with HasStepSize with HasTol with HasSolver with HasSeed {
+
+  /**
+   * Param for dimensionality of the factors (> 0)
+   * @group param
+   */
+  @Since("3.0.0")
+  final val factorSize: IntParam = new IntParam(this, "factorSize",
+    "Dimensionality of the factor vectors, " +
+      "which are used to get pairwise interactions between variables",
+    ParamValidators.gt(0))
+
+  /** @group getParam */
+  @Since("3.0.0")
+  final def getFactorSize: Int = $(factorSize)
+
+  /**
+   * Param for whether to fit global bias term
+   * @group param
+   */
+  @Since("3.0.0")
+  final val fitBias: BooleanParam = new BooleanParam(this, "fitBias",
+    "whether to fit global bias term")
+
+  /** @group getParam */
+  @Since("3.0.0")
+  final def getFitBias: Boolean = $(fitBias)
+
+  /**
+   * Param for whether to fit linear term (aka 1-way term)
+   * @group param
+   */
+  @Since("3.0.0")
+  final val fitLinear: BooleanParam = new BooleanParam(this, "fitLinear",
+    "whether to fit linear term (aka 1-way term)")
+
+  /** @group getParam */
+  @Since("3.0.0")
+  final def getFitLinear: Boolean = $(fitLinear)
+
+  /**
+   * Param for L2 regularization parameter (>= 0)
+   * @group param
+   */
+  @Since("3.0.0")
+  final val regParam: DoubleParam = new DoubleParam(this, "regParam",
+    "the magnitude of L2-regularization",
+    ParamValidators.gtEq(0))
+
+  /** @group getParam */
+  @Since("3.0.0")
+  final def getRegParam: Double = $(regParam)
+
+  /**
+   * Param for mini-batch fraction, must be in range (0, 1]
+   * @group param
+   */
+  @Since("3.0.0")
+  final val miniBatchFraction: DoubleParam = new DoubleParam(this, "miniBatchFraction",
+    "fraction of the input data set that should be used for one iteration of gradient descent",
+    ParamValidators.inRange(0, 1, false, true))
+
+  /** @group getParam */
+  @Since("3.0.0")
+  final def getMiniBatchFraction: Double = $(miniBatchFraction)
+
+  /**
+   * Param for standard deviation of initial coefficients
+   * @group param
+   */
+  @Since("3.0.0")
+  final val initStd: DoubleParam = new DoubleParam(this, "initStd",
+    "standard deviation of initial coefficients", ParamValidators.gt(0))
+
+  /** @group getParam */
+  @Since("3.0.0")
+  final def getInitStd: Double = $(initStd)
+
+  /**
+   * The solver algorithm for optimization.
+   * Supported options: "gd", "adamW".
+   * Default: "adamW"
+   *
+   * @group param
+   */
+  @Since("3.0.0")
+  final override val solver: Param[String] = new Param[String](this, "solver",
+    "The solver algorithm for optimization. Supported options: " +
+      s"${supportedSolvers.mkString(", ")}. (Default adamW)",
+    ParamValidators.inArray[String](supportedSolvers))
+}
+
+private[ml] trait FactorizationMachines extends FactorizationMachinesParams {
+
+  private[ml] def initCoefficients(numFeatures: Int): OldVector = {
+    val rnd = new Random($(seed))
+    val initialCoefficients =
+      OldVectors.dense(
+        Array.fill($(factorSize) * numFeatures)(rnd.nextGaussian() * $(initStd)) ++
+          (if ($(fitLinear)) new Array[Double](numFeatures) else Array.emptyDoubleArray) ++
+          (if ($(fitBias)) new Array[Double](1) else Array.emptyDoubleArray))
+    initialCoefficients
+  }
+
+  private[ml] def trainImpl(
+      data: RDD[(Double, OldVector)],
+      numFeatures: Int,
+      loss: String
+    ): Vector = {
+
+    // initialize coefficients
+    val initialCoefficients = initCoefficients(numFeatures)
+    val coefficientsSize = initialCoefficients.size
+
+    // optimize coefficients with gradient descent
+    val gradient = parseLoss(loss, $(factorSize), $(fitBias), $(fitLinear), numFeatures)
+
+    val updater = parseSolver($(solver), coefficientsSize)
+
+    val optimizer = new GradientDescent(gradient, updater)
+      .setStepSize($(stepSize))
+      .setNumIterations($(maxIter))
+      .setRegParam($(regParam))
+      .setMiniBatchFraction($(miniBatchFraction))
+      .setConvergenceTol($(tol))
+    val coefficients = optimizer.optimize(data, initialCoefficients)
+    coefficients.asML
+  }
+}
+
+private[ml] object FactorizationMachines {
+
+  /** String name for "gd". */
+  val GD = "gd"
+
+  /** String name for "adamW". */
+  val AdamW = "adamW"
+
+  /** Set of solvers that FactorizationMachines supports. */
+  val supportedSolvers = Array(GD, AdamW)
+
+  /** String name for "logisticLoss". */
+  val LogisticLoss = "logisticLoss"
+
+  /** String name for "squaredError". */
+  val SquaredError = "squaredError"
+
+  /** Set of loss function names that FactorizationMachines supports. */
+  val supportedRegressorLosses = Array(SquaredError)
+  val supportedClassifierLosses = Array(LogisticLoss)
+  val supportedLosses = supportedRegressorLosses ++ supportedClassifierLosses
+
+  def parseSolver(solver: String, coefficientsSize: Int): Updater = {
+    solver match {
+      case GD => new SquaredL2Updater()
+      case AdamW => new AdamWUpdater(coefficientsSize)
+    }
+  }
+
+  def parseLoss(
+      lossFunc: String,
+      factorSize: Int,
+      fitBias: Boolean,
+      fitLinear: Boolean,
+      numFeatures: Int
+    ): BaseFactorizationMachinesGradient = {
+
+    lossFunc match {
+      case LogisticLoss =>
+        new LogisticFactorizationMachinesGradient(factorSize, fitBias, fitLinear, numFeatures)
+      case SquaredError =>
+        new MSEFactorizationMachinesGradient(factorSize, fitBias, fitLinear, numFeatures)
+      case _ => throw new IllegalArgumentException(s"loss function type $lossFunc is invalid")
+    }
+  }
+
+  def splitCoefficients(
+      coefficients: Vector,
+      numFeatures: Int,
+      factorSize: Int,
+      fitBias: Boolean,
+      fitLinear: Boolean
+    ): (Double, Vector, Matrix) = {
+
+    val coefficientsSize = numFeatures * factorSize +
+      (if (fitLinear) numFeatures else 0) + (if (fitBias) 1 else 0)
+    require(coefficientsSize == coefficients.size,
+      s"coefficients.size did not match the expected size $coefficientsSize")
+
+    val bias = if (fitBias) coefficients(coefficients.size - 1) else 0.0

Review comment:
   This is a suggestion from zhengruifeng. The GD method uses mllib.Vector, but model prediction uses ml.Vector, so he suggested implementing the predict path on the .ml side to avoid:
   1. vector conversion from .ml to .mllib
   2. keeping two copies of the model coefficients in memory; the FMModel is usually much larger than other linear models
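For context on that trade-off, here is a minimal sketch, not part of the patch, of the .ml/.mllib bridging that implementing predict on the .ml side avoids on the per-row path. The object name and toy vectors are made up; `Vectors.fromML` and `asML` are the standard Spark conversion entry points:

```scala
import org.apache.spark.ml.linalg.{Vectors => NewVectors}
import org.apache.spark.mllib.linalg.{Vectors => OldVectors}

// Illustrative only: shows the conversions the review comment refers to.
object VectorConversionSketch {
  def main(args: Array[String]): Unit = {
    // A feature row as the .ml pipeline sees it.
    val mlFeatures = NewVectors.dense(1.0, 0.0, 3.0)

    // If predict lived on the .mllib side, every call would first cross this
    // bridge, and the trained coefficients would exist in both representations.
    val mllibFeatures = OldVectors.fromML(mlFeatures)
    println(mllibFeatures)

    // The reverse direction, used once after training (see coefficients.asML
    // in trainImpl above).
    println(mllibFeatures.asML)
  }
}
```

Since FM coefficients have numFeatures * factorSize entries plus the optional linear and bias terms, avoiding a duplicate copy matters more here than for a plain linear model, which is the reviewer's second point.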
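The flat layout that initCoefficients and splitCoefficients in the hunk agree on is [pairwise factors | linear term if fitLinear | bias if fitBias]. A self-contained sketch with made-up sizes (plain Scala, no Spark needed) that mirrors that slicing:

```scala
// Hypothetical numbers; only the layout mirrors the patch.
object CoefficientLayoutSketch {
  def main(args: Array[String]): Unit = {
    val numFeatures = 4
    val factorSize = 2
    val fitLinear = true
    val fitBias = true

    // Same size formula as splitCoefficients in the hunk above.
    val expectedSize = numFeatures * factorSize +
      (if (fitLinear) numFeatures else 0) +
      (if (fitBias) 1 else 0)
    val coefficients = Array.tabulate(expectedSize)(_.toDouble)

    // Bias sits in the last slot when fitBias is set.
    val bias = if (fitBias) coefficients(coefficients.length - 1) else 0.0
    // The 1-way (linear) weights sit just before the bias.
    val linearStart = numFeatures * factorSize
    val linear =
      if (fitLinear) coefficients.slice(linearStart, linearStart + numFeatures)
      else Array.fill(numFeatures)(0.0)
    // The leading numFeatures * factorSize entries hold the 2-way factors.
    val factors = coefficients.take(numFeatures * factorSize)

    println(s"bias=$bias")
    println(s"linear=[${linear.mkString(", ")}]")
    println(s"factors holds ${factors.length} values")
  }
}
```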

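For completeness, a hypothetical end-to-end usage sketch of the estimator this PR adds. The setter names follow Spark's usual generated-setter convention for the params declared in the hunk, and model.intercept is assumed; none of this appears in the quoted code, so treat the whole block as an assumption about the final API:

```scala
import org.apache.spark.ml.regression.FMRegressor
import org.apache.spark.sql.SparkSession

object FMRegressorUsageSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("fm-regressor-sketch")
      .getOrCreate()

    // Any DataFrame with "label"/"features" columns works; libsvm is just handy.
    val train = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")

    val fm = new FMRegressor()
      .setFactorSize(8)           // length of each pairwise-interaction factor vector
      .setFitLinear(true)         // keep the 1-way term
      .setRegParam(0.01)          // L2 regularization, >= 0
      .setMiniBatchFraction(1.0)  // full-batch gradient steps
      .setSolver("adamW")         // "gd" or "adamW", per supportedSolvers
      .setMaxIter(100)

    val model = fm.fit(train)
    println(s"intercept = ${model.intercept}")  // assumed model field

    spark.stop()
  }
}
```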