[
https://issues.apache.org/jira/browse/FLINK-1807?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14534044#comment-14534044
]
ASF GitHub Bot commented on FLINK-1807:
---------------------------------------
Github user thvasilo commented on a diff in the pull request:
https://github.com/apache/flink/pull/613#discussion_r29922360
--- Diff:
flink-staging/flink-ml/src/main/scala/org/apache/flink/ml/optimization/Regularization.scala
---
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.ml.optimization
+
+import org.apache.flink.ml.math.{Vector => FlinkVector, BLAS}
+import org.apache.flink.ml.math.Breeze._
+
+import breeze.numerics._
+import breeze.linalg.{norm => BreezeNorm}
+
+
+
+/** Represents a type of regularization penalty
+ *
+ * Regularization penalties are used to restrict the optimization problem to solutions with
+ * certain desirable characteristics, such as sparsity for the L1 penalty, or penalizing large
+ * weights for the L2 penalty.
+ *
+ * The regularization term, $R(w)$ is added to the objective function, $f(w) = L(w) + \lambda R(w)$
+ * where $\lambda$ is the regularization parameter used to tune the amount of regularization
+ * applied.
+ */
+abstract class Regularization extends Serializable {
+
+ /** Updates the weights by taking a step according to the gradient and regularization applied
+ *
+ * @param oldWeights The weights to be updated
+ * @param gradient The gradient according to which we will update the weights
+ * @param effectiveStepSize The effective step size for this iteration
+ * @param regParameter The regularization parameter, $\lambda$.
+ */
+ def takeStep(
+ oldWeights: FlinkVector,
+ gradient: FlinkVector,
+ effectiveStepSize: Double,
+ regParameter: Double) {
+ BLAS.axpy(-effectiveStepSize, gradient, oldWeights)
+ }
+
+ /** Adds the regularization term to the loss value
+ *
+ * @param loss The loss value, before applying regularization.
+ * @param weightVector The current vector of weights.
+ * @param regularizationParameter The regularization parameter, $\lambda$.
+ * @return The loss value with regularization applied.
+ */
+ def regLoss(loss: Double, weightVector: FlinkVector, regularizationParameter: Double): Double
+
+}
+
+/** Abstract class for regularization penalties that are differentiable
+ *
+ */
+abstract class DiffRegularization extends Regularization {
+
+ /** Compute the regularized gradient loss for the given data.
+ * The provided lossGradient is updated in place.
+ *
+ * @param loss The loss value without regularization.
+ * @param weightVector The current vector of weights.
+ * @param lossGradient The loss gradient, without regularization. Updated in-place.
+ * @param regParameter The regularization parameter, $\lambda$.
+ * @return The loss value with regularization applied.
+ */
+ def regularizedLossAndGradient(
+ loss: Double,
+ weightVector: FlinkVector,
+ lossGradient: FlinkVector,
+ regParameter: Double) : Double ={
+ val adjustedLoss = regLoss(loss, weightVector, regParameter)
+ regGradient(weightVector, lossGradient, regParameter)
+
+ adjustedLoss
+ }
+
+ /** Adds the regularization gradient term to the loss gradient. The gradient is updated in place.
+ *
+ * @param weightVector The current vector of weights
+ * @param lossGradient The loss gradient, without regularization. Updated in-place.
+ * @param regParameter The regularization parameter, $\lambda$.
+ */
+ def regGradient(
+ weightVector: FlinkVector,
+ lossGradient: FlinkVector,
+ regParameter: Double)
+}
+
+/** Performs no regularization, equivalent to $R(w) = 0$ **/
+class NoRegularization extends Regularization {
+ /** Adds the regularization term to the loss value
+ *
+ * @param loss The loss value, before applying regularization
+ * @param weightVector The current vector of weights
+ * @param regParameter The regularization parameter, $\lambda$
+ * @return The loss value with regularization applied.
+ */
+ override def regLoss(
+ loss: Double,
+ weightVector: FlinkVector,
+ regParameter: Double): Double = {loss}
+}
+
+/** $L_2$ regularization penalty.
+ *
+ * Penalizes large weights, favoring solutions with more small weights rather than few large ones.
+ *
+ */
+class L2Regularization extends DiffRegularization {
+
+ /** Adds the regularization term to the loss value
+ *
+ * @param loss The loss value, before applying regularization
+ * @param weightVector The current vector of weights
+ * @param regParameter The regularization parameter, $\lambda$
+ * @return The loss value with regularization applied.
+ */
+ override def regLoss(loss: Double, weightVector: FlinkVector, regParameter: Double)
+ : Double = {
+ val brzVector = weightVector.asBreeze
+ loss + regParameter * (brzVector dot brzVector) / 2
+ }
+
+ /** Adds the regularization gradient term to the loss gradient. The gradient is updated in place.
+ *
+ * @param weightVector The current vector of weights.
+ * @param lossGradient The loss gradient, without regularization. Updated in-place.
+ * @param regParameter The regularization parameter, $\lambda$.
+ */
+ override def regGradient(
+ weightVector: FlinkVector,
+ lossGradient: FlinkVector,
+ regParameter: Double): Unit = {
+ BLAS.axpy(regParameter, weightVector, lossGradient)
+ }
+}
+
+/** $L_1$ regularization penalty.
+ *
+ * The $L_1$ penalty can be used to drive a number of the solution coefficients to 0, thereby
+ * producing sparse solutions.
+ *
+ */
+class L1Regularization extends Regularization {
+ /** Calculates and applies the regularization amount and the regularization parameter
+ *
+ * Implementation was taken from the Apache Spark Mllib library:
+ * http://git.io/vfZIT
+ *
+ * @param oldWeights The weights to be updated
+ * @param gradient The gradient according to which we will update the weights
+ * @param effectiveStepSize The effective step size for this iteration
+ * @param regParameter The regularization parameter to be applied in the case of L1
+ * regularization
+ */
+ override def takeStep(
+ oldWeights: FlinkVector,
+ gradient: FlinkVector,
+ effectiveStepSize: Double,
+ regParameter: Double) {
+ BLAS.axpy(-effectiveStepSize, gradient, oldWeights)
+ val brzWeights = oldWeights.asBreeze
+
+ // Apply proximal operator (soft thresholding)
+ val shrinkageVal = regParameter * effectiveStepSize
+ var i = 0
+ while (i < brzWeights.length) {
+ val wi = brzWeights(i)
+ brzWeights(i) = signum(wi) * math.max(0.0, abs(wi) - shrinkageVal)
+ i += 1
+ }
+
+ BLAS.copy(brzWeights.fromBreeze, oldWeights)
+
+ // We could maybe define a Breeze Universal function for the proximal operator, and test if it's
+ // faster than the for loop + copy above
+ // brzWeights = signum(brzWeights) * max(0.0, abs(brzWeights) - shrinkageVal)
+
+ }
+
+ /** Adds the regularization term to the loss value
+ *
+ * @param loss The loss value, before applying regularization.
+ * @param weightVector The current vector of weights.
+ * @param regularizationParameter The regularization parameter, $\lambda$.
+ * @return The loss value with regularization applied.
+ */
+ override def regLoss(loss: Double, weightVector: FlinkVector, regularizationParameter: Double):
+ Double = {
+ loss + BreezeNorm(weightVector.asBreeze, 1.0) * regularizationParameter
--- End diff ---
For the 1-norm it doesn't use pow as far as I can tell:
https://github.com/scalanlp/breeze/blob/master/math/src/main/scala/breeze/linalg/operators/DenseVectorOps.scala#L702
Snippet:
```scala
implicit def canNormField[T:Field]: norm.Impl2[DenseVector[T],Double,Double] = {
val f = implicitly[Field[T]]
new norm.Impl2[DenseVector[T],Double,Double] {
def apply(v: DenseVector[T],n: Double) = {
import v._
if (n == 1) {
var sum = 0.0
foreach (v => sum += f.sNorm(v) )
sum
} else if (n == 2) {
var sum = 0.0
foreach (v => { val nn = f.sNorm(v); sum += nn * nn })
math.sqrt(sum)
} else if (n == Double.PositiveInfinity) {
var max = 0.0
foreach (v => { val nn = f.sNorm(v); if (nn > max) max = nn })
max
} else {
var sum = 0.0
foreach (v => { val nn = f.sNorm(v); sum += math.pow(nn,n) })
math.pow(sum, 1.0 / n)
}
}
}
}
```
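As a quick sanity check (my own untested sketch, not part of the PR), the snippet below confirms that `norm(v, 1.0)` is just the sum of absolute values, and sketches the kind of Breeze universal function for the proximal operator that the TODO comment in the diff mentions. The `softThreshold` name and the reliance on `MappingUFunc` to lift the scalar implementation onto vectors are assumptions on my part, not anything that exists in the PR or in Breeze:
```scala
import breeze.generic.{MappingUFunc, UFunc}
import breeze.linalg.{DenseVector, norm}

object SoftThresholdSketch {

  // Hypothetical element-wise soft-thresholding UFunc; MappingUFunc lifts the
  // scalar Impl2 so the same operator can be applied to a DenseVector with a
  // scalar shrinkage value.
  object softThreshold extends UFunc with MappingUFunc {
    implicit object implDoubleDouble extends Impl2[Double, Double, Double] {
      def apply(wi: Double, shrinkage: Double): Double =
        math.signum(wi) * math.max(math.abs(wi) - shrinkage, 0.0)
    }
  }

  def main(args: Array[String]): Unit = {
    val v = DenseVector(1.0, -2.0, 3.0)

    // 1-norm is the plain sum of absolute values, no pow/sqrt involved
    println(norm(v, 1.0))          // expected: 6.0

    // Proximal step: shrink each coefficient towards zero by the shrinkage value
    println(softThreshold(v, 1.5)) // expected: DenseVector(0.0, -0.5, 1.5)
  }
}
```
If the UFunc route turns out to be no faster than the while loop, the loop plus copy is probably fine to keep as is.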
> Stochastic gradient descent optimizer for ML library
> ----------------------------------------------------
>
> Key: FLINK-1807
> URL: https://issues.apache.org/jira/browse/FLINK-1807
> Project: Flink
> Issue Type: Improvement
> Components: Machine Learning Library
> Reporter: Till Rohrmann
> Assignee: Theodore Vasiloudis
> Labels: ML
>
> Stochastic gradient descent (SGD) is a widely used optimization technique in
> different ML algorithms. Thus, it would be helpful to provide a generalized
> SGD implementation which can be instantiated with the respective gradient
> computation. Such a building block would make the development of future
> algorithms easier.
--
This message was sent by Atlassian JIRA
(v6.3.4#6332)