Updated Branches:
  refs/heads/master 73dfd42fb -> fe8a3546f

Correct L2 regularized weight update with canonical form


Project: http://git-wip-us.apache.org/repos/asf/incubator-spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-spark/commit/e91ad3f1
Tree: http://git-wip-us.apache.org/repos/asf/incubator-spark/tree/e91ad3f1
Diff: http://git-wip-us.apache.org/repos/asf/incubator-spark/diff/e91ad3f1

Branch: refs/heads/master
Commit: e91ad3f164b64e727f41ced6ae20d70ca4c92521
Parents: d749d47
Author: Sean Owen <so...@cloudera.com>
Authored: Sat Jan 18 12:53:01 2014 +0000
Committer: Sean Owen <so...@cloudera.com>
Committed: Sat Jan 18 12:53:01 2014 +0000

----------------------------------------------------------------------
 .../scala/org/apache/spark/mllib/optimization/Updater.scala    | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/e91ad3f1/mllib/src/main/scala/org/apache/spark/mllib/optimization/Updater.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/optimization/Updater.scala b/mllib/src/main/scala/org/apache/spark/mllib/optimization/Updater.scala
index 4c51f4f..37124f2 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/optimization/Updater.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/optimization/Updater.scala
@@ -86,13 +86,17 @@ class L1Updater extends Updater {
 
 /**
  * Updater that adjusts the learning rate and performs L2 regularization
+ *
+ * See, for example, the explanation of gradient and loss with L2 regularization on slides 21-22
+ * of <a href="http://people.cs.umass.edu/~sheldon/teaching/2012fa/ml/files/lec7-annotated.pdf">
+ * these slides</a>.
  */
 class SquaredL2Updater extends Updater {
   override def compute(weightsOld: DoubleMatrix, gradient: DoubleMatrix,
      stepSize: Double, iter: Int, regParam: Double): (DoubleMatrix, Double) = {
     val thisIterStepSize = stepSize / math.sqrt(iter)
     val normGradient = gradient.mul(thisIterStepSize)
-    val newWeights = weightsOld.sub(normGradient).div(2.0 * thisIterStepSize * regParam + 1.0)
+    val newWeights = weightsOld.mul(1.0 - 2.0 * thisIterStepSize * regParam).sub(normGradient)
     (newWeights, pow(newWeights.norm2, 2.0) * regParam)
   }
 }
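
----------------------------------------------------------------------

For reference, the canonical gradient step on the L2-regularized objective
f(w) + regParam * ||w||^2 uses the gradient 2 * regParam * w of the
regularization term:

    w' = w - eta * (grad f(w) + 2 * regParam * w)
       = (1 - 2 * eta * regParam) * w - eta * grad f(w)

with eta = stepSize / sqrt(iter). This is what the new line computes: the
old weights shrink multiplicatively by (1 - 2 * eta * regParam) before the
gradient step, rather than (w - eta * grad f(w)) being divided by
(2 * eta * regParam + 1) afterwards, as the old line did.

Below is a minimal, self-contained sketch of the same update using plain
Scala arrays in place of the jblas DoubleMatrix; the object and method
names are illustrative only and not part of MLlib:

    object SquaredL2UpdateSketch {

      // w' = (1 - 2 * eta * regParam) * w - eta * g, with eta = stepSize / sqrt(iter)
      def compute(weightsOld: Array[Double], gradient: Array[Double],
          stepSize: Double, iter: Int, regParam: Double): (Array[Double], Double) = {
        val thisIterStepSize = stepSize / math.sqrt(iter)
        val shrinkage = 1.0 - 2.0 * thisIterStepSize * regParam
        val newWeights = weightsOld.zip(gradient).map { case (w, g) =>
          shrinkage * w - thisIterStepSize * g
        }
        // Second element is the regularization term of the loss: regParam * ||w'||^2,
        // matching pow(newWeights.norm2, 2.0) * regParam in the patched Updater.
        val norm2Squared = newWeights.map(w => w * w).sum
        (newWeights, regParam * norm2Squared)
      }

      def main(args: Array[String]): Unit = {
        // eta = 1.0 / sqrt(4) = 0.5, shrinkage = 1 - 2 * 0.5 * 0.1 = 0.9
        val (w, reg) = compute(Array(1.0, -2.0), Array(0.5, 0.5),
          stepSize = 1.0, iter = 4, regParam = 0.1)
        println(w.mkString(", "))   // 0.65, -2.05
        println(reg)                // 0.1 * (0.65^2 + 2.05^2) = 0.4625
      }
    }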
