Author: tommaso
Date: Fri Jun 21 19:58:00 2013
New Revision: 1495568

URL: http://svn.apache.org/r1495568
Log:
fixed indent

Modified:
    labs/yay/trunk/core/src/main/java/org/apache/yay/core/BackPropagationLearningStrategy.java
    labs/yay/trunk/core/src/main/java/org/apache/yay/core/LogisticRegressionCostFunction.java
    labs/yay/trunk/core/src/main/java/org/apache/yay/core/TanhFunction.java
    labs/yay/trunk/core/src/test/java/org/apache/yay/core/BackPropagationLearningStrategyTest.java
    labs/yay/trunk/core/src/test/java/org/apache/yay/core/LogisticRegressionCostFunctionTest.java

Modified: labs/yay/trunk/core/src/main/java/org/apache/yay/core/BackPropagationLearningStrategy.java
URL: http://svn.apache.org/viewvc/labs/yay/trunk/core/src/main/java/org/apache/yay/core/BackPropagationLearningStrategy.java?rev=1495568&r1=1495567&r2=1495568&view=diff
==============================================================================
--- labs/yay/trunk/core/src/main/java/org/apache/yay/core/BackPropagationLearningStrategy.java (original)
+++ labs/yay/trunk/core/src/main/java/org/apache/yay/core/BackPropagationLearningStrategy.java Fri Jun 21 19:58:00 2013
@@ -78,7 +78,7 @@ public class BackPropagationLearningStra
        double newCost = costFunction.calculateAggregatedCost(trainingExamples, hypothesis);
 
         if (newCost > cost) {
-          throw new RuntimeException("failed to converge at iteration " + iterations + " with alpha "+ alpha +" : cost going from " + cost + " to " + newCost);
+          throw new RuntimeException("failed to converge at iteration " + iterations + " with alpha " + alpha + " : cost going from " + cost + " to " + newCost);
        } else if (cost == newCost || newCost < threshold || iterations > MAX_ITERATIONS) {
          System.out.println("successfully converged with alpha " + alpha + " after " + iterations + " iterations with cost " + newCost + " and parameters " + Arrays.toString(hypothesis.getParameters()));
           break;
@@ -97,91 +97,90 @@ public class BackPropagationLearningStra
         hypothesis.setParameters(updatedWeights);
 
         iterations++;
-        }
-      }
-      catch (Exception e) {
-        throw new WeightLearningException("error during backprop learning", e);
       }
+    } catch (Exception e) {
+      throw new WeightLearningException("error during backprop learning", e);
+    }
 
     return updatedWeights;
   }
 
-    private RealMatrix[] calculateDerivatives(RealMatrix[] weightsMatrixSet, TrainingSet<Double, Double> trainingExamples) throws WeightLearningException {
-        // set up the accumulator matrix(es)
-        RealMatrix[] triangle = new RealMatrix[weightsMatrixSet.length];
-        RealVector[] deltaVectors = new RealVector[weightsMatrixSet.length];
-
-        int noOfMatrixes = weightsMatrixSet.length - 1;
-        double count = 0;
-        for (TrainingExample<Double, Double> trainingExample : trainingExamples) {
-          try {
-            // get activations from feed forward propagation
-            RealVector[] activations = predictionStrategy.debugOutput(ConversionUtils.toValuesCollection(trainingExample.getFeatures()), weightsMatrixSet);
-
-            // calculate output error (corresponding to the last delta^l)
-            RealVector nextLayerDelta = calculateOutputError(trainingExample, activations);
-
-            deltaVectors[noOfMatrixes] = nextLayerDelta;
-
-            // back prop the error and update the deltas accordingly
-            for (int l = noOfMatrixes; l > 0; l--) {
-              RealVector currentActivationsVector = activations[l - 1];
-              nextLayerDelta = calculateDeltaVector(weightsMatrixSet[l], currentActivationsVector, nextLayerDelta);
-
-              // collect delta vectors for this example
-              deltaVectors[l - 1] = nextLayerDelta;
-            }
-
-            RealVector[] newActivations = new RealVector[activations.length];
-            newActivations[0] = ConversionUtils.toRealVector(ConversionUtils.toValuesCollection(trainingExample.getFeatures()));
-            for (int k = 0; k < activations.length - 1; k++) {
-                newActivations[k+1] = activations[k];
-            }
-
-            // update triangle (big delta matrix)
-            updateTriangle(triangle, newActivations, deltaVectors, weightsMatrixSet);
-
-          } catch (Exception e) {
-            throw new WeightLearningException("error during derivatives calculation", e);
-          }
-          count++;
+  private RealMatrix[] calculateDerivatives(RealMatrix[] weightsMatrixSet, TrainingSet<Double, Double> trainingExamples) throws WeightLearningException {
+    // set up the accumulator matrix(es)
+    RealMatrix[] triangle = new RealMatrix[weightsMatrixSet.length];
+    RealVector[] deltaVectors = new RealVector[weightsMatrixSet.length];
+
+    int noOfMatrixes = weightsMatrixSet.length - 1;
+    double count = 0;
+    for (TrainingExample<Double, Double> trainingExample : trainingExamples) {
+      try {
+        // get activations from feed forward propagation
+        RealVector[] activations = predictionStrategy.debugOutput(ConversionUtils.toValuesCollection(trainingExample.getFeatures()), weightsMatrixSet);
+
+        // calculate output error (corresponding to the last delta^l)
+        RealVector nextLayerDelta = calculateOutputError(trainingExample, activations);
+
+        deltaVectors[noOfMatrixes] = nextLayerDelta;
+
+        // back prop the error and update the deltas accordingly
+        for (int l = noOfMatrixes; l > 0; l--) {
+          RealVector currentActivationsVector = activations[l - 1];
+          nextLayerDelta = calculateDeltaVector(weightsMatrixSet[l], currentActivationsVector, nextLayerDelta);
+
+          // collect delta vectors for this example
+          deltaVectors[l - 1] = nextLayerDelta;
         }
 
-        return createDerivatives(triangle, count);
+        RealVector[] newActivations = new RealVector[activations.length];
+        newActivations[0] = ConversionUtils.toRealVector(ConversionUtils.toValuesCollection(trainingExample.getFeatures()));
+        for (int k = 0; k < activations.length - 1; k++) {
+          newActivations[k + 1] = activations[k];
+        }
+
+        // update triangle (big delta matrix)
+        updateTriangle(triangle, newActivations, deltaVectors, weightsMatrixSet);
+
+      } catch (Exception e) {
+        throw new WeightLearningException("error during derivatives calculation", e);
+      }
+      count++;
     }
 
-    private RealMatrix[] updateWeights(RealMatrix[] weightsMatrixSet, RealMatrix[] derivatives, double alpha) {
-        RealMatrix[] updatedParameters = new RealMatrix[weightsMatrixSet.length];
-        for (int l = 0; l < weightsMatrixSet.length; l++) {
-            double[][] updatedWeights = weightsMatrixSet[l].getData();
-            for (int i = 0; i < updatedWeights.length; i++) {
-                for (int j = 0; j < updatedWeights[i].length; j++) {
-                    updatedWeights[i][j] = updatedWeights[i][j] - alpha * derivatives[l].getData()[i][j];
-                }
-            }
-            updatedParameters[l] = new Array2DRowRealMatrix(updatedWeights);
+    return createDerivatives(triangle, count);
+  }
+
+  private RealMatrix[] updateWeights(RealMatrix[] weightsMatrixSet, RealMatrix[] derivatives, double alpha) {
+    RealMatrix[] updatedParameters = new RealMatrix[weightsMatrixSet.length];
+    for (int l = 0; l < weightsMatrixSet.length; l++) {
+      double[][] updatedWeights = weightsMatrixSet[l].getData();
+      for (int i = 0; i < updatedWeights.length; i++) {
+        for (int j = 0; j < updatedWeights[i].length; j++) {
+          updatedWeights[i][j] = updatedWeights[i][j] - alpha * derivatives[l].getData()[i][j];
         }
-        return updatedParameters;
+      }
+      updatedParameters[l] = new Array2DRowRealMatrix(updatedWeights);
     }
+    return updatedParameters;
+  }
 
-    private RealMatrix[] createDerivatives(RealMatrix[] triangle, double count) {
-        RealMatrix[] derivatives = new RealMatrix[triangle.length];
-        for (int i = 0; i < triangle.length; i++) {
-          // TODO : introduce regularization diversification on bias term (currently not regularized)
-          derivatives[i] = triangle[i].scalarMultiply(1d / count);
-        }
-        return derivatives;
+  private RealMatrix[] createDerivatives(RealMatrix[] triangle, double count) {
+    RealMatrix[] derivatives = new RealMatrix[triangle.length];
+    for (int i = 0; i < triangle.length; i++) {
+      // TODO : introduce regularization diversification on bias term (currently not regularized)
+      derivatives[i] = triangle[i].scalarMultiply(1d / count);
     }
+    return derivatives;
+  }
 
-    private void updateTriangle(RealMatrix[] triangle, RealVector[] activations, RealVector[] deltaVectors, RealMatrix[] weightsMatrixSet) {
-      for (int l = weightsMatrixSet.length - 1; l >= 0; l--) {
-          RealMatrix realMatrix = deltaVectors[l].outerProduct(activations[l]);
-          if (triangle[l] == null) {
-              triangle[l] = realMatrix;
-          } else {
-              triangle[l] = triangle[l].add(realMatrix);
-          }
+  private void updateTriangle(RealMatrix[] triangle, RealVector[] activations, RealVector[] deltaVectors, RealMatrix[] weightsMatrixSet) {
+    for (int l = weightsMatrixSet.length - 1; l >= 0; l--) {
+      RealMatrix realMatrix = deltaVectors[l].outerProduct(activations[l]);
+      if (triangle[l] == null) {
+        triangle[l] = realMatrix;
+      } else {
+        triangle[l] = triangle[l].add(realMatrix);
       }
+    }
   }
 
  private RealVector calculateDeltaVector(RealMatrix thetaL, RealVector activationsVector, RealVector nextLayerDelta) {
@@ -198,11 +197,9 @@ public class BackPropagationLearningStra
     int sampleOutputIntValue = trainingExample.getOutput().intValue();
     if (sampleOutputIntValue < sampleOutput.length) {
       sampleOutput[sampleOutputIntValue] = 1d;
-    }
-    else if (sampleOutput.length == 1){
+    } else if (sampleOutput.length == 1) {
       sampleOutput[0] = trainingExample.getOutput();
-    }
-    else {
+    } else {
       throw new RuntimeException("problem with multiclass output mapping");
     }
    RealVector learnedOutputRealVector = new ArrayRealVector(sampleOutput); // turn example output to a vector
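
For reference, the update applied per layer in updateWeights above is plain batch
gradient descent, theta[i][j] := theta[i][j] - alpha * dTheta[i][j]. Below is a
minimal standalone sketch of the same step using the Commons Math API; the class
and variable names are hypothetical and not part of this commit:

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.RealMatrix;

public class GradientStepSketch {
  // one gradient-descent step: theta - alpha * dTheta, element-wise
  static RealMatrix step(RealMatrix theta, RealMatrix dTheta, double alpha) {
    return theta.subtract(dTheta.scalarMultiply(alpha));
  }

  public static void main(String[] args) {
    RealMatrix theta = new Array2DRowRealMatrix(new double[][]{{0.5, -0.2}, {0.1, 0.3}});
    RealMatrix dTheta = new Array2DRowRealMatrix(new double[][]{{0.05, 0.01}, {-0.02, 0.04}});
    // after one step with alpha = 0.1: {{0.495, -0.201}, {0.102, 0.296}}
    System.out.println(step(theta, dTheta, 0.1));
  }
}

RealMatrix.subtract and scalarMultiply perform the same element-wise arithmetic
as the nested loops in the diff, without copying via getData().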

Modified: labs/yay/trunk/core/src/main/java/org/apache/yay/core/LogisticRegressionCostFunction.java
URL: http://svn.apache.org/viewvc/labs/yay/trunk/core/src/main/java/org/apache/yay/core/LogisticRegressionCostFunction.java?rev=1495568&r1=1495567&r2=1495568&view=diff
==============================================================================
--- labs/yay/trunk/core/src/main/java/org/apache/yay/core/LogisticRegressionCostFunction.java (original)
+++ labs/yay/trunk/core/src/main/java/org/apache/yay/core/LogisticRegressionCostFunction.java Fri Jun 21 19:58:00 2013
@@ -44,7 +44,7 @@ public class LogisticRegressionCostFunct
 
   @Override
  public Double calculateAggregatedCost(TrainingSet<Double, Double> trainingExamples,
-                              Hypothesis<RealMatrix, Double, Double> hypothesis) throws Exception {
+                                        Hypothesis<RealMatrix, Double, Double> hypothesis) throws Exception {
 
     Double errorTerm = calculateErrorTerm(hypothesis, trainingExamples);
    Double regularizationTerm = calculateRegularizationTerm(hypothesis, trainingExamples);
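
A note on the method whose signature is being re-aligned: calculateAggregatedCost
sums an error term and a regularization term. For standard regularized logistic
regression that aggregate is typically J(theta) = -(1/m) * sum_i [ y_i * log(h(x_i))
+ (1 - y_i) * log(1 - h(x_i)) ] + (lambda / (2m)) * sum_j theta_j^2. The exact
formulation used by this class is not shown in the diff, so take this as the
textbook form only.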

Modified: labs/yay/trunk/core/src/main/java/org/apache/yay/core/TanhFunction.java
URL: http://svn.apache.org/viewvc/labs/yay/trunk/core/src/main/java/org/apache/yay/core/TanhFunction.java?rev=1495568&r1=1495567&r2=1495568&view=diff
==============================================================================
--- labs/yay/trunk/core/src/main/java/org/apache/yay/core/TanhFunction.java (original)
+++ labs/yay/trunk/core/src/main/java/org/apache/yay/core/TanhFunction.java Fri Jun 21 19:58:00 2013
@@ -24,8 +24,8 @@ import org.apache.yay.ActivationFunction
  * Tanh activation function
  */
 public class TanhFunction implements ActivationFunction<Double> {
-    @Override
-    public Double apply(Double signal) {
-        return Math.tanh(signal);
-    }
+  @Override
+  public Double apply(Double signal) {
+    return Math.tanh(signal);
+  }
 }
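
The reformatted class is a thin wrapper around Math.tanh. A quick usage sketch,
assuming only the ActivationFunction interface imported above:

ActivationFunction<Double> tanh = new TanhFunction();
System.out.println(tanh.apply(0.5)); // prints ~0.46211715726, i.e. Math.tanh(0.5)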

Modified: labs/yay/trunk/core/src/test/java/org/apache/yay/core/BackPropagationLearningStrategyTest.java
URL: http://svn.apache.org/viewvc/labs/yay/trunk/core/src/test/java/org/apache/yay/core/BackPropagationLearningStrategyTest.java?rev=1495568&r1=1495567&r2=1495568&view=diff
==============================================================================
--- labs/yay/trunk/core/src/test/java/org/apache/yay/core/BackPropagationLearningStrategyTest.java (original)
+++ labs/yay/trunk/core/src/test/java/org/apache/yay/core/BackPropagationLearningStrategyTest.java Fri Jun 21 19:58:00 2013
@@ -18,9 +18,6 @@
  */
 package org.apache.yay.core;
 
-import java.util.ArrayList;
-import java.util.Collection;
-
 import org.apache.commons.math3.linear.Array2DRowRealMatrix;
 import org.apache.commons.math3.linear.RealMatrix;
 import org.apache.yay.PredictionStrategy;
@@ -29,6 +26,9 @@ import org.apache.yay.TrainingSet;
 import org.apache.yay.core.utils.ExamplesFactory;
 import org.junit.Test;
 
+import java.util.ArrayList;
+import java.util.Collection;
+
 import static junit.framework.Assert.assertFalse;
 import static junit.framework.Assert.assertNotNull;
 
@@ -90,7 +90,7 @@ public class BackPropagationLearningStra
     RealMatrix[] initialWeights = new RealMatrix[3];
    initialWeights[0] = new Array2DRowRealMatrix(new double[][]{{0d, 0d, 0d}, {1d, Math.random(), Math.random()}, {1d, Math.random(), Math.random()}, {1d, Math.random(), Math.random()}});
    initialWeights[1] = new Array2DRowRealMatrix(new double[][]{{0d, 0d, 0d, 0d}, {1d, Math.random(), Math.random(), Math.random()}, {1d, Math.random(), Math.random(), Math.random()}, {1d, Math.random(), Math.random(), Math.random()}});
-    initialWeights[2] = new Array2DRowRealMatrix(new double[][]{{1d,Math.random(), Math.random(), Math.random()}});
+    initialWeights[2] = new Array2DRowRealMatrix(new double[][]{{1d, Math.random(), Math.random(), Math.random()}});
 
    Collection<TrainingExample<Double, Double>> samples = createSamples(500000, 2);
    TrainingSet<Double, Double> trainingSet = new TrainingSet<Double, Double>(samples);
@@ -106,10 +106,10 @@ public class BackPropagationLearningStra
    Collection<TrainingExample<Double, Double>> trainingExamples = new ArrayList<TrainingExample<Double, Double>>(size);
     Double[] featureValues = new Double[noOfFeatures];
     for (int i = 0; i < size; i++) {
-        for (int j = 0; j < noOfFeatures; j++) {
-            featureValues[j] = Math.random();
-        }
-        trainingExamples.add(ExamplesFactory.createDoubleTrainingExample(1d, featureValues));
+      for (int j = 0; j < noOfFeatures; j++) {
+        featureValues[j] = Math.random();
+      }
+      trainingExamples.add(ExamplesFactory.createDoubleTrainingExample(1d, featureValues));
     }
     return trainingExamples;
   }

Modified: labs/yay/trunk/core/src/test/java/org/apache/yay/core/LogisticRegressionCostFunctionTest.java
URL: http://svn.apache.org/viewvc/labs/yay/trunk/core/src/test/java/org/apache/yay/core/LogisticRegressionCostFunctionTest.java?rev=1495568&r1=1495567&r2=1495568&view=diff
==============================================================================
--- labs/yay/trunk/core/src/test/java/org/apache/yay/core/LogisticRegressionCostFunctionTest.java (original)
+++ labs/yay/trunk/core/src/test/java/org/apache/yay/core/LogisticRegressionCostFunctionTest.java Fri Jun 21 19:58:00 2013
@@ -24,12 +24,6 @@ import org.apache.yay.CostFunction;
 import org.apache.yay.NeuralNetwork;
 import org.apache.yay.TrainingExample;
 import org.apache.yay.TrainingSet;
-import org.apache.yay.core.FeedForwardStrategy;
-import org.apache.yay.core.LogisticRegressionCostFunction;
-import org.apache.yay.core.MaxSelectionFunction;
-import org.apache.yay.core.NeuralNetworkFactory;
-import org.apache.yay.core.SigmoidFunction;
-import org.apache.yay.core.VoidLearningStrategy;
 import org.apache.yay.core.utils.ExamplesFactory;
 import org.junit.Before;
 import org.junit.Test;
@@ -44,7 +38,7 @@ import static org.junit.Assert.assertTru
  */
 public class LogisticRegressionCostFunctionTest {
 
-  private CostFunction<RealMatrix,Double,Double> costFunction;
+  private CostFunction<RealMatrix, Double, Double> costFunction;
   private TrainingSet<Double, Double> trainingSet;
 
   @Before
@@ -62,7 +56,6 @@ public class LogisticRegressionCostFunct
     trainingExamples.add(example4);
     trainingSet = new TrainingSet<Double, Double>(trainingExamples);
 
-
   }
 
   @Test
@@ -75,7 +68,6 @@ public class LogisticRegressionCostFunct
            new VoidLearningStrategy<Double, Double>(), new FeedForwardStrategy(
             new SigmoidFunction()), new MaxSelectionFunction<Double>());
 
-
    Double cost = costFunction.calculateAggregatedCost(trainingSet, neuralNetwork);
     assertTrue("cost should not be negative", cost > 0d);
   }


