Repository: systemml
Updated Branches:
  refs/heads/master 9389a5e1e -> c83e99af7


[MINOR] Cleanup in the `nn` library.


Project: http://git-wip-us.apache.org/repos/asf/systemml/repo
Commit: http://git-wip-us.apache.org/repos/asf/systemml/commit/cbfb21cb
Tree: http://git-wip-us.apache.org/repos/asf/systemml/tree/cbfb21cb
Diff: http://git-wip-us.apache.org/repos/asf/systemml/diff/cbfb21cb

Branch: refs/heads/master
Commit: cbfb21cbcdcac699f93cdbb851138f17f6fcd9b6
Parents: 9389a5e
Author: Mike Dusenberry <[email protected]>
Authored: Mon Jun 19 13:52:47 2017 -0700
Committer: Mike Dusenberry <[email protected]>
Committed: Mon Jun 19 13:52:47 2017 -0700

----------------------------------------------------------------------
 scripts/nn/layers/conv2d_transpose.dml |  10 +-
 scripts/nn/test/test.dml               | 182 ++++++++++++++--------------
 2 files changed, 97 insertions(+), 95 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/systemml/blob/cbfb21cb/scripts/nn/layers/conv2d_transpose.dml
----------------------------------------------------------------------
diff --git a/scripts/nn/layers/conv2d_transpose.dml b/scripts/nn/layers/conv2d_transpose.dml
index eee19a5..bdc5090 100644
--- a/scripts/nn/layers/conv2d_transpose.dml
+++ b/scripts/nn/layers/conv2d_transpose.dml
@@ -20,10 +20,11 @@
 #-------------------------------------------------------------
 
 /*
- * 2D Transpose convolutional layer.
+ * 2D Transpose Convolutional layer.
  *
  * Utilizes built-in convolution operators for higher performance.
  */
+source("nn/util.dml") as util
 
 forward = function(matrix[double] X, matrix[double] W, matrix[double] b,
                    int C, int Hin, int Win, int Hf, int Wf,
@@ -60,8 +61,8 @@ forward = function(matrix[double] X, matrix[double] W, matrix[double] b,
    */
   N = nrow(X)
   F = nrow(b)
-  Hout = strideh * (Hin-1) - 2*padh + Hf + out_padh
-  Wout = stridew * (Win-1) - 2*padw + Wf + out_padw
+  Hout = strideh*(Hin-1) - 2*padh + Hf + out_padh
+  Wout = stridew*(Win-1) - 2*padw + Wf + out_padw
 
   # Transpose convolution aims to go in the other direction of
   # (direct) convolution, i.e., given input X, produce output O such
@@ -146,7 +147,7 @@ backward = function(matrix[double] dout, int Hout, int Wout,
               stride=[strideh,stridew], padding=[padh,padw])
 
   # Partial derivatives for bias vector
-  db = rowSums(matrix(colSums(dout), rows=F, cols=Hout*Wout))
+  db = util::channel_sums(dout, F, Hout, Wout)
 }
 
 init = function(int F, int C, int Hf, int Wf)
@@ -235,3 +236,4 @@ init_bilinear = function(int C, int K)
 
   b = matrix(0, rows=C, cols=1)
 }
+
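
Note: the two output-size expressions above are only reformatted; the formula is the usual transpose-convolution relation, Hout = strideh*(Hin-1) - 2*padh + Hf + out_padh (and analogously for Wout). As a purely illustrative example, with Hin=4, strideh=2, padh=1, Hf=3, and out_padh=1, this gives Hout = 2*(4-1) - 2*1 + 3 + 1 = 8.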

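The bias-gradient change above replaces a reshape/rowSums expression with the `nn/util.dml` helper `channel_sums`. A minimal DML sketch of the intended equivalence (toy sizes chosen only for illustration; `dout` is assumed to have shape (N, F*Hout*Wout), as in the layer):

  source("nn/util.dml") as util

  # Toy sizes, for illustration only.
  N = 2
  F = 3
  Hout = 2
  Wout = 2
  # Upstream gradient w.r.t. the layer output, shape (N, F*Hout*Wout).
  dout = matrix(seq(1, N*F*Hout*Wout), rows=N, cols=F*Hout*Wout)

  # Previous expression: sum over examples, then over spatial positions, per filter.
  db_old = rowSums(matrix(colSums(dout), rows=F, cols=Hout*Wout))
  # New utility call from this commit; expected to yield the same (F, 1) vector.
  db_new = util::channel_sums(dout, F, Hout, Wout)

  print(toString(cbind(db_old, db_new)))
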
http://git-wip-us.apache.org/repos/asf/systemml/blob/cbfb21cb/scripts/nn/test/test.dml
----------------------------------------------------------------------
diff --git a/scripts/nn/test/test.dml b/scripts/nn/test/test.dml
index cfb8c79..94965bf 100644
--- a/scripts/nn/test/test.dml
+++ b/scripts/nn/test/test.dml
@@ -69,6 +69,97 @@ batch_norm1d = function() {
   }
 }
 
+batch_norm2d = function() {
+  /*
+   * Test for the 2D (spatial) batch normalization function.
+   */
+  print("Testing the 2D (spatial) batch normalization function.")
+
+  # Generate data
+  N = 2  # Number of examples
+  C = 3  # num channels
+  Hin = 4  # input height
+  Win = 5  # input width
+  mode = 'train'  # execution mode
+  mu = 0.9  # momentum of moving averages
+  eps = 1e-5  # smoothing term
+  X = matrix("70  29 23 55 72
+              42  98 68 48 39
+              34  73 44  6 40
+              74  18 18 53 53
+
+              63  85 72 61 72
+              32  36 23 29 63
+               9  43 43 49 43
+              31  43 89 94 50
+
+              62  12 32 41 87
+              25  48 99 52 61
+              12  83 60 55 34
+              30  42 68 88 51
+
+
+              67  59 62 67 84
+               8  76 24 19 57
+              10  89 63 72  2
+              59  56 16 15 70
+
+              32  69 55 39 93
+              84  36  4 30 40
+              70 100 36 76 59
+              69  15 40 24 34
+
+              51  67 11 13 32
+              66  85 55 85 38
+              32  35 17 83 34
+              55  58 52  0 99", rows=N, cols=C*Hin*Win)
+
+  # Create layer
+  [gamma, beta, ema_mean, ema_var] = batch_norm2d::init(C)
+
+  # Forward
+  [out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =
+      batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)
+
+  # Equivalency check
+  target = matrix("0.86215019 -0.76679718 -1.00517964  0.26619387  0.94161105
+                  -0.25030172  1.97460198  0.78268933 -0.01191914 -0.36949289
+                  -0.56814504  0.98134136 -0.17084086 -1.68059683 -0.32976246
+                   1.02107191 -1.20383179 -1.20383179  0.18673301  0.18673301
+
+                   0.50426388  1.41921711  0.87856293  0.42108631  0.87856293
+                  -0.78498828 -0.61863315 -1.15928721 -0.90975463  0.50426388
+                  -1.74153018 -0.32751167 -0.32751167 -0.07797909 -0.32751167
+                  -0.82657707 -0.32751167  1.58557224  1.79351616 -0.0363903
+
+                   0.4607178  -1.49978399 -0.71558321 -0.36269283  1.44096887
+                  -0.99005347 -0.08822262  1.91148913  0.06861746  0.42150795
+                  -1.49978399  1.28412855  0.38229787  0.18624771 -0.63716316
+                  -0.79400325 -0.32348287  0.69597805  1.48017895  0.0294075
+
+
+                   0.74295878  0.42511559  0.54430676  0.74295878  1.41837597
+                  -1.60113597  1.10053277 -0.96544927 -1.16410136  0.34565473
+                  -1.52167511  1.61702824  0.5840373   0.94161105 -1.83951855
+                   0.42511559  0.30592418 -1.28329265 -1.32302308  0.86215019
+
+                  -0.78498828  0.75379658  0.17155361 -0.4938668   1.75192738
+                   1.37762833 -0.61863315 -1.9494741  -0.86816585 -0.45227802
+                   0.79538536  2.04304862 -0.61863315  1.04491806  0.33790874
+                   0.75379658 -1.49199748 -0.45227802 -1.11769855 -0.70181072
+
+                   0.0294075   0.65676796 -1.53899395 -1.46057391 -0.71558321
+                   0.61755812  1.36254871  0.18624771  1.36254871 -0.48032296
+                  -0.71558321 -0.59795308 -1.30373383  1.28412855 -0.63716316
+                   0.18624771  0.30387771  0.06861746 -1.97030437  1.91148913",
+                  rows=1, cols=N*C*Hin*Win)
+  out = matrix(out, rows=1, cols=N*C*Hin*Win)
+  for (i in 1:length(out)) {
+    rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
+                                           as.scalar(target[1,i]), 1e-3, 1e-4)
+  }
+}
+
 conv2d = function() {
   /*
    * Test for the 2D convolution functions.
@@ -491,97 +582,6 @@ max_pool2d = function() {
   tmp = test_util::check_all_equal(out_builtin, target)
 }
 
-batch_norm2d = function() {
-  /*
-   * Test for the 2D (spatial) batch normalization function.
-   */
-  print("Testing the 2D (spatial) batch normalization function.")
-
-  # Generate data
-  N = 2  # Number of examples
-  C = 3  # num channels
-  Hin = 4  # input height
-  Win = 5  # input width
-  mode = 'train'  # execution mode
-  mu = 0.9  # momentum of moving averages
-  eps = 1e-5  # smoothing term
-  X = matrix("70  29 23 55 72
-              42  98 68 48 39
-              34  73 44  6 40
-              74  18 18 53 53
-
-              63  85 72 61 72
-              32  36 23 29 63
-               9  43 43 49 43
-              31  43 89 94 50
-
-              62  12 32 41 87
-              25  48 99 52 61
-              12  83 60 55 34
-              30  42 68 88 51
-
-
-              67  59 62 67 84
-               8  76 24 19 57
-              10  89 63 72  2
-              59  56 16 15 70
-
-              32  69 55 39 93
-              84  36  4 30 40
-              70 100 36 76 59
-              69  15 40 24 34
-
-              51  67 11 13 32
-              66  85 55 85 38
-              32  35 17 83 34
-              55  58 52  0 99", rows=N, cols=C*Hin*Win)
-
-  # Create layer
-  [gamma, beta, ema_mean, ema_var] = batch_norm2d::init(C)
-
-  # Forward
-  [out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =
-      batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)
-
-  # Equivalency check
-  target = matrix("0.86215019 -0.76679718 -1.00517964  0.26619387  0.94161105
-                  -0.25030172  1.97460198  0.78268933 -0.01191914 -0.36949289
-                  -0.56814504  0.98134136 -0.17084086 -1.68059683 -0.32976246
-                   1.02107191 -1.20383179 -1.20383179  0.18673301  0.18673301
-
-                   0.50426388  1.41921711  0.87856293  0.42108631  0.87856293
-                  -0.78498828 -0.61863315 -1.15928721 -0.90975463  0.50426388
-                  -1.74153018 -0.32751167 -0.32751167 -0.07797909 -0.32751167
-                  -0.82657707 -0.32751167  1.58557224  1.79351616 -0.0363903
-
-                   0.4607178  -1.49978399 -0.71558321 -0.36269283  1.44096887
-                  -0.99005347 -0.08822262  1.91148913  0.06861746  0.42150795
-                  -1.49978399  1.28412855  0.38229787  0.18624771 -0.63716316
-                  -0.79400325 -0.32348287  0.69597805  1.48017895  0.0294075
-
-
-                   0.74295878  0.42511559  0.54430676  0.74295878  1.41837597
-                  -1.60113597  1.10053277 -0.96544927 -1.16410136  0.34565473
-                  -1.52167511  1.61702824  0.5840373   0.94161105 -1.83951855
-                   0.42511559  0.30592418 -1.28329265 -1.32302308  0.86215019
-
-                  -0.78498828  0.75379658  0.17155361 -0.4938668   1.75192738
-                   1.37762833 -0.61863315 -1.9494741  -0.86816585 -0.45227802
-                   0.79538536  2.04304862 -0.61863315  1.04491806  0.33790874
-                   0.75379658 -1.49199748 -0.45227802 -1.11769855 -0.70181072
-
-                   0.0294075   0.65676796 -1.53899395 -1.46057391 -0.71558321
-                   0.61755812  1.36254871  0.18624771  1.36254871 -0.48032296
-                  -0.71558321 -0.59795308 -1.30373383  1.28412855 -0.63716316
-                   0.18624771  0.30387771  0.06861746 -1.97030437  1.91148913",
-                  rows=1, cols=N*C*Hin*Win)
-  out = matrix(out, rows=1, cols=N*C*Hin*Win)
-  for (i in 1:length(out)) {
-    rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
-                                           as.scalar(target[1,i]), 1e-3, 1e-4)
-  }
-}
-
 tanh = function() {
   /*
    * Test for the `tanh` forward function.

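For reference, the relocated batch_norm2d test exercises the forward pass in 'train' mode against hard-coded values. Assuming `batch_norm2d::init(C)` starts from gamma=1 and beta=0 (the usual initialization), the expected output is simply each input standardized by its channel's batch statistics. A rough DML sketch of that reference math, reusing X, C, Hin, Win, and eps from the test body above (an illustration only, not the layer's actual implementation):

  # Per-channel mean and (biased) variance over all examples and spatial positions,
  # for X of shape (N, C*Hin*Win); both results are (C, 1) vectors.
  mean_c = rowMeans(matrix(colMeans(X), rows=C, cols=Hin*Win))
  var_c  = rowMeans(matrix(colMeans(X^2), rows=C, cols=Hin*Win)) - mean_c^2

  # With gamma=1 and beta=0, each element x of channel c should normalize to
  #   (x - mean_c) / sqrt(var_c + eps),
  # which is what the hard-coded `target` matrix in the test encodes.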