Repository: systemml
Updated Branches:
  refs/heads/master fb675b82c -> db9da2855


[SYSTEMML-2463] Fix paramserv tests (incorrect named argument usage)

With the recently added support for named function arguments, various
places in SystemML now check the validity of the named arguments used in
a call. This makes the existing paramserv tests fail because they use
incorrect name bindings that were silently ignored so far.
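
As an illustration of the kind of call the stricter validation rejects, the
pattern fixed below looks as follows (the positional order Hf, Wf, strideh,
stridew, padh, padw is assumed from the nn max_pool2d layer signature):

  # before: mixes positional and named arguments and binds the name 'pad'
  # twice, which the named-argument validation now flags
  [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1,
                                                Hf=2, Wf=2, strideh=2, stridew=2,
                                                pad=0, pad=0)

  # after: purely positional call passing the same values
  [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1,
                                                2, 2, 2, 2, 0, 0)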


Project: http://git-wip-us.apache.org/repos/asf/systemml/repo
Commit: http://git-wip-us.apache.org/repos/asf/systemml/commit/db9da285
Tree: http://git-wip-us.apache.org/repos/asf/systemml/tree/db9da285
Diff: http://git-wip-us.apache.org/repos/asf/systemml/diff/db9da285

Branch: refs/heads/master
Commit: db9da28551bd85f234c196ac8fd7ea25cccc8543
Parents: fb675b8
Author: Matthias Boehm <[email protected]>
Authored: Wed Jul 25 18:03:57 2018 -0700
Committer: Matthias Boehm <[email protected]>
Committed: Wed Jul 25 18:03:57 2018 -0700

----------------------------------------------------------------------
 .../functions/paramserv/mnist_lenet_paramserv.dml | 18 ++++++------------
 .../mnist_lenet_paramserv_minimum_version.dml     | 18 ++++++------------
 2 files changed, 12 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/systemml/blob/db9da285/src/test/scripts/functions/paramserv/mnist_lenet_paramserv.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/paramserv/mnist_lenet_paramserv.dml b/src/test/scripts/functions/paramserv/mnist_lenet_paramserv.dml
index 84095ec..bce4eea 100644
--- a/src/test/scripts/functions/paramserv/mnist_lenet_paramserv.dml
+++ b/src/test/scripts/functions/paramserv/mnist_lenet_paramserv.dml
@@ -157,14 +157,12 @@ gradients = function(matrix[double] features,
  [outc1, Houtc1, Woutc1] = conv2d::forward(features, W1, b1, C, Hin, Win, Hf, Wf,
                                              stride, stride, pad, pad)
  outr1 = relu::forward(outc1)
-  [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                                strideh=2, stridew=2, pad=0, pad=0)
+  [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0)
   ## layer 2: conv2 -> relu2 -> pool2
  [outc2, Houtc2, Woutc2] = conv2d::forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf,
                                            stride, stride, pad, pad)
  outr2 = relu::forward(outc2)
-  [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                                strideh=2, stridew=2, pad=0, pad=0)
+  [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, 2, 2, 2, 2, 0, 0)
   ## layer 3:  affine3 -> relu3 -> dropout
   outa3 = affine::forward(outp2, W3, b3)
   outr3 = relu::forward(outa3)
@@ -184,14 +182,12 @@ gradients = function(matrix[double] features,
   douta3 = relu::backward(doutr3, outa3)
   [doutp2, dW3, db3] = affine::backward(douta3, outp2, W3, b3)
   ## layer 2: conv2 -> relu2 -> pool2
-  doutr2 = max_pool2d::backward(doutp2, Houtp2, Woutp2, outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                strideh=2, stridew=2, pad=0, pad=0)
+  doutr2 = max_pool2d::backward(doutp2, Houtp2, Woutp2, outr2, F2, Houtc2, Woutc2, 2, 2, 2, 2, 0, 0)
  doutc2 = relu::backward(doutr2, outc2)
  [doutp1, dW2, db2] = conv2d::backward(doutc2, Houtc2, Woutc2, outp1, W2, b2, F1,
                                        Houtp1, Woutp1, Hf, Wf, stride, stride, pad, pad)
   ## layer 1: conv1 -> relu1 -> pool1
-  doutr1 = max_pool2d::backward(doutp1, Houtp1, Woutp1, outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                strideh=2, stridew=2, pad=0, pad=0)
+  doutr1 = max_pool2d::backward(doutp1, Houtp1, Woutp1, outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0)
  doutc1 = relu::backward(doutr1, outc1)
  [dX_batch, dW1, db1] = conv2d::backward(doutc1, Houtc1, Woutc1, features, W1, b1, C, Hin, Win,
                                           Hf, Wf, stride, stride, pad, pad)
@@ -314,14 +310,12 @@ predict = function(matrix[double] X, int C, int Hin, int Win, int batch_size,
    [outc1, Houtc1, Woutc1] = conv2d::forward(X_batch, W1, b1, C, Hin, Win, Hf, Wf, stride, stride,
                                              pad, pad)
    outr1 = relu::forward(outc1)
-    [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                                  strideh=2, stridew=2, pad=0, pad=0)
+    [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0)
     ## layer 2: conv2 -> relu2 -> pool2
    [outc2, Houtc2, Woutc2] = conv2d::forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf,
                                              stride, stride, pad, pad)
    outr2 = relu::forward(outc2)
-    [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                                  strideh=2, stridew=2, pad=0, pad=0)
+    [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, 2, 2, 2, 2, 0, 0)
     ## layer 3:  affine3 -> relu3
     outa3 = affine::forward(outp2, W3, b3)
     outr3 = relu::forward(outa3)

http://git-wip-us.apache.org/repos/asf/systemml/blob/db9da285/src/test/scripts/functions/paramserv/mnist_lenet_paramserv_minimum_version.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/paramserv/mnist_lenet_paramserv_minimum_version.dml b/src/test/scripts/functions/paramserv/mnist_lenet_paramserv_minimum_version.dml
index aeec3df..a3677aa 100644
--- a/src/test/scripts/functions/paramserv/mnist_lenet_paramserv_minimum_version.dml
+++ b/src/test/scripts/functions/paramserv/mnist_lenet_paramserv_minimum_version.dml
@@ -151,14 +151,12 @@ gradients = function(matrix[double] features,
  [outc1, Houtc1, Woutc1] = conv2d::forward(features, W1, b1, C, Hin, Win, Hf, Wf,
                                              stride, stride, pad, pad)
  outr1 = relu::forward(outc1)
-  [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                                strideh=2, stridew=2, pad=0, pad=0)
+  [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0)
   ## layer 2: conv2 -> relu2 -> pool2
  [outc2, Houtc2, Woutc2] = conv2d::forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf,
                                            stride, stride, pad, pad)
  outr2 = relu::forward(outc2)
-  [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                                strideh=2, stridew=2, pad=0, pad=0)
+  [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, 2, 2, 2, 2, 0, 0)
   ## layer 3:  affine3 -> relu3 -> dropout
   outa3 = affine::forward(outp2, W3, b3)
   outr3 = relu::forward(outa3)
@@ -178,14 +176,12 @@ gradients = function(matrix[double] features,
   douta3 = relu::backward(doutr3, outa3)
   [doutp2, dW3, db3] = affine::backward(douta3, outp2, W3, b3)
   ## layer 2: conv2 -> relu2 -> pool2
-  doutr2 = max_pool2d::backward(doutp2, Houtp2, Woutp2, outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                strideh=2, stridew=2, pad=0, pad=0)
+  doutr2 = max_pool2d::backward(doutp2, Houtp2, Woutp2, outr2, F2, Houtc2, Woutc2, 2, 2, 2, 2, 0, 0)
  doutc2 = relu::backward(doutr2, outc2)
  [doutp1, dW2, db2] = conv2d::backward(doutc2, Houtc2, Woutc2, outp1, W2, b2, F1,
                                        Houtp1, Woutp1, Hf, Wf, stride, stride, pad, pad)
   ## layer 1: conv1 -> relu1 -> pool1
-  doutr1 = max_pool2d::backward(doutp1, Houtp1, Woutp1, outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                strideh=2, stridew=2, pad=0, pad=0)
+  doutr1 = max_pool2d::backward(doutp1, Houtp1, Woutp1, outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0)
  doutc1 = relu::backward(doutr1, outc1)
  [dX_batch, dW1, db1] = conv2d::backward(doutc1, Houtc1, Woutc1, features, W1, b1, C, Hin, Win,
                                           Hf, Wf, stride, stride, pad, pad)
@@ -307,14 +303,12 @@ predict = function(matrix[double] X, int C, int Hin, int Win, int batch_size,
    [outc1, Houtc1, Woutc1] = conv2d::forward(X_batch, W1, b1, C, Hin, Win, Hf, Wf, stride, stride,
                                              pad, pad)
    outr1 = relu::forward(outc1)
-    [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                                  strideh=2, stridew=2, pad=0, pad=0)
+    [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0)
     ## layer 2: conv2 -> relu2 -> pool2
    [outc2, Houtc2, Woutc2] = conv2d::forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf,
                                              stride, stride, pad, pad)
    outr2 = relu::forward(outc2)
-    [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                                  strideh=2, stridew=2, pad=0, pad=0)
+    [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, 2, 2, 2, 2, 0, 0)
     ## layer 3:  affine3 -> relu3
     outa3 = affine::forward(outp2, W3, b3)
     outr3 = relu::forward(outa3)
