This is an automated email from the ASF dual-hosted git repository.
mboehm7 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/systemml.git
The following commit(s) were added to refs/heads/master by this push:
new a3c0cce [SYSTEMML-2533] Fix named arguments in MNIST LeNet example script
a3c0cce is described below
commit a3c0cce761c855b034302e1f0871d68d8eccd089
Author: Nathan Kan <[email protected]>
AuthorDate: Thu Apr 9 19:55:39 2020 +0200
[SYSTEMML-2533] Fix named arguments in MNIST LeNet example script
This backports the fix from #866 into the merged SystemDS code line.
Closes #867.
---
scripts/nn/examples/mnist_lenet.dml | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/scripts/nn/examples/mnist_lenet.dml b/scripts/nn/examples/mnist_lenet.dml
index 57b8ba6..a882501 100644
--- a/scripts/nn/examples/mnist_lenet.dml
+++ b/scripts/nn/examples/mnist_lenet.dml
@@ -118,13 +118,13 @@ train = function(matrix[double] X, matrix[double] Y,
                                             stride, stride, pad, pad)
   outr1 = relu::forward(outc1)
   [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                                strideh=2, stridew=2, pad=0, pad=0)
+                                                strideh=2, stridew=2, padh=0, padw=0)
   ## layer 2: conv2 -> relu2 -> pool2
   [outc2, Houtc2, Woutc2] = conv2d::forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf,
                                             stride, stride, pad, pad)
   outr2 = relu::forward(outc2)
   [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                                strideh=2, stridew=2, pad=0, pad=0)
+                                                strideh=2, stridew=2, padh=0, padw=0)
   ## layer 3: affine3 -> relu3 -> dropout
   outa3 = affine::forward(outp2, W3, b3)
   outr3 = relu::forward(outa3)
@@ -166,13 +166,13 @@ train = function(matrix[double] X, matrix[double] Y,
   [doutp2, dW3, db3] = affine::backward(douta3, outp2, W3, b3)
   ## layer 2: conv2 -> relu2 -> pool2
   doutr2 = max_pool2d::backward(doutp2, Houtp2, Woutp2, outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                strideh=2, stridew=2, pad=0, pad=0)
+                                strideh=2, stridew=2, padh=0, padw=0)
   doutc2 = relu::backward(doutr2, outc2)
   [doutp1, dW2, db2] = conv2d::backward(doutc2, Houtc2, Woutc2, outp1, W2, b2, F1,
                                         Houtp1, Woutp1, Hf, Wf, stride, stride, pad, pad)
   ## layer 1: conv1 -> relu1 -> pool1
   doutr1 = max_pool2d::backward(doutp1, Houtp1, Woutp1, outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                strideh=2, stridew=2, pad=0, pad=0)
+                                strideh=2, stridew=2, padh=0, padw=0)
   doutc1 = relu::backward(doutr1, outc1)
   [dX_batch, dW1, db1] = conv2d::backward(doutc1, Houtc1, Woutc1, X_batch, W1, b1, C, Hin, Win,
                                           Hf, Wf, stride, stride, pad, pad)
@@ -264,13 +264,13 @@ predict = function(matrix[double] X, int C, int Hin, int Win,
                                             pad, pad)
   outr1 = relu::forward(outc1)
   [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                                strideh=2, stridew=2, pad=0, pad=0)
+                                                strideh=2, stridew=2, padh=0, padw=0)
   ## layer 2: conv2 -> relu2 -> pool2
   [outc2, Houtc2, Woutc2] = conv2d::forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf,
                                             stride, stride, pad, pad)
   outr2 = relu::forward(outc2)
   [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                                strideh=2, stridew=2, pad=0, pad=0)
+                                                strideh=2, stridew=2, padh=0, padw=0)
   ## layer 3: affine3 -> relu3
   outa3 = affine::forward(outp2, W3, b3)
   outr3 = relu::forward(outa3)
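
For reference, a minimal DML sketch of the corrected usage after this change. The source() path and the toy dimensions below are illustrative assumptions; the substance of the fix is only that the padding arguments are named padh/padw, matching the max_pool2d layer's parameter names, instead of passing pad=0 twice.

  # Minimal sketch (illustrative dimensions; source() path assumes the script
  # is run from the scripts/ directory of this repository).
  source("nn/layers/max_pool2d.dml") as max_pool2d

  F1 = 4; Houtc1 = 28; Woutc1 = 28             # channels and spatial size of a conv output
  outr1 = rand(rows=2, cols=F1*Houtc1*Woutc1)  # stand-in for the relu1 activations
  # Named padding arguments must be padh/padw, not pad:
  [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,
                                                strideh=2, stridew=2, padh=0, padw=0)
  print("pooled to " + Houtp1 + " x " + Woutp1)  # 2x2 pooling with stride 2 halves H and W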