Repository: incubator-systemml
Updated Branches:
  refs/heads/master 1acd86543 -> 5412e2d75


[SYSTEMML-1185] Updating breast cancer ML notebook & script

Updates the machine learning notebook and the convnet.dml model to reflect
recent changes: the notebook now reads data through the Spark 2.x session
from a configurable folder and uses a batch size of 32, and the DML script
passes explicit zero padding to its max_pool2d calls.
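
The central notebook change below swaps the hard-coded "data" path and the
Spark 1.x sqlContext reader for a configurable folder and the Spark 2.x
session. A minimal sketch of the updated loading pattern, assuming a live
SparkSession named `spark` (all other names are taken from the diff below):

    import os

    folder = "data"   # base directory for the Parquet files
    size = 256
    grayscale = False
    p = 0.01          # sampling fraction
    suffix = "_grayscale" if grayscale else ""

    if p < 1:
        tr_filename = os.path.join(folder, "train_{}_sample_{}{}.parquet".format(p, size, suffix))
    else:
        tr_filename = os.path.join(folder, "train_{}{}.parquet".format(size, suffix))

    # Parquet is Spark's default data source, so no format() call is needed.
    train_df = spark.read.load(tr_filename)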


Project: http://git-wip-us.apache.org/repos/asf/incubator-systemml/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-systemml/commit/bbca632a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-systemml/tree/bbca632a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-systemml/diff/bbca632a

Branch: refs/heads/master
Commit: bbca632a8617f5d76ed62778616e0ebcd3ccdcab
Parents: 1acd865
Author: Mike Dusenberry <[email protected]>
Authored: Wed Apr 5 18:12:57 2017 -0700
Committer: Mike Dusenberry <[email protected]>
Committed: Wed Apr 5 18:12:57 2017 -0700

----------------------------------------------------------------------
 projects/breast_cancer/MachineLearning.ipynb | 40 +++++++++++++----------
 projects/breast_cancer/convnet.dml           | 24 +++++++-------
 2 files changed, 34 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/bbca632a/projects/breast_cancer/MachineLearning.ipynb
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/MachineLearning.ipynb b/projects/breast_cancer/MachineLearning.ipynb
index 4999250..0ac880c 100644
--- a/projects/breast_cancer/MachineLearning.ipynb
+++ b/projects/breast_cancer/MachineLearning.ipynb
@@ -85,7 +85,8 @@
     "size=256\n",
     "grayscale = False\n",
     "c = 1 if grayscale else 3\n",
-    "p = 0.01"
+    "p = 0.01\n",
+    "folder = \"data\""
    ]
   },
   {
@@ -99,13 +100,13 @@
    "outputs": [],
    "source": [
     "if p < 1:\n",
-    "  tr_sample_filename = os.path.join(\"data\", 
\"train_{}_sample_{}{}.parquet\".format(p, size, \"_grayscale\" if grayscale 
else \"\"))\n",
-    "  val_sample_filename = os.path.join(\"data\", 
\"val_{}_sample_{}{}.parquet\".format(p, size, \"_grayscale\" if grayscale else 
\"\"))\n",
+    "  tr_filename = os.path.join(folder, 
\"train_{}_sample_{}{}.parquet\".format(p, size, \"_grayscale\" if grayscale 
else \"\"))\n",
+    "  val_filename = os.path.join(folder, 
\"val_{}_sample_{}{}.parquet\".format(p, size, \"_grayscale\" if grayscale else 
\"\"))\n",
     "else:\n",
-    "  tr_filename = \"train_{}{}.parquet\".format(size, \"_grayscale\" if 
grayscale else \"\")\n",
-    "  val_filename = \"val_{}{}.parquet\".format(size, \"_grayscale\" if 
grayscale else \"\")\n",
-    "train_df = sqlContext.read.load(tr_sample_filename)\n",
-    "val_df = sqlContext.read.load(val_sample_filename)\n",
+    "  tr_filename = os.path.join(folder, \"train_{}{}.parquet\".format(size, 
\"_grayscale\" if grayscale else \"\"))\n",
+    "  val_filename = os.path.join(folder, \"val_{}{}.parquet\".format(size, 
\"_grayscale\" if grayscale else \"\"))\n",
+    "train_df = spark.read.load(tr_filename)\n",
+    "val_df = spark.read.load(val_filename)\n",
     "train_df, val_df"
    ]
   },
@@ -326,7 +327,7 @@
     "lr = 1e-2  # learning rate\n",
     "mu = 0.9  # momentum\n",
     "decay = 0.999  # learning rate decay constant\n",
-    "batch_size = 50\n",
+    "batch_size = 32\n",
     "epochs = 500\n",
     "log_interval = 1\n",
     "n = 200  # sample size for overfitting sanity check\n",
@@ -367,7 +368,7 @@
     "lr = 5e-7  # learning rate\n",
     "mu = 0.5  # momentum\n",
     "decay = 0.999  # learning rate decay constant\n",
-    "batch_size = 50\n",
+    "batch_size = 32\n",
     "epochs = 1\n",
     "log_interval = 10\n",
     "\n",
@@ -464,7 +465,7 @@
     "mu = 0.9  # momentum\n",
     "decay = 0.999  # learning rate decay constant\n",
     "lambda = 0  #5e-04\n",
-    "batch_size = 50\n",
+    "batch_size = 32\n",
     "epochs = 300\n",
     "log_interval = 1\n",
     "dir = \"models/lenet-cnn/sanity/\"\n",
@@ -515,7 +516,7 @@
     "  mu = as.scalar(rand(rows=1, cols=1, min=0.5, max=0.9))  # momentum\n",
     "  decay = as.scalar(rand(rows=1, cols=1, min=0.9, max=1))  # learning 
rate decay constant\n",
     "  lambda = 10 ^ as.scalar(rand(rows=1, cols=1, min=-7, max=-1))  # 
regularization constant\n",
-    "  batch_size = 50\n",
+    "  batch_size = 32\n",
     "  epochs = 1\n",
     "  log_interval = 10\n",
     "  trial_dir = dir + \"j/\"\n",
@@ -568,20 +569,23 @@
     "mu = 0.632  # momentum\n",
     "decay = 0.99  # learning rate decay constant\n",
     "lambda = 0.00385\n",
-    "batch_size = 50\n",
+    "batch_size = 32\n",
     "epochs = 1\n",
     "log_interval = 10\n",
     "dir = \"models/lenet-cnn/train/\"\n",
     "\n",
     "# Train\n",
-    "[Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2] = clf::train(X, Y, 
X_val, Y_val, C, Hin, Win, lr, mu, decay, lambda, batch_size, epochs, 
log_interval, dir)\n",
+    "[Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2] =\n",
+    "    clf::train(X, Y, X_val, Y_val, C, Hin, Win, lr, mu, decay,\n",
+    "               lambda, batch_size, epochs, log_interval, dir)\n",
     "\"\"\"\n",
-    "outputs = (\"Wc1\", \"bc1\", \"Wc2\", \"bc2\", \"Wc3\", \"bc3\", \"Wa1\", 
\"ba1\", \"Wa2\", \"ba2\")\n",
+    "outputs = (\"Wc1\", \"bc1\", \"Wc2\", \"bc2\", \"Wc3\", \"bc3\",\n",
+    "           \"Wa1\", \"ba1\", \"Wa2\", \"ba2\")\n",
     "script = (dml(script).input(X=X, X_val=X_val, Y=Y, Y_val=Y_val,\n",
     "                            C=c, Hin=size, Win=size)\n",
     "                     .output(*outputs))\n",
-    "Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2 = 
ml.execute(script).get(*outputs)\n",
-    "Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2"
+    "outs = ml.execute(script).get(*outputs)\n",
+    "Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2 = outs"
    ]
   },
   {
@@ -629,9 +633,9 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python 3 + Spark 2.x + SystemML",
    "language": "python",
-   "name": "python3"
+   "name": "pyspark3_2.x"
   },
   "language_info": {
    "codemirror_mode": {

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/bbca632a/projects/breast_cancer/convnet.dml
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/convnet.dml b/projects/breast_cancer/convnet.dml
index 85c7dd8..6cbea39 100644
--- a/projects/breast_cancer/convnet.dml
+++ b/projects/breast_cancer/convnet.dml
@@ -149,19 +149,19 @@ train = function(matrix[double] X, matrix[double] Y,
                                                 stride, stride, pad, pad)
       outc1r = relu::forward(outc1)
      [outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                                       strideh=2, stridew=2)
+                                                       strideh=2, stridew=2, 0, 0)
       ## conv layer 2: conv2 -> relu2 -> pool2
      [outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,
                                                stride, stride, pad, pad)
      outc2r = relu::forward(outc2)
      [outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                                       strideh=2, stridew=2)
+                                                       strideh=2, stridew=2, 0, 0)
       ## conv layer 3: conv3 -> relu3 -> pool3
      [outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,
                                                stride, stride, pad, pad)
      outc3r = relu::forward(outc3)
      [outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,
-                                                       strideh=2, stridew=2)
+                                                       strideh=2, stridew=2, 0, 0)
       ## affine layer 1:  affine1 -> relu1 -> dropout1
       outa1 = affine::forward(outc3p, Wa1, ba1)
       outa1r = relu::forward(outa1)
@@ -183,19 +183,19 @@ train = function(matrix[double] X, matrix[double] Y,
       [doutc3p, dWa1, dba1] = affine::backward(douta1, outc3p, Wa1, ba1)
       ## conv layer 3: conv3 -> relu3 -> pool3
      doutc3r = max_pool2d::backward(doutc3p, Houtc3p, Woutc3p, outc3r, F3, Houtc3, Woutc3,
-                                     Hf=2, Wf=2, strideh=2, stridew=2)
+                                     Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)
       doutc3 = relu::backward(doutc3r, outc3)
      [doutc2p, dWc3, dbc3] = conv2d::backward(doutc3, Houtc3, Woutc3, outc2p, Wc3, bc2, F2,
                                               Houtc2p, Woutc2p, Hf, Wf, stride, stride, pad, pad)
       ## conv layer 2: conv2 -> relu2 -> pool2
      doutc2r = max_pool2d::backward(doutc2p, Houtc2p, Woutc2p, outc2r, F2, Houtc2, Woutc2,
-                                     Hf=2, Wf=2, strideh=2, stridew=2)
+                                     Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)
       doutc2 = relu::backward(doutc2r, outc2)
      [doutc1p, dWc2, dbc2] = conv2d::backward(doutc2, Houtc2, Woutc2, outc1p, Wc2, bc2, F1,
                                               Houtc1p, Woutc1p, Hf, Wf, stride, stride, pad, pad)
       ## conv layer 1: conv1 -> relu1 -> pool1
      doutc1r = max_pool2d::backward(doutc1p, Houtc1p, Woutc1p, outc1r, F1, Houtc1, Woutc1,
-                                     Hf=2, Wf=2, strideh=2, stridew=2)
+                                     Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)
       doutc1 = relu::backward(doutc1r, outc1)
      [dX_batch, dWc1, dbc1] = conv2d::backward(doutc1, Houtc1, Woutc1, X_batch, Wc1, bc1, C,
                                                Hin, Win, Hf, Wf, stride, stride, pad, pad)
@@ -382,19 +382,19 @@ predict = function(matrix[double] X, int C, int Hin, int Win,
   #                                          pad, pad)
   #outc1r = relu::forward(outc1)
  #[outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-  #                                                 strideh=2, stridew=2)
+  #                                                 strideh=2, stridew=2, 0, 0)
   ### conv layer 2: conv2 -> relu2 -> pool2
  #[outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,
   #                                          stride, stride, pad, pad)
   #outc2r = relu::forward(outc2)
  #[outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-  #                                                 strideh=2, stridew=2)
+  #                                                 strideh=2, stridew=2, 0, 0)
   ### conv layer 3: conv3 -> relu3 -> pool3
  #[outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,
   #                                          stride, stride, pad, pad)
   #outc3r = relu::forward(outc3)
  #[outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,
-  #                                                 strideh=2, stridew=2)
+  #                                                 strideh=2, stridew=2, 0, 0)
   ### affine layer 1:  affine1 -> relu1 -> dropout
   #outa1 = affine::forward(outc3p, Wa1, ba1)
   #outa1r = relu::forward(outa1)
@@ -421,19 +421,19 @@ predict = function(matrix[double] X, int C, int Hin, int Win,
                                               stride, stride, pad, pad)
     outc1r = relu::forward(outc1)
    [outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                                     strideh=2, stridew=2)
+                                                     strideh=2, stridew=2, 0, 0)
     ## conv layer 2: conv2 -> relu2 -> pool2
    [outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,
                                              stride, stride, pad, pad)
    outc2r = relu::forward(outc2)
    [outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                                     strideh=2, stridew=2)
+                                                     strideh=2, stridew=2, 0, 0)
     ## conv layer 3: conv3 -> relu3 -> pool3
    [outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,
                                              stride, stride, pad, pad)
    outc3r = relu::forward(outc3)
    [outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,
-                                                     strideh=2, stridew=2)
+                                                     strideh=2, stridew=2, 0, 0)
     ## affine layer 1:  affine1 -> relu1 -> dropout
     outa1 = affine::forward(outc3p, Wa1, ba1)
     outa1r = relu::forward(outa1)
