indhub closed pull request #10608: [MXNET-307] Add tutorial tests to the CI
URL: https://github.com/apache/incubator-mxnet/pull/10608
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for the sake of provenance:


diff --git a/Jenkinsfile b/Jenkinsfile
index 6c397367aab..a4bf2c492af 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -722,6 +722,28 @@ try {
           }
         }
       }
+    },
+    'tutorial tests Python 2 GPU': {
+      node('mxnetlinux-gpu') {
+        ws('workspace/it-tutorials-py2') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            init_git()
+            unpack_lib('gpu')
+            sh "ci/build.py --shm-size=3g --nvidiadocker --platform ubuntu_gpu /work/runtime_functions.sh tutorialtest_ubuntu_python2_gpu"
+          }
+        }
+      }
+    },
+    'tutorial tests Python 3 GPU': {
+      node('mxnetlinux-gpu') {
+        ws('workspace/it-tutorials-py3') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            init_git()
+            unpack_lib('gpu')
+            sh "ci/build.py --shm-size=3g --nvidiadocker --platform ubuntu_gpu /work/runtime_functions.sh tutorialtest_ubuntu_python3_gpu"
+          }
+        }
+      }
     }
   }
 
diff --git a/ci/build.py b/ci/build.py
index ee36876db74..6d8d0141170 100755
--- a/ci/build.py
+++ b/ci/build.py
@@ -89,13 +89,19 @@ def buildir() -> str:
     return os.path.join(get_mxnet_root(), "build")
 
 
-def container_run(platform: str, docker_binary: str, command: List[str], dry_run: bool = False, into_container: bool = False) -> str:
+def container_run(platform: str,
+                  docker_binary: str,
+                  shared_memory_size: str,
+                  command: List[str],
+                  dry_run: bool = False,
+                  into_container: bool = False) -> str:
     tag = get_docker_tag(platform)
     mx_root = get_mxnet_root()
     local_build_folder = buildir()
     # We need to create it first, otherwise it will be created by the docker daemon with root only permissions
     os.makedirs(local_build_folder, exist_ok=True)
-    runlist = [docker_binary, 'run', '--rm',
+    runlist = [docker_binary, 'run', '--rm', '-t',
+        '--shm-size={}'.format(shared_memory_size),
         '-v', "{}:/work/mxnet".format(mx_root), # mount mxnet root
         '-v', "{}:/work/build".format(local_build_folder), # mount mxnet/build 
for storing build artifacts
         '-u', '{}:{}'.format(os.getuid(), os.getgid()),
@@ -157,6 +163,11 @@ def script_name() -> str:
                         help="Use nvidia docker",
                         action='store_true')
 
+    parser.add_argument("--shm-size",
+                        help="Size of the shared memory /dev/shm allocated in the container (e.g '1g')",
+                        default='500m',
+                        dest="shared_memory_size")
+
     parser.add_argument("-l", "--list",
                         help="List platforms",
                         action='store_true')
@@ -176,11 +187,11 @@ def script_name() -> str:
     args = parser.parse_args()
     command = list(chain(*args.command))
     docker_binary = get_docker_binary(args.nvidiadocker)
+    shared_memory_size = args.shared_memory_size
 
     print("into container: {}".format(args.into_container))
     if args.list:
         list_platforms()
-
     elif args.platform:
         platform = args.platform
         build_docker(platform, docker_binary)
@@ -190,15 +201,15 @@ def script_name() -> str:
 
         tag = get_docker_tag(platform)
         if command:
-            container_run(platform, docker_binary, command)
+            container_run(platform, docker_binary, shared_memory_size, command)
         elif args.print_docker_run:
-            print(container_run(platform, docker_binary, [], True))
+            print(container_run(platform, docker_binary, shared_memory_size, [], True))
         elif args.into_container:
-            container_run(platform, docker_binary, [], False, True)
+            container_run(platform, docker_binary, shared_memory_size, [], False, True)
         else:
             cmd = ["/work/mxnet/ci/docker/runtime_functions.sh", 
"build_{}".format(platform)]
             logging.info("No command specified, trying default build: %s", ' 
'.join(cmd))
-            container_run(platform, docker_binary, cmd)
+            container_run(platform, docker_binary, shared_memory_size, cmd)
 
     elif args.all:
         platforms = get_platforms()
@@ -211,7 +222,7 @@ def script_name() -> str:
             build_platform = "build_{}".format(platform)
             cmd = ["/work/mxnet/ci/docker/runtime_functions.sh", 
build_platform]
             shutil.rmtree(buildir(), ignore_errors=True)
-            container_run(platform, docker_binary, cmd)
+            container_run(platform, docker_binary, shared_memory_size, cmd)
             plat_buildir = os.path.join(get_mxnet_root(), build_platform)
             shutil.move(buildir(), plat_buildir)
             logging.info("Built files left in: %s", plat_buildir)
diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu b/ci/docker/Dockerfile.build.ubuntu_gpu
index 625d57009c9..547f9843d34 100755
--- a/ci/docker/Dockerfile.build.ubuntu_gpu
+++ b/ci/docker/Dockerfile.build.ubuntu_gpu
@@ -44,6 +44,12 @@ COPY install/ubuntu_llvm.sh /work/
 RUN /work/ubuntu_llvm.sh
 COPY install/ubuntu_caffe.sh /work/
 RUN /work/ubuntu_caffe.sh
+COPY install/ubuntu_onnx.sh /work/
+RUN /work/ubuntu_onnx.sh
+COPY install/ubuntu_docs.sh /work/
+RUN /work/ubuntu_docs.sh
+COPY install/ubuntu_tutorials.sh /work/
+RUN /work/ubuntu_tutorials.sh
 COPY install/ubuntu_adduser.sh /work/
 RUN /work/ubuntu_adduser.sh
 
diff --git a/ci/docker/install/ubuntu_docs.sh b/ci/docker/install/ubuntu_docs.sh
index bb8026f0daa..3729f9951ed 100755
--- a/ci/docker/install/ubuntu_docs.sh
+++ b/ci/docker/install/ubuntu_docs.sh
@@ -25,4 +25,4 @@ wget http://downloads.lightbend.com/scala/2.11.8/scala-2.11.8.deb && \
     dpkg -i scala-2.11.8.deb && rm scala-2.11.8.deb
 
 apt-get install -y doxygen libatlas-base-dev graphviz pandoc
-pip install sphinx==1.3.5 CommonMark==0.5.4 breathe mock recommonmark pypandoc beautifulsoup4
+pip install sphinx==1.5.6 CommonMark==0.5.4 breathe mock recommonmark==0.4.0 pypandoc beautifulsoup4
\ No newline at end of file
diff --git a/ci/docker/install/ubuntu_python.sh b/ci/docker/install/ubuntu_python.sh
index 948d8a225f6..554000d20ad 100755
--- a/ci/docker/install/ubuntu_python.sh
+++ b/ci/docker/install/ubuntu_python.sh
@@ -29,5 +29,5 @@ wget -nv https://bootstrap.pypa.io/get-pip.py
 python3 get-pip.py
 python2 get-pip.py
 
-pip2 install nose cpplint==1.3.0 pylint==1.8.3 'numpy<1.15.0,>=1.8.2' nose-timer 'requests<2.19.0,>=2.18.4' h5py scipy
-pip3 install nose cpplint==1.3.0 pylint==1.8.3 'numpy<1.15.0,>=1.8.2' nose-timer 'requests<2.19.0,>=2.18.4' h5py scipy
+pip2 install nose cpplint==1.3.0 pylint==1.8.3 'numpy<1.15.0,>=1.8.2' nose-timer 'requests<2.19.0,>=2.18.4' h5py==2.8.0rc1 scipy
+pip3 install nose cpplint==1.3.0 pylint==1.8.3 'numpy<1.15.0,>=1.8.2' nose-timer 'requests<2.19.0,>=2.18.4' h5py==2.8.0rc1 scipy
diff --git a/ci/docker/install/ubuntu_scala.sh b/ci/docker/install/ubuntu_scala.sh
index ee5554d218a..ce17a78dcc3 100755
--- a/ci/docker/install/ubuntu_scala.sh
+++ b/ci/docker/install/ubuntu_scala.sh
@@ -23,9 +23,7 @@
 set -ex
 # install libraries for mxnet's scala package on ubuntu
 apt-get install -y software-properties-common
-add-apt-repository -y ppa:webupd8team/java
 apt-get update
-echo "oracle-java8-installer shared/accepted-oracle-license-v1-1 select true" 
| debconf-set-selections
-apt-get install -y oracle-java8-installer
-apt-get install -y oracle-java8-set-default
-apt-get update && apt-get install -y maven
\ No newline at end of file
+apt-get install -y openjdk-8-jdk
+apt-get install -y openjdk-8-jre
+apt-get update && apt-get install -y maven
diff --git a/ci/docker/install/ubuntu_tutorials.sh b/ci/docker/install/ubuntu_tutorials.sh
new file mode 100755
index 00000000000..886ce93c94c
--- /dev/null
+++ b/ci/docker/install/ubuntu_tutorials.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# build and install are separated so changes to build don't invalidate
+# the whole docker cache for the image
+
+set -ex
+apt-get install graphviz python-opencv
+pip2 install jupyter matplotlib Pillow opencv-python scikit-learn graphviz
+pip3 install jupyter matplotlib Pillow opencv-python scikit-learn graphviz
\ No newline at end of file
diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh
index 44de137b6a8..d6a1d6c6490 100755
--- a/ci/docker/runtime_functions.sh
+++ b/ci/docker/runtime_functions.sh
@@ -349,6 +349,7 @@ sanity_check() {
     tools/license_header.py check
     make cpplint rcpplint jnilint
     make pylint
+    nosetests-3.4 tests/tutorials/test_sanity_tutorials.py
 }
 
 
@@ -385,6 +386,28 @@ unittest_ubuntu_python2_gpu() {
     nosetests-2.7 --verbose tests/python/gpu
 }
 
+tutorialtest_ubuntu_python3_gpu() {
+    set -ex
+    cd /work/mxnet/docs
+    export MXNET_DOCS_BUILD_MXNET=0
+    make html
+    export MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0
+    export PYTHONPATH=/work/mxnet/python/
+    export MXNET_TUTORIAL_TEST_KERNEL=python3
+    cd /work/mxnet/tests/tutorials && nosetests-3.4 test_tutorials.py --nologcapture
+}
+
+tutorialtest_ubuntu_python2_gpu() {
+    set -ex
+    cd /work/mxnet/docs
+    export MXNET_DOCS_BUILD_MXNET=0
+    make html
+    export MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0
+    export PYTHONPATH=/work/mxnet/python/
+    export MXNET_TUTORIAL_TEST_KERNEL=python2
+    cd /work/mxnet/tests/tutorials && nosetests-3.4 test_tutorials.py --nologcapture
+}
+
 unittest_ubuntu_python3_gpu() {
     set -ex
     export PYTHONPATH=./python/
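Both new functions first build the docs in-place (`make html` with `MXNET_DOCS_BUILD_MXNET=0`, so the library unpacked by the CI stage is reused rather than rebuilt) and then execute the tutorial notebooks; the Python 2 variant still drives nosetests with Python 3 and only switches the notebook kernel through `MXNET_TUTORIAL_TEST_KERNEL`. A rough sketch of triggering one of these targets through the CI wrapper, assuming a Linux host with nvidia-docker (it mirrors the Jenkinsfile stages added above):

```
ci/build.py --shm-size=3g --nvidiadocker --platform ubuntu_gpu /work/runtime_functions.sh tutorialtest_ubuntu_python2_gpu
```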
diff --git a/docs/build_version_doc/README.md b/docs/build_version_doc/README.md
index 221618b0aa5..6e768a37be1 100644
--- a/docs/build_version_doc/README.md
+++ b/docs/build_version_doc/README.md
@@ -133,6 +133,8 @@ From the MXNet source root run:
 make docs USE_OPENMP=1
 ```
 
+If you set `MXNET_DOCS_BUILD_MXNET=0`, the doc generation will skip the build step. This is useful when you have already built MXNet locally.
+
 The files from `make docs` are viewable in `docs/_build/html/`.
 
 **NOTE:** `make docs` doesn't add any version information, and the versions 
dropdown in the top level navigation is not present. UI bugs can be introduced 
when the versions dropdown is included, so just testing with `make docs` may be 
insufficient.
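For reference, the new skip flag combines with the documented doc build command roughly like this (a sketch that assumes MXNet itself has already been built locally, as the README note above describes):

```
# Run from the MXNet source root: regenerates the docs without rebuilding MXNet
MXNET_DOCS_BUILD_MXNET=0 make docs USE_OPENMP=1
```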
diff --git a/docs/mxdoc.py b/docs/mxdoc.py
index 7bec720051e..9be7160d16d 100644
--- a/docs/mxdoc.py
+++ b/docs/mxdoc.py
@@ -367,7 +367,11 @@ def add_buttons(app, docname, source):
         # source[i] = '\n'.join(lines)
 
 def setup(app):
-    app.connect("builder-inited", build_mxnet)
+
+    # If MXNET_DOCS_BUILD_MXNET is set to something other than '1',
+    # skip the build step
+    if os.getenv('MXNET_DOCS_BUILD_MXNET', '1') == '1':
+        app.connect("builder-inited", build_mxnet)
     app.connect("builder-inited", generate_doxygen)
     app.connect("builder-inited", build_scala_docs)
     # skipped to build r, it requires to install latex, which is kinds of too heavy
diff --git a/docs/tutorials/basic/module.md b/docs/tutorials/basic/module.md
index 11a3ffbc51d..dbc7b7a6059 100644
--- a/docs/tutorials/basic/module.md
+++ b/docs/tutorials/basic/module.md
@@ -313,5 +313,3 @@ assert score[0][1] > 0.77, "Achieved accuracy (%f) is less than expected (0.77)"
 
 
 <!-- INSERT SOURCE DOWNLOAD BUTTONS -->
-
-
diff --git a/docs/tutorials/gluon/datasets.md b/docs/tutorials/gluon/datasets.md
index 21eac78622f..0b0038def63 100644
--- a/docs/tutorials/gluon/datasets.md
+++ b/docs/tutorials/gluon/datasets.md
@@ -47,13 +47,14 @@ We get a tuple of a data sample and its corresponding label, which makes sense b
 
 A 
[`DataLoader`](https://mxnet.incubator.apache.org/api/python/gluon/data.html?highlight=dataloader#mxnet.gluon.data.DataLoader)
 is used to create mini-batches of samples from a 
[`Dataset`](https://mxnet.incubator.apache.org/api/python/gluon/data.html?highlight=dataset#mxnet.gluon.data.Dataset),
 and provides a convenient iterator interface for looping these batches. It's 
typically much more efficient to pass a mini-batch of data through a neural 
network than a single sample at a time, because the computation can be 
performed in parallel. A required parameter of 
[`DataLoader`](https://mxnet.incubator.apache.org/api/python/gluon/data.html?highlight=dataloader#mxnet.gluon.data.DataLoader)
 is the size of the mini-batches you want to create, called `batch_size`.
 
-Another benefit of using [`DataLoader`](https://mxnet.incubator.apache.org/api/python/gluon/data.html?highlight=dataloader#mxnet.gluon.data.DataLoader) is the ability to easily load data in parallel using [`multiprocessing`](https://docs.python.org/3.6/library/multiprocessing.html). Just set the `num_workers` parameter to the number of CPUs avaliable on your machine for maximum performance.
+Another benefit of using [`DataLoader`](https://mxnet.incubator.apache.org/api/python/gluon/data.html?highlight=dataloader#mxnet.gluon.data.DataLoader) is the ability to easily load data in parallel using [`multiprocessing`](https://docs.python.org/3.6/library/multiprocessing.html). You can set the `num_workers` parameter to the number of CPUs available on your machine for maximum performance, or limit it to a lower number to spare resources.
 
 
 ```python
 from multiprocessing import cpu_count
+CPU_COUNT = cpu_count()
 
-data_loader = mx.gluon.data.DataLoader(dataset, batch_size=5, num_workers=cpu_count())
+data_loader = mx.gluon.data.DataLoader(dataset, batch_size=5, num_workers=CPU_COUNT)
 
 for X_batch, y_batch in data_loader:
     print("X_batch has shape {}, and y_batch has shape 
{}".format(X_batch.shape, y_batch.shape))
@@ -120,8 +121,8 @@ If you have more complex shuffling requirements (e.g. when handling sequential d
 
 ```python
 batch_size = 32
-train_data_loader = mx.gluon.data.DataLoader(train_dataset, batch_size, shuffle=True, num_workers=cpu_count())
-valid_data_loader = mx.gluon.data.DataLoader(valid_dataset, batch_size, num_workers=cpu_count())
+train_data_loader = mx.gluon.data.DataLoader(train_dataset, batch_size, shuffle=True, num_workers=CPU_COUNT)
+valid_data_loader = mx.gluon.data.DataLoader(valid_dataset, batch_size, num_workers=CPU_COUNT)
 ```
 
 With both `DataLoader`s defined, we can now train a model to classify each 
image and evaluate the validation loss at each epoch. Our Fashion MNIST dataset 
has 10 classes including shirt, dress, sneakers, etc. We define a simple fully 
connected network with a softmax output and use cross entropy as our loss.
@@ -139,19 +140,23 @@ def construct_net():
     return net
 
 # construct and initialize network.
-ctx = mx.cpu()
+ctx =  mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+
 net = construct_net()
 net.hybridize()
-net.initialize(mx.init.Xavier())
+net.initialize(mx.init.Xavier(), ctx=ctx)
 # define loss and trainer.
 criterion = gluon.loss.SoftmaxCrossEntropyLoss()
 trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
+```
+
+```python
+
 
 epochs = 5
 for epoch in range(epochs):
-
     # training loop (with autograd and trainer steps, etc.)
-    cumulative_train_loss = mx.nd.array([0])
+    cumulative_train_loss = mx.nd.zeros(1, ctx=ctx)
     training_samples = 0
     for batch_idx, (data, label) in enumerate(train_data_loader):
         data = data.as_in_context(ctx).reshape((-1, 784)) # 28*28=784
@@ -166,7 +171,7 @@ for epoch in range(epochs):
     train_loss = cumulative_train_loss.asscalar()/training_samples
 
     # validation loop
-    cumulative_valid_loss = mx.nd.array([0])
+    cumulative_valid_loss = mx.nd.zeros(1, ctx)
     valid_samples = 0
     for batch_idx, (data, label) in enumerate(valid_data_loader):
         data = data.as_in_context(ctx).reshape((-1, 784)) # 28*28=784
diff --git a/docs/tutorials/onnx/fine_tuning_gluon.md b/docs/tutorials/onnx/fine_tuning_gluon.md
index c3015428ad7..fc940fc36f9 100644
--- a/docs/tutorials/onnx/fine_tuning_gluon.md
+++ b/docs/tutorials/onnx/fine_tuning_gluon.md
@@ -430,4 +430,4 @@ plot_predictions(caltech101_images_test, result, categories, TOP_P)
 
 **Great!** The network classified these images correctly after being 
fine-tuned on a dataset that contains images of `wrench`, `dolphin` and `lotus`
 
-<!-- INSERT SOURCE DOWNLOAD BUTTONS -->
+<!-- INSERT SOURCE DOWNLOAD BUTTONS -->
\ No newline at end of file
diff --git a/docs/tutorials/onnx/inference_on_onnx_model.md b/docs/tutorials/onnx/inference_on_onnx_model.md
index bdda820119e..f342dad9bea 100644
--- a/docs/tutorials/onnx/inference_on_onnx_model.md
+++ b/docs/tutorials/onnx/inference_on_onnx_model.md
@@ -265,4 +265,4 @@ We show that in our next tutorial:
 
 - [Fine-tuning an ONNX Model using the modern imperative 
MXNet/Gluon](http://mxnet.incubator.apache.org/tutorials/onnx/fine_tuning_gluon.html)
     
-<!-- INSERT SOURCE DOWNLOAD BUTTONS -->
+<!-- INSERT SOURCE DOWNLOAD BUTTONS -->
\ No newline at end of file
diff --git a/docs/tutorials/sparse/csr.md b/docs/tutorials/sparse/csr.md
index e66b10d9986..c0f9bfd589c 100644
--- a/docs/tutorials/sparse/csr.md
+++ b/docs/tutorials/sparse/csr.md
@@ -531,6 +531,3 @@ except mx.MXNetError as err:
 
 
 <!-- INSERT SOURCE DOWNLOAD BUTTONS -->
-
-
-
diff --git a/docs/tutorials/sparse/train.md b/docs/tutorials/sparse/train.md
index 6475f2e8907..0232281608e 100644
--- a/docs/tutorials/sparse/train.md
+++ b/docs/tutorials/sparse/train.md
@@ -36,6 +36,9 @@ We can specify the `stype` of a variable as "csr" or "row_sparse" to hold sparse
 
 ```python
 import mxnet as mx
+
+mx.random.seed(42) # set the seed for repeatability
+
 # Create a variable to hold an NDArray
 a = mx.sym.Variable('a')
 # Create a variable to hold a CSRNDArray
@@ -48,7 +51,7 @@ c = mx.sym.Variable('c', stype='row_sparse')
 
 
 
-    (<Symbol a>, <Symbol b>, <Symbol c>)
+`(<Symbol a>, <Symbol b>, <Symbol c>)` <!--notebook-skip-line-->
 
 
 
diff --git a/docs/tutorials/unsupervised_learning/gan.md b/docs/tutorials/unsupervised_learning/gan.md
index 1c99a02cc19..1556bf609aa 100644
--- a/docs/tutorials/unsupervised_learning/gan.md
+++ b/docs/tutorials/unsupervised_learning/gan.md
@@ -1,3 +1,4 @@
+
 # Generative Adversarial Network (GAN)
 
 Generative Adversarial Networks (GANs) are a class of algorithms used in 
unsupervised learning - you don't need labels for your dataset in order to 
train a GAN.
@@ -44,10 +45,9 @@ This example is designed to be trained on a single GPU. Training this network on
 To complete this tutorial, you need:
 
 - MXNet
-- Python 2.7, and the following libraries for Python:
+- Python, and the following libraries for Python:
     - Numpy - for matrix math
     - OpenCV - for image manipulation
-    - Scikit-learn - to easily get the MNIST dataset
     - Matplotlib - to visualize the output
 
 ## The Data
@@ -57,30 +57,40 @@ We need two pieces of data to train the DCGAN:
 
 The Generator network will use the random numbers as the input to produce the 
images of handwritten digits, and the Discriminator network will use images of 
handwritten digits from the MNIST dataset to determine if images produced by 
the Generator are realistic.
 
-We are going to use the python library, scikit-learn, to get the MNIST dataset. Scikit-learn comes with a function that gets the dataset for us, which we will then manipulate to create the training and testing inputs.
-
 The MNIST dataset contains 70,000 images of handwritten digits. Each image is 
28x28 pixels in size. To create random numbers, we're going to create a custom 
MXNet data iterator, which will returns random numbers from a normal 
distribution as we need then.
 
 ## Prepare the Data
 
 ### 1. Preparing the MNSIT dataset
 
-Let us start by preparing the handwritten digits from the MNIST dataset. We import the fetch_mldata function from scikit-learn, and use it to get the MNSIT dataset. Notice that it's shape is 70000x784. This contains 70000 images, one per row and 784 pixels of each image in the columns of each row. Each image is 28x28 pixels, but has been flattened so that all 784 pixels are represented in a single list.
+Let us start by preparing the handwritten digits from the MNIST dataset. 
+```python
+import mxnet as mx
+import numpy as np
+
+mnist_train = mx.gluon.data.vision.datasets.MNIST(train=True)
+mnist_test = mx.gluon.data.vision.datasets.MNIST(train=False)
+```
 
 ```python
-from sklearn.datasets import fetch_mldata
-mnist = fetch_mldata('MNIST original')
+# The downloaded data is of type `Dataset`, which is
+# well suited to the new Gluon interface but less so
+# to the older symbol API used in this tutorial.
+# Therefore we convert it to a numpy array first
+X = np.zeros((70000, 28, 28))
+for i, (data, label) in enumerate(mnist_train):
+    X[i] = data.asnumpy()[:,:,0]
+for i, (data, label) in enumerate(mnist_test):
+    X[len(mnist_train)+i] = data.asnumpy()[:,:,0]
 ```
 
-Next, we will randomize the handwritten digits by using numpy to create random permutations on the dataset on the rows (images). We will then reshape the dataset from 70000x786 to 70000x28x28, so that every image in the dataset is arranged into a 28x28 grid, where each cell in the grid represents 1 pixel of the image.
+Next, we will randomize the handwritten digits by using numpy to create random permutations on the dataset on the rows (images). Every image in the dataset is arranged into a 28x28 grid, where each cell in the grid represents 1 pixel of the image.
 
 ```python
-import numpy as np
 #Use a seed so that we get the same random permutation each time
 np.random.seed(1)
-p = np.random.permutation(mnist.data.shape[0])
-X = mnist.data[p]
-X = X.reshape((70000, 28, 28))
+p = np.random.permutation(X.shape[0])
+X = X[p]
 ```
 Since the DCGAN that we're creating takes in a 64x64 image as the input, we 
will use OpenCV to resize the each 28x28 image to 64x64 images:
 ```python
@@ -90,7 +100,7 @@ X = np.asarray([cv2.resize(x, (64,64)) for x in X])
 Each pixel in the 64x64 image is represented by a number between 0-255, that 
represents the intensity of the pixel. However, we want to input numbers 
between -1 and 1 into the DCGAN, as suggested by the [research 
paper](https://arxiv.org/abs/1511.06434). To rescale the pixel values, we will 
divide it by (255/2). This changes the scale to 0-2. We then subtract by 1 to 
get them in the range of -1 to 1.
 
 ```python
-X = X.astype(np.float32)/(255.0/2) - 1.0
+X = X.astype(np.float32, copy=False)/(255.0/2) - 1.0
 ```
 Ultimately, images are fed into the neural net through a 70000x3x64x64 array 
but they are currently in a 70000x64x64 array. We need to add 3 channels to the 
images. Typically, when we are working with the images, the 3 channels 
represent the red, green, and blue (RGB) components of each image. Since the 
MNIST dataset is grayscale, we only need 1 channel to represent the dataset. We 
will pad the other channels with 0's:
 
diff --git a/docs/tutorials/vision/large_scale_classification.md b/docs/tutorials/vision/large_scale_classification.md
index 4f4059810e7..aac03e4dd90 100644
--- a/docs/tutorials/vision/large_scale_classification.md
+++ b/docs/tutorials/vision/large_scale_classification.md
@@ -68,7 +68,7 @@ n00120010
 ```
 
 ### Remove uncommon classes for transfer learning (optional)
-A common reason to train a network on ImageNet data is to use it for transfer 
learning (including feature extraction or fine-tuning other models). According 
to [this](https://arxiv.org/pdf/1608.08614v1.pdf) study, classes with too few 
images don’t help in transfer learning. So, we could remove classes with fewer 
than a certain number of images. The following code will remove classes with 
less than 500 images.
+A common reason to train a network on ImageNet data is to use it for transfer 
learning (including feature extraction or fine-tuning other models). According 
to [this](https://arxiv.org/pdf/1608.08614v1.pdf) study, classes with too few 
images don't help in transfer learning. So, we could remove classes with fewer 
than a certain number of images. The following code will remove classes with 
less than 500 images.
 
 ```
 BAK=${ROOT}_filtered
@@ -85,7 +85,7 @@ done
 ```
 
 ### Generate a validation set
-To ensure we don’t overfit the data, we will create a validation set separate 
from the training set. During training, we will monitor loss on the validation 
set frequently. We create the validation set by picking fifty random images 
from each class and moving them to the validation set.
+To ensure we don't overfit the data, we will create a validation set separate 
from the training set. During training, we will monitor loss on the validation 
set frequently. We create the validation set by picking fifty random images 
from each class and moving them to the validation set.
 ```
 VAL_ROOT=${ROOT}_val
 mkdir -p ${VAL_ROOT}
@@ -100,7 +100,7 @@ done
 ```
 
 ### Pack images into record files
-While MXNet can read image files directly, it is recommended to pack the image 
files into a recordIO file for increased performance. MXNet provides a tool 
(tools/im2rec.py) to do this. To use this tool, MXNet and OpenCV’s python 
module needs to be installed in the system.
+While MXNet can read image files directly, it is recommended to pack the image 
files into a recordIO file for increased performance. MXNet provides a tool 
(tools/im2rec.py) to do this. To use this tool, MXNet and OpenCV's python 
module needs to be installed in the system.
 
 Set the environment variable `MXNET` to point to the MXNet installation 
directory and `NAME` to the name of the dataset. Here, we assume MXNet is 
installed at `~/mxnet`
 
@@ -157,7 +157,7 @@ We will use 16 machines (P2.16x instances), each containing 16 GPUs (Tesla K80).
 
 AWS CloudFormation makes it very easy to create deep learning clusters. We 
follow instructions from 
[this](https://aws.amazon.com/blogs/compute/distributed-deep-learning-made-easy/)
 page and create a deep learning cluster with 16 P2.16x instances.
 
-We load the data and code in the first machine (we’ll refer to this machine as 
master). We share both the data and code to other machines using EFS.
+We load the data and code in the first machine (we'll refer to this machine as 
master). We share both the data and code to other machines using EFS.
 
 If you are setting up your cluster manually, without using AWS CloudFormation, 
remember to do the following:
 1. Compile MXNet using `USE_DIST_KVSTORE=1` to enable distributed training.
@@ -177,7 +177,7 @@ If you are setting up your cluster manually, without using AWS CloudFormation, r
    ...
    ubuntu@ip-10-0-1-199:~$
    ```
-   One way to do this is to use ssh agent forwarding. Please check 
[this](https://aws.amazon.com/blogs/security/securely-connect-to-linux-instances-running-in-a-private-amazon-vpc/)
 page to learn how to set this up. In short, you’ll configure all machines to 
login using a particular certificate (mycert.pem) which is present on your 
local machine. You then login to the master using the certificate and the `-A` 
switch to enable agent forwarding. Now, from the master, you should be able to 
login to any other machine in the cluster by providing just the hostname 
(example: `ssh deeplearning-worker2`).
+   One way to do this is to use ssh agent forwarding. Please check 
[this](https://aws.amazon.com/blogs/security/securely-connect-to-linux-instances-running-in-a-private-amazon-vpc/)
 page to learn how to set this up. In short, you'll configure all machines to 
login using a particular certificate (mycert.pem) which is present on your 
local machine. You then login to the master using the certificate and the `-A` 
switch to enable agent forwarding. Now, from the master, you should be able to 
login to any other machine in the cluster by providing just the hostname 
(example: `ssh deeplearning-worker2`).
 
 ### Run Training
 After the cluster is setup, login to master and run the following command from 
${MXNET}/example/image-classification
@@ -258,7 +258,7 @@ It is often straightforward to achieve a reasonable validation accuracy, but ach
 - Check [this](http://mxnet.io/faq/perf.html) page for more details.
 
 ### Memory
-If the batch size is too big, it can exhaust GPU memory. If this happens, 
you’ll see the error message “cudaMalloc failed: out of memory” or something 
similar. There are a couple of ways to fix this:
+If the batch size is too big, it can exhaust GPU memory. If this happens, 
you'll see the error message "cudaMalloc failed: out of memory" or something 
similar. There are a couple of ways to fix this:
 - Reduce the batch size.
 - Set the environment variable `MXNET_BACKWARD_DO_MIRROR` to 1. It reduces the 
memory consumption by trading off speed. For example, with batch size 64, 
inception-v3 uses 10G memory and trains 30 image/sec on a single K80 GPU. When 
mirroring is enabled, with 10G GPU memory consumption, we can run inception-v3 
using batch size of 128. The cost is that, the speed reduces to 27 images/sec.
 
diff --git a/tests/nightly/Jenkinsfile b/tests/nightly/Jenkinsfile
deleted file mode 100644
index 361e6d7d9f5..00000000000
--- a/tests/nightly/Jenkinsfile
+++ /dev/null
@@ -1,28 +0,0 @@
-// -*- mode: groovy -*-
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-
-// Jenkins pipeline
-// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
-// Runs nightly builds
-
-stage("Hello World") {
-  node('mxnetlinux') {
-    sh "echo 'Hello World'"
-  }
-}
\ No newline at end of file
diff --git a/tests/nightly/test_tutorial.py b/tests/nightly/test_tutorial.py
deleted file mode 100644
index 3c9470c7104..00000000000
--- a/tests/nightly/test_tutorial.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-#pylint: disable=no-member, too-many-locals, too-many-branches, no-self-use, broad-except, lost-exception, too-many-nested-blocks, too-few-public-methods, invalid-name
-"""
-    This script converts all python tutorials into python script
-    and tests whether there is any warning or error.
-    After running python script, it will also convert markdown files
-    to notebooks to make sure notebook execution has no error.
-"""
-import os
-import warnings
-import imp
-import shutil
-import time
-import argparse
-import traceback
-import nbformat
-from nbconvert.preprocessors import ExecutePreprocessor
-import sys
-
-fail_dict = {}
-TIME_OUT = 1800
-
-def test_tutorial_nb(file_path, workingdir, kernel=None):
-    """Run tutorial jupyter notebook to catch any execution error.
-
-    Parameters
-    ----------
-    file_path : str
-        path of tutorial .ipynb file
-    workingdir: str
-        path of the directory to run the tutorial in
-    kernel: str
-        Default None
-        name of the kernel to use, if none, will use first kernel 
-        in the list
-    """
-    tutorial_name = os.path.basename(file_path)
-    sys.stdout.write('Testing {}...'.format(file_path))
-    sys.stdout.flush()
-    tick = time.time()
-    notebook = nbformat.read(file_path + '.ipynb', as_version=4)
-    if kernel:
-        eprocessor = ExecutePreprocessor(timeout=TIME_OUT, kernel_name=kernel)
-    else:
-        eprocessor = ExecutePreprocessor(timeout=TIME_OUT)
-    success = True
-    try:
-        eprocessor.preprocess(notebook, {'metadata': {'path':workingdir}})
-    except Exception as err:
-        err_msg = str(err)
-        fail_dict[tutorial_name] = err_msg
-        success = False
-    finally:
-        output_file = os.path.join(workingdir, "output.txt")
-        output_nb = open(output_file, mode='w')
-        nbformat.write(notebook, output_nb)
-        output_nb.close()
-        output_nb = open(output_file, mode='r')
-        for line in output_nb:
-            if "Warning:" in line:
-                success = False
-                if tutorial_name in fail_dict:
-                    fail_dict[tutorial_name] += "\n"+line
-                else:
-                    fail_dict[tutorial_name] = "Warning:\n"+line
-        sys.stdout.write(' Elapsed time: {0:.2f}s '.format(time.time()-tick  ))
-        sys.stdout.write(' [{}] \n'.format('Success' if success else 'Failed'))
-        sys.stdout.flush()
-
-
-if __name__ == "__main__":
-    tutorial_dir = os.path.join('..','..','docs', '_build', 'html', 'tutorials')
-    tick = time.time()
-    
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--tutorial", help="tutorial to test, if not set, read 
from test_tutorial_config.txt")
-    parser.add_argument("--kernel", help="name of the jupyter kernel to use 
for the test")
-    parser.add_argument("--no-cache", help="clean the temp directory", 
action="store_true", dest="no_cache")
-    args = parser.parse_args()
-    
-
-    tutorial_list = []
-    if args.tutorial:
-        tutorial_list.append(args.tutorial)
-    else:
-        with open('test_tutorial_config.txt') as config_file:
-            for line in config_file:
-                tutorial_list.append(line.lstrip().rstrip())
-    
-    temp_dir = 'tmp_notebook'
-    if args.no_cache:
-        print("Cleaning and setting up temp directory '{}'".format(temp_dir))
-        shutil.rmtree(temp_dir, ignore_errors=True)
-    
-    kernel = args.kernel if args.kernel else None
-    
-    for tutorial in tutorial_list:
-        file_dir = os.path.join(*([tutorial_dir]+tutorial.split('/')))
-        working_dir = os.path.join(*([temp_dir]+tutorial.split('/')))
-        if not os.path.isdir(working_dir):
-            os.makedirs(working_dir)
-        test_tutorial_nb(file_dir, working_dir, kernel)
-
-    fail_num = len(fail_dict)
-    success_num = len(tutorial_list) - fail_num
-    print("Test Summary Start")
-    print("%d tutorials tested:" % (len(tutorial_list)))
-    for tutorial in tutorial_list:
-        print(tutorial)
-    print("\n%d tests failed:" % (fail_num))
-    for tutorial, msg in fail_dict.items():
-        print(tutorial + ":")
-        print(msg)
-    print("Test Summary End")
-    print("Stats start")
-    print("[Passed: %d of %d]" % (success_num, len(tutorial_list)))
-    print("Total time: {:.2f}s".format(time.time()-tick))
-    print("Stats end")
-
-    if fail_num > 0:
-        exit(1)
-
diff --git a/tests/nightly/test_tutorial_config.txt b/tests/nightly/test_tutorial_config.txt
deleted file mode 100644
index be249b0c56a..00000000000
--- a/tests/nightly/test_tutorial_config.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-basic/ndarray
-basic/ndarray_indexing
-basic/symbol
-basic/module
-basic/data
-gluon/customop
-gluon/data_augmentation
-gluon/datasets
-gluon/ndarray
-gluon/mnist
-gluon/autograd
-gluon/gluon
-gluon/hybrid
-nlp/cnn
-onnx/super_resolution
-onnx/fine_tuning_gluon
-onnx/inference_on_onnx_model
-python/matrix_factorization
-python/linear-regression
-python/mnist
-python/predict_image
-python/data_augmentation
-python/data_augmentation_with_masks
-python/kvstore
-python/types_of_data_augmentation
-sparse/row_sparse
-sparse/csr
-sparse/train
-speech_recognition/ctc
-unsupervised_learning/gan
-vision/large_scale_classification
diff --git a/tests/tutorials/test_sanity_tutorials.py b/tests/tutorials/test_sanity_tutorials.py
new file mode 100644
index 00000000000..eec6f4e40a7
--- /dev/null
+++ b/tests/tutorials/test_sanity_tutorials.py
@@ -0,0 +1,83 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import glob
+import os
+import re
+
+# White list of tutorials that should not have a
+# 'Download jupyter notebook' button or be added to the
+# automated test suite.
+# Rules to be in the whitelist:
+# - not a python tutorial
+whitelist = ['c++/basics.md',
+             'embedded/wine_detector.md',
+             'r/CallbackFunction.md',
+             'r/charRnnModel.md',
+             'r/classifyRealImageWithPretrainedModel.md',
+             'r/CustomIterator.md',
+             'r/CustomLossFunction.md',
+             'r/fiveMinutesNeuralNetwork.md',
+             'r/index.md',
+             'r/mnistCompetition.md',
+             'r/ndarray.md',
+             'r/symbol.md',
+             'scala/char_lstm.md',
+             'scala/mnist.md',
+             'scala/README.md',
+             'scala/mxnet_scala_on_intellij.md']
+whitelist_set = set(whitelist)
+
+def test_tutorial_downloadable():
+    """
+    Make sure that every tutorial that isn't in the whitelist has the placeholder
+    that enables notebook download
+    """
+    download_button_string = '<!-- INSERT SOURCE DOWNLOAD BUTTONS -->'
+
+    tutorial_path = os.path.join(os.path.dirname(__file__), '..', '..', 'docs', 'tutorials')
+    tutorials = glob.glob(os.path.join(tutorial_path, '**', '*.md'))
+
+    for tutorial in tutorials:
+        with open(tutorial, 'r') as file:
+            lines= file.readlines()
+        last = lines[-1]
+        second_last = lines[-2]
+        downloadable = download_button_string in last or download_button_string in second_last
+        friendly_name = '/'.join(tutorial.split('/')[-2:])
+        if not downloadable and friendly_name  not in whitelist_set:
+            print(last, second_last)
+            assert False, "{} is missing <!-- INSERT SOURCE DOWNLOAD BUTTONS 
--> as its last line".format(friendly_name)
+
+def test_tutorial_tested():
+    """
+    Make sure that every tutorial that isn't in the whitelist
+    has been added to the tutorial test file
+    """
+    tutorial_test_file = os.path.join(os.path.dirname(__file__), 'test_tutorials.py')
+    f = open(tutorial_test_file, 'r')
+    tutorial_test_text = '\n'.join(f.readlines())
+    tutorial_path = os.path.join(os.path.dirname(__file__), '..', '..', 'docs', 'tutorials')
+    tutorials = glob.glob(os.path.join(tutorial_path, '**', '*.md'))
+
+    tested_tutorials = set(re.findall(r"assert _test_tutorial_nb\('(.*)'\)", tutorial_test_text))
+    for tutorial in tutorials:
+        friendly_name = '/'.join(tutorial.split('/')[-2:]).split('.')[0]
+        if friendly_name not in tested_tutorials and friendly_name+".md" not in whitelist_set:
+            assert False, "{} has not been added to the tests/tutorials/test_tutorials.py test_suite".format(friendly_name)
+
+
diff --git a/tests/tutorials/test_tutorials.py b/tests/tutorials/test_tutorials.py
new file mode 100644
index 00000000000..0949da7cebf
--- /dev/null
+++ b/tests/tutorials/test_tutorials.py
@@ -0,0 +1,200 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+#pylint: disable=no-member, too-many-locals, too-many-branches, no-self-use, broad-except, lost-exception, too-many-nested-blocks, too-few-public-methods, invalid-name
+"""
+    This file tests and ensures that all tutorials notebooks run
+    without warning or exception.
+
+    env variable MXNET_TUTORIAL_TEST_KERNEL controls which kernel to use
+    when running the notebook. e.g:
+    `export MXNET_TUTORIAL_TEST_KERNEL=python2`
+
+    env variable MXNET_TUTORIAL_TEST_NO_CACHE controls whether to clean the
+    temporary directory in which the notebook was run and re-download any
+    resource file. The default behaviour is to not clean the directory. Set to '1'
+    to force clean the directory. e.g:
+    `export MXNET_TUTORIAL_TEST_NO_CACHE=1`
+    NB: in the real CI, the tests will re-download everything since they start from
+    a clean workspace.
+"""
+import os
+import warnings
+import imp
+import shutil
+import time
+import argparse
+import traceback
+import nbformat
+from nbconvert.preprocessors import ExecutePreprocessor
+import sys
+
+
+# Maximum 7 minutes per test
+# Reaching timeout causes a test failure
+TIME_OUT = 7*60
+# Pin to ipython version 4
+IPYTHON_VERSION = 4
+temp_dir = 'tmp_notebook'
+
+def _test_tutorial_nb(tutorial):
+    """Run tutorial jupyter notebook to catch any execution error.
+
+    Parameters
+    ----------
+    tutorial : str
+        tutorial name in folder/tutorial format
+    """
+
+    tutorial_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'docs', '_build', 'html', 'tutorials')
+    tutorial_path = os.path.join(*([tutorial_dir] + tutorial.split('/')))
+
+    # see env variable docs in the doc string of the file
+    kernel = os.getenv('MXNET_TUTORIAL_TEST_KERNEL', None)
+    no_cache = os.getenv('MXNET_TUTORIAL_TEST_NO_CACHE', False)
+
+    working_dir = os.path.join(*([temp_dir] + tutorial.split('/')))
+
+    if no_cache == '1':
+        print("Cleaning and setting up temp directory 
'{}'".format(working_dir))
+        shutil.rmtree(temp_dir, ignore_errors=True)
+
+    errors = []
+    notebook = None
+    if not os.path.isdir(working_dir):
+        os.makedirs(working_dir)
+    try:
+        notebook = nbformat.read(tutorial_path + '.ipynb', as_version=IPYTHON_VERSION)
+        if kernel is not None:
+            eprocessor = ExecutePreprocessor(timeout=TIME_OUT, kernel_name=kernel)
+        else:
+            eprocessor = ExecutePreprocessor(timeout=TIME_OUT)
+        nb, stuff = eprocessor.preprocess(notebook, {'metadata': {'path': working_dir}})
+        print(stuff)
+    except Exception as err:
+        err_msg = str(err)
+        errors.append(err_msg)
+    finally:
+        if notebook is not None:
+            output_file = os.path.join(working_dir, "output.txt")
+            nbformat.write(notebook, output_file)
+            output_nb = open(output_file, mode='r')
+            for line in output_nb:
+                if "Warning:" in line:
+                    errors.append("Warning:\n"+line)
+        if len(errors) > 0:
+            print('\n'.join(errors))
+            return False
+        return True
+
+
+
+def test_basic_ndarray():
+   assert _test_tutorial_nb('basic/ndarray')
+
+def test_basic_ndarray_indexing():
+    assert _test_tutorial_nb('basic/ndarray_indexing')
+
+def test_basic_symbol():
+    assert _test_tutorial_nb('basic/symbol')
+
+def test_basic_module():
+    assert _test_tutorial_nb('basic/module')
+
+def test_basic_data():
+    assert _test_tutorial_nb('basic/data')
+
+def test_gluon_customop():
+    assert _test_tutorial_nb('gluon/customop')
+
+def test_gluon_data_augmentation():
+    assert _test_tutorial_nb('gluon/data_augmentation')
+
+def test_gluon_datasets():
+    assert _test_tutorial_nb('gluon/datasets')
+
+def test_gluon_naming():
+    assert _test_tutorial_nb('gluon/naming')
+
+def test_gluon_ndarray():
+    assert _test_tutorial_nb('gluon/ndarray')
+
+def test_gluon_mnist():
+    assert _test_tutorial_nb('gluon/mnist')
+
+def test_gluon_autograd():
+    assert _test_tutorial_nb('gluon/autograd')
+
+def test_gluon_gluon():
+    assert _test_tutorial_nb('gluon/gluon')
+
+def test_gluon_hybrid():
+    assert _test_tutorial_nb('gluon/hybrid')
+
+def test_nlp_cnn():
+    assert _test_tutorial_nb('nlp/cnn')
+
+def test_onnx_super_resolution():
+    assert _test_tutorial_nb('onnx/super_resolution')
+
+def test_onnx_fine_tuning_gluon():
+    assert _test_tutorial_nb('onnx/fine_tuning_gluon')
+
+def test_onnx_inference_on_onnx_model():
+    assert _test_tutorial_nb('onnx/inference_on_onnx_model')
+
+def test_python_matrix_factorization():
+    assert _test_tutorial_nb('python/matrix_factorization')
+
+def test_python_linear_regression() :
+    assert _test_tutorial_nb('python/linear-regression')
+
+def test_python_mnist():
+    assert _test_tutorial_nb('python/mnist')
+
+def test_python_predict_image():
+    assert _test_tutorial_nb('python/predict_image')
+
+def test_python_data_augmentation():
+    assert _test_tutorial_nb('python/data_augmentation')
+
+def test_python_data_augmentation_with_masks():
+    assert _test_tutorial_nb('python/data_augmentation_with_masks')
+
+def test_python_kvstore():
+    assert _test_tutorial_nb('python/kvstore')
+
+def test_python_types_of_data_augmentation():
+    assert _test_tutorial_nb('python/types_of_data_augmentation')
+
+def test_sparse_row_sparse():
+    assert _test_tutorial_nb('sparse/row_sparse')
+
+def test_sparse_csr():
+    assert _test_tutorial_nb('sparse/csr')
+
+def test_sparse_train():
+    assert _test_tutorial_nb('sparse/train')
+
+def test_speech_recognition_ctc():
+    assert _test_tutorial_nb('speech_recognition/ctc')
+
+def test_unsupervised_learning_gan():
+    assert _test_tutorial_nb('unsupervised_learning/gan')
+
+def test_vision_large_scale_classification():
+    assert _test_tutorial_nb('vision/large_scale_classification')
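Taken together, a rough local run of the new suite outside the CI containers could look like the sketch below. It assumes the docs have already been generated into `docs/_build/html/tutorials` and that a Jupyter kernel named `python3` is registered; the CI containers put `/work/mxnet/python` on `PYTHONPATH` instead of the source-checkout path used here.

```
# From the MXNet source root
export PYTHONPATH=$(pwd)/python
export MXNET_TUTORIAL_TEST_KERNEL=python3
export MXNET_TUTORIAL_TEST_NO_CACHE=1   # optional: start from a clean temp dir
cd tests/tutorials && nosetests-3.4 test_tutorials.py --nologcapture
```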


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
