This is an automated email from the ASF dual-hosted git repository.

marcoabreu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 779faef  [MXNET-131] Refine build.py, misc fixes, tweak commandline, 
add help (#10202)
779faef is described below

commit 779faef2eb909a36555c420c0358108a1f469efe
Author: Pedro Larroy <928489+lar...@users.noreply.github.com>
AuthorDate: Wed Apr 4 18:46:58 2018 +0200

    [MXNET-131] Refine build.py, misc fixes, tweak commandline, add help 
(#10202)
---
 Jenkinsfile  |  78 ++++++++++++++++++++--------------------
 ci/README.md |  37 +++++++++++++------
 ci/build.py  | 116 ++++++++++++++++++++++++++++++++++++++++++++++++-----------
 3 files changed, 160 insertions(+), 71 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 560829a..45b86fb 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -94,14 +94,14 @@ echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
 // Python 2
 def python2_ut(docker_container_name) {
   timeout(time: max_time, unit: 'MINUTES') {
-    sh "ci/build.py --build --platform ${docker_container_name} 
/work/runtime_functions.sh unittest_ubuntu_python2_cpu"
+    sh "ci/build.py --platform ${docker_container_name} 
/work/runtime_functions.sh unittest_ubuntu_python2_cpu"
   }
 }
 
 // Python 3
 def python3_ut(docker_container_name) {
   timeout(time: max_time, unit: 'MINUTES') {
-    sh "ci/build.py --build --platform ${docker_container_name} 
/work/runtime_functions.sh unittest_ubuntu_python3_cpu"
+    sh "ci/build.py --platform ${docker_container_name} 
/work/runtime_functions.sh unittest_ubuntu_python3_cpu"
   }
 }
 
@@ -110,14 +110,14 @@ def python3_ut(docker_container_name) {
 // Python 2
 def python2_gpu_ut(docker_container_name) {
   timeout(time: max_time, unit: 'MINUTES') {
-    sh "ci/build.py --nvidiadocker --build --platform ${docker_container_name} 
/work/runtime_functions.sh unittest_ubuntu_python2_gpu"
+    sh "ci/build.py --nvidiadocker --platform ${docker_container_name} 
/work/runtime_functions.sh unittest_ubuntu_python2_gpu"
   }
 }
 
 // Python 3
 def python3_gpu_ut(docker_container_name) {
   timeout(time: max_time, unit: 'MINUTES') {
-    sh "ci/build.py --nvidiadocker --build --platform ${docker_container_name} 
/work/runtime_functions.sh unittest_ubuntu_python3_gpu"
+    sh "ci/build.py --nvidiadocker --platform ${docker_container_name} 
/work/runtime_functions.sh unittest_ubuntu_python3_gpu"
   }
 }
 
@@ -126,7 +126,7 @@ try {
     node('mxnetlinux-cpu') {
       ws('workspace/sanity') {
         init_git()
-        sh "ci/build.py --build --platform ubuntu_cpu 
/work/runtime_functions.sh sanity_check"
+        sh "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh 
sanity_check"
       }
     }
   }
@@ -136,7 +136,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-centos7-cpu') {
           init_git()
-          sh "ci/build.py --build --platform centos7_cpu 
/work/runtime_functions.sh build_centos7_cpu"
+          sh "ci/build.py --platform centos7_cpu /work/runtime_functions.sh 
build_centos7_cpu"
           pack_lib('centos7_cpu')
         }
       }
@@ -145,7 +145,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-centos7-mkldnn') {
           init_git()
-          sh "ci/build.py --build --platform centos7_cpu 
/work/runtime_functions.sh build_centos7_mkldnn"
+          sh "ci/build.py --platform centos7_cpu /work/runtime_functions.sh 
build_centos7_mkldnn"
           pack_lib('centos7_mkldnn')
         }
       }
@@ -154,7 +154,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-centos7-gpu') {
           init_git()
-          sh "ci/build.py --build --platform centos7_gpu 
/work/runtime_functions.sh build_centos7_gpu"
+          sh "ci/build.py --platform centos7_gpu /work/runtime_functions.sh 
build_centos7_gpu"
           pack_lib('centos7_gpu')
         }
       }
@@ -163,7 +163,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-cpu-openblas') {
           init_git()
-          sh "ci/build.py --build --platform ubuntu_cpu 
/work/runtime_functions.sh build_ubuntu_cpu_openblas"
+          sh "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh 
build_ubuntu_cpu_openblas"
           pack_lib('cpu')
         }
       }
@@ -172,7 +172,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-cpu-clang39') {
           init_git()
-          sh "ci/build.py --build --platform ubuntu_cpu 
/work/runtime_functions.sh build_ubuntu_cpu_clang39"
+          sh "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh 
build_ubuntu_cpu_clang39"
         }
       }
     },
@@ -180,7 +180,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-cpu-clang50') {
           init_git()
-          sh "ci/build.py --build --platform ubuntu_cpu 
/work/runtime_functions.sh build_ubuntu_cpu_clang50"
+          sh "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh 
build_ubuntu_cpu_clang50"
         }
       }
     },
@@ -188,7 +188,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-cpu-mkldnn-clang39') {
           init_git()
-          sh "ci/build.py --build --platform ubuntu_cpu 
/work/runtime_functions.sh build_ubuntu_cpu_clang39_mkldnn"
+          sh "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh 
build_ubuntu_cpu_clang39_mkldnn"
           pack_lib('mkldnn_cpu_clang3', mx_mkldnn_lib)
         }
       }
@@ -197,7 +197,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-cpu-mkldnn-clang50') {
           init_git()
-          sh "ci/build.py --build --platform ubuntu_cpu 
/work/runtime_functions.sh build_ubuntu_cpu_clang50_mkldnn"
+          sh "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh 
build_ubuntu_cpu_clang50_mkldnn"
           pack_lib('mkldnn_cpu_clang5', mx_mkldnn_lib)
         }
       }
@@ -206,7 +206,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-mkldnn-cpu') {
           init_git()
-          sh "ci/build.py --build --platform ubuntu_cpu 
/work/runtime_functions.sh build_ubuntu_cpu_mkldnn"
+          sh "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh 
build_ubuntu_cpu_mkldnn"
           pack_lib('mkldnn_cpu', mx_mkldnn_lib)
         }
       }
@@ -215,7 +215,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-mkldnn-gpu') {
           init_git()
-          sh "ci/build.py --build --platform ubuntu_build_cuda 
/work/runtime_functions.sh build_ubuntu_gpu_mkldnn"
+          sh "ci/build.py --platform ubuntu_build_cuda 
/work/runtime_functions.sh build_ubuntu_gpu_mkldnn"
           pack_lib('mkldnn_gpu', mx_mkldnn_lib)
         }
       }
@@ -224,7 +224,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-gpu') {
           init_git()
-          sh "ci/build.py --build --platform ubuntu_build_cuda 
/work/runtime_functions.sh build_ubuntu_gpu_cuda91_cudnn7" 
+          sh "ci/build.py --platform ubuntu_build_cuda 
/work/runtime_functions.sh build_ubuntu_gpu_cuda91_cudnn7" 
           pack_lib('gpu')
           stash includes: 'build/cpp-package/example/test_score', name: 
'cpp_test_score'
           stash includes: 'build/cpp-package/example/test_optimizer', name: 
'cpp_test_optimizer'
@@ -235,7 +235,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/amalgamationmin') {
           init_git()
-          sh "ci/build.py --build --platform ubuntu_cpu 
/work/runtime_functions.sh build_ubuntu_amalgamation_min"
+          sh "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh 
build_ubuntu_amalgamation_min"
         }
       }
     },
@@ -243,7 +243,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/amalgamation') {
           init_git()
-          sh "ci/build.py --build --platform ubuntu_cpu 
/work/runtime_functions.sh build_ubuntu_amalgamation"
+          sh "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh 
build_ubuntu_amalgamation"
         }
       }
     },
@@ -252,7 +252,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-cmake-mkldnn-gpu') {
           init_git()
-          sh "ci/build.py --build --platform ubuntu_gpu 
/work/runtime_functions.sh build_ubuntu_gpu_cmake_mkldnn" //build_cuda
+          sh "ci/build.py --platform ubuntu_gpu /work/runtime_functions.sh 
build_ubuntu_gpu_cmake_mkldnn" //build_cuda
           pack_lib('cmake_mkldnn_gpu', mx_cmake_mkldnn_lib)
         }
       }
@@ -261,7 +261,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-cmake-gpu') {
           init_git()
-          sh "ci/build.py --build --platform ubuntu_gpu 
/work/runtime_functions.sh build_ubuntu_gpu_cmake" //build_cuda
+          sh "ci/build.py --platform ubuntu_gpu /work/runtime_functions.sh 
build_ubuntu_gpu_cmake" //build_cuda
           pack_lib('cmake_gpu', mx_cmake_lib)
         }
       }
@@ -336,7 +336,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-jetson-armv8') {
           init_git()
-          sh "ci/build.py --build --platform jetson /work/runtime_functions.sh 
build_jetson"
+          sh "ci/build.py --platform jetson /work/runtime_functions.sh 
build_jetson"
         }
       }
     },
@@ -344,7 +344,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-raspberry-armv7') {
           init_git()
-          sh "ci/build.py --build --platform armv7 /work/runtime_functions.sh 
build_armv7"
+          sh "ci/build.py --platform armv7 /work/runtime_functions.sh 
build_armv7"
         }
       }
     },
@@ -352,7 +352,7 @@ try {
       node('mxnetlinux-cpu') {
         ws('workspace/build-raspberry-armv6') {
           init_git()
-          sh "ci/build.py --build --platform armv6 /work/runtime_functions.sh 
build_armv6"
+          sh "ci/build.py --platform armv6 /work/runtime_functions.sh 
build_armv6"
         }
       }
     }
@@ -400,7 +400,7 @@ try {
         ws('workspace/ut-python2-quantize-gpu') {
           init_git()
           unpack_lib('gpu', mx_lib)
-          sh "ci/build.py --nvidiadocker --build --platform ubuntu_gpu 
/work/runtime_functions.sh unittest_ubuntu_python2_quantization_gpu"
+          sh "ci/build.py --nvidiadocker --platform ubuntu_gpu 
/work/runtime_functions.sh unittest_ubuntu_python2_quantization_gpu"
         }
       }
     },
@@ -409,7 +409,7 @@ try {
         ws('workspace/ut-python3-quantize-gpu') {
           init_git()
           unpack_lib('gpu', mx_lib)
-          sh "ci/build.py --nvidiadocker --build --platform ubuntu_gpu 
/work/runtime_functions.sh unittest_ubuntu_python3_quantization_gpu"
+          sh "ci/build.py --nvidiadocker --platform ubuntu_gpu 
/work/runtime_functions.sh unittest_ubuntu_python3_quantization_gpu"
         }
       }
     },
@@ -455,7 +455,7 @@ try {
           init_git()
           unpack_lib('centos7_cpu')
           timeout(time: max_time, unit: 'MINUTES') {
-            sh "ci/build.py --build --platform centos7_cpu 
/work/runtime_functions.sh unittest_centos7_cpu"
+            sh "ci/build.py --platform centos7_cpu /work/runtime_functions.sh 
unittest_centos7_cpu"
           }
         }
       }
@@ -466,7 +466,7 @@ try {
           init_git()
           unpack_lib('centos7_gpu')
           timeout(time: max_time, unit: 'MINUTES') {
-            sh "ci/build.py --nvidiadocker --build --platform centos7_gpu 
/work/runtime_functions.sh unittest_centos7_gpu"
+            sh "ci/build.py --nvidiadocker --platform centos7_gpu 
/work/runtime_functions.sh unittest_centos7_gpu"
           }
         }
       }
@@ -477,7 +477,7 @@ try {
           init_git()
           unpack_lib('cpu')
           timeout(time: max_time, unit: 'MINUTES') {
-            sh "ci/build.py --build --platform ubuntu_cpu 
/work/runtime_functions.sh unittest_ubuntu_cpu_scala"
+            sh "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh 
unittest_ubuntu_cpu_scala"
           }
         }
       }
@@ -488,7 +488,7 @@ try {
           init_git()
           unpack_lib('gpu')
           timeout(time: max_time, unit: 'MINUTES') {
-            sh "ci/build.py --nvidiadocker --build --platform ubuntu_gpu 
/work/runtime_functions.sh unittest_ubuntu_gpu_scala"
+            sh "ci/build.py --nvidiadocker --platform ubuntu_gpu 
/work/runtime_functions.sh unittest_ubuntu_gpu_scala"
           }
         }
       }
@@ -499,7 +499,7 @@ try {
           init_git()
           unpack_lib('cpu')
           timeout(time: max_time, unit: 'MINUTES') {
-            sh "ci/build.py --build --platform ubuntu_cpu 
/work/runtime_functions.sh unittest_ubuntu_cpugpu_perl"
+            sh "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh 
unittest_ubuntu_cpugpu_perl"
           }
         }
       }
@@ -510,7 +510,7 @@ try {
           init_git()
           unpack_lib('gpu')
           timeout(time: max_time, unit: 'MINUTES') {
-            sh "ci/build.py --nvidiadocker --build --platform ubuntu_gpu 
/work/runtime_functions.sh unittest_ubuntu_cpugpu_perl"
+            sh "ci/build.py --nvidiadocker --platform ubuntu_gpu 
/work/runtime_functions.sh unittest_ubuntu_cpugpu_perl"
           }
         }
       }
@@ -521,7 +521,7 @@ try {
           init_git()
           unpack_lib('cmake_gpu', mx_cmake_lib)
           timeout(time: max_time, unit: 'MINUTES') {
-            sh "ci/build.py --nvidiadocker --build --platform ubuntu_gpu 
/work/runtime_functions.sh unittest_ubuntu_gpu_cpp"
+            sh "ci/build.py --nvidiadocker --platform ubuntu_gpu 
/work/runtime_functions.sh unittest_ubuntu_gpu_cpp"
           }
         }
       }
@@ -532,7 +532,7 @@ try {
           init_git()
           unpack_lib('cpu')
           timeout(time: max_time, unit: 'MINUTES') {
-            sh "ci/build.py --build --platform ubuntu_cpu 
/work/runtime_functions.sh unittest_ubuntu_cpu_R"
+            sh "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh 
unittest_ubuntu_cpu_R"
           }
         }
       }
@@ -543,7 +543,7 @@ try {
           init_git()
           unpack_lib('gpu')
           timeout(time: max_time, unit: 'MINUTES') {
-            sh "ci/build.py --nvidiadocker --build --platform ubuntu_gpu 
/work/runtime_functions.sh unittest_ubuntu_gpu_R"
+            sh "ci/build.py --nvidiadocker --platform ubuntu_gpu 
/work/runtime_functions.sh unittest_ubuntu_gpu_R"
           }
         }
       }
@@ -630,7 +630,7 @@ try {
           init_git()
           unpack_lib('cpu')
           timeout(time: max_time, unit: 'MINUTES') {
-               sh "ci/build.py --build --platform ubuntu_cpu 
/work/runtime_functions.sh integrationtest_ubuntu_cpu_onnx"
+            sh "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh 
integrationtest_ubuntu_cpu_onnx"
           }
         }
       }
@@ -641,7 +641,7 @@ try {
           init_git()
           unpack_lib('gpu')
           timeout(time: max_time, unit: 'MINUTES') {
-            sh "ci/build.py --nvidiadocker --build --platform ubuntu_gpu 
/work/runtime_functions.sh integrationtest_ubuntu_gpu_python"
+            sh "ci/build.py --nvidiadocker --platform ubuntu_gpu 
/work/runtime_functions.sh integrationtest_ubuntu_gpu_python"
           }
         }
       }
@@ -652,7 +652,7 @@ try {
           init_git()
           unpack_lib('gpu')
           timeout(time: max_time, unit: 'MINUTES') {
-            sh "ci/build.py --nvidiadocker --build --platform ubuntu_gpu 
/work/runtime_functions.sh integrationtest_ubuntu_gpu_caffe"
+            sh "ci/build.py --nvidiadocker --platform ubuntu_gpu 
/work/runtime_functions.sh integrationtest_ubuntu_gpu_caffe"
           }
         }
       }
@@ -665,7 +665,7 @@ try {
           unstash 'cpp_test_score'
           unstash 'cpp_test_optimizer'
           timeout(time: max_time, unit: 'MINUTES') {
-            sh "ci/build.py --nvidiadocker --build --platform ubuntu_gpu 
/work/runtime_functions.sh integrationtest_ubuntu_gpu_cpp_package"
+            sh "ci/build.py --nvidiadocker --platform ubuntu_gpu 
/work/runtime_functions.sh integrationtest_ubuntu_gpu_cpp_package"
           }
         }
       }
@@ -677,7 +677,7 @@ try {
       ws('workspace/docs') {
         init_git()
         timeout(time: max_time, unit: 'MINUTES') {
-          sh "ci/build.py --build --platform ubuntu_cpu 
/work/runtime_functions.sh deploy_docs"
+          sh "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh 
deploy_docs"
           sh "tests/ci_build/deploy/ci_deploy_doc.sh ${env.BRANCH_NAME} 
${env.BUILD_NUMBER}"
         }        
       }
diff --git a/ci/README.md b/ci/README.md
index 98c74e3..1c59a3a 100644
--- a/ci/README.md
+++ b/ci/README.md
@@ -1,10 +1,12 @@
 # Containerized build & test utilities
 
-This folder contains scripts and dockerfiles used to build and test MXNet 
using Docker containers
+This folder contains scripts and dockerfiles used to build and test MXNet using
+Docker containers
 
 You need docker and nvidia docker if you have a GPU.
 
-If you are in ubuntu an easy way to install Docker CE is executing the 
following script:
+If you are in ubuntu an easy way to install Docker CE is executing the
+following script:
 
 
 ```
@@ -29,22 +31,35 @@ For detailed instructions go to the docker documentation.
 
 ## build.py
 
-The main utility to build is build.py which will run docker and mount the 
mxnet folder as a volume
-to do in-place builds.
+The main utility to build is build.py which will run docker and mount the mxnet
+folder as a volume to do in-place builds.
 
-The build.py script does two functions, build the docker image, and it can be 
also used to run
-commands inside this image with the propper mounts and paraphernalia required 
to build mxnet inside
-docker from the sources on the parent folder.
+The build.py script does two functions, build the docker image, and it can be
+also used to run commands inside this image with the proper mounts and
+paraphernalia required to build mxnet inside docker from the sources on the
+parent folder.
 
-A set of helper shell functions are in `functions.sh`. `build.py --help` will 
display usage
+A set of helper shell functions are in `docker/runtime_functions.sh`.
+`build.py` without arguments or `build.py --help` will display usage
 information about the tool.
 
 To build for armv7 for example:
 
 ```
-./build.py -p armv7 /work/functions.sh build_armv7
+./build.py -p armv7
 ```
 
+The artifacts are located in the build/ directory in the project root. In case
+`build.py -a` is invoked, the artifacts are located in build.<platform>/
+
+## Add a platform
+
+To add a platform, you should add the appropriate dockerfile in
+docker/Dockerfile.build.<platform> and add a shell function named
+build_<platform> to the file docker/runtime_functions.sh with build
+instructions for that platform.
+
 ## Warning
-Due to current limitations of the CMake build system creating artifacts in the 
source 3rdparty
-folder of the parent mxnet sources concurrent builds of different platforms is 
NOT SUPPORTED.
+Due to current limitations of the CMake build system creating artifacts in the
+source 3rdparty folder of the parent mxnet sources concurrent builds of
+different platforms is NOT SUPPORTED.
diff --git a/ci/build.py b/ci/build.py
index 8caf52b..e1e4560 100755
--- a/ci/build.py
+++ b/ci/build.py
@@ -36,6 +36,7 @@ import re
 from typing import *
 from itertools import chain
 from copy import deepcopy
+import shutil
 
 
 def get_platforms(path: Optional[str]="docker"):
@@ -48,7 +49,7 @@ def get_platforms(path: Optional[str]="docker"):
     return platforms
 
 
-def get_docker_tag(platform: str) -> None:
+def get_docker_tag(platform: str) -> str:
     return "mxnet/build.{0}".format(platform)
 
 
@@ -84,14 +85,15 @@ def get_mxnet_root() -> str:
         curpath = parent
     return curpath
 
+def buildir() -> str:
+    return os.path.join(get_mxnet_root(), "build")
 
-def container_run(platform: str, docker_binary: str, command: List[str]) -> 
None:
+def container_run(platform: str, docker_binary: str, command: List[str], 
dry_run: bool = False, into_container: bool = False) -> str:
     tag = get_docker_tag(platform)
     mx_root = get_mxnet_root()
-    local_build_folder = '{}/build'.format(mx_root)
+    local_build_folder = buildir()
     # We need to create it first, otherwise it will be created by the docker 
daemon with root only permissions
     os.makedirs(local_build_folder, exist_ok=True)
-    logging.info("Running %s in container %s", command, tag)
     runlist = [docker_binary, 'run', '--rm',
         '-v', "{}:/work/mxnet".format(mx_root), # mount mxnet root
         '-v', "{}:/work/build".format(local_build_folder), # mount mxnet/build 
for storing build artifacts
@@ -99,16 +101,32 @@ def container_run(platform: str, docker_binary: str, 
command: List[str]) -> None
         tag]
     runlist.extend(command)
     cmd = ' '.join(runlist)
-    logging.info("Executing: %s", cmd)
-    ret = call(runlist)
-    if ret != 0:
-        logging.error("Running of command in container failed: %s", cmd)
-        into_cmd = deepcopy(runlist)
-        idx = into_cmd.index('-u') + 2
-        into_cmd[idx:idx] = ['-ti', '--entrypoint', 'bash']
-        logging.error("You can try to get into the container by using the 
following command: %s", ' '.join(into_cmd))
+    if not dry_run and not into_container:
+        logging.info("Running %s in container %s", command, tag)
+        logging.info("Executing: %s", cmd)
+        ret = call(runlist)
+
+    into_cmd = deepcopy(runlist)
+    idx = into_cmd.index('-u') + 2
+    into_cmd[idx:idx] = ['-ti', '--entrypoint', '/bin/bash']
+    docker_run_cmd = ' '.join(into_cmd)
+    if not dry_run and into_container:
+        check_call(into_cmd)
+
+    if not dry_run and ret != 0:
+        logging.error("Running of command in container failed (%s): %s", ret, 
cmd)
+        logging.error("You can try to get into the container by using the 
following command: %s", docker_run_cmd)
         raise subprocess.CalledProcessError(ret, cmd)
 
+    return docker_run_cmd
+
+def list_platforms():
+    platforms = get_platforms()
+    print("\nSupported platforms:\n")
+    print('\n'.join(platforms))
+    print()
+
+
 def main() -> int:
     # We need to be in the same directory than the script so the commands in 
the dockerfiles work as
     # expected. But the script can be invoked from a different path
@@ -121,13 +139,18 @@ def main() -> int:
 
     logging.basicConfig(format='{}: %(asctime)-15s 
%(message)s'.format(script_name()))
 
-    parser = argparse.ArgumentParser()
+    parser = argparse.ArgumentParser(description="""Utility for building and 
testing MXNet on docker
+    containers""",epilog="")
     parser.add_argument("-p", "--platform",
                         help="platform",
                         type=str)
 
-    parser.add_argument("-b", "--build",
-                        help="Build the container",
+    parser.add_argument("--build-only",
+                        help="Only build the container, don't build the 
project",
+                        action='store_true')
+
+    parser.add_argument("-a", "--all",
+                        help="build for all platforms",
                         action='store_true')
 
     parser.add_argument("-n", "--nvidiadocker",
@@ -138,6 +161,14 @@ def main() -> int:
                         help="List platforms",
                         action='store_true')
 
+    parser.add_argument("--print-docker-run",
+                        help="print docker run command for manual inspection",
+                        action='store_true')
+
+    parser.add_argument("-i", "--into-container",
+                        help="go in a shell inside the container",
+                        action='store_true')
+
     parser.add_argument("command",
                         help="command to run in the container",
                         nargs='*', action='append', type=str)
@@ -146,31 +177,74 @@ def main() -> int:
     command = list(chain(*args.command))
     docker_binary = get_docker_binary(args.nvidiadocker)
 
+    print("into container: {}".format(args.into_container))
     if args.list:
-        platforms = get_platforms()
-        print(platforms)
+        list_platforms()
 
     elif args.platform:
         platform = args.platform
-        if args.build:
-            build_docker(platform, docker_binary)
+        build_docker(platform, docker_binary)
+        if args.build_only:
+            logging.warn("Container was just built. Exiting due to 
build-only.")
+            return 0
+
         tag = get_docker_tag(platform)
         if command:
             container_run(platform, docker_binary, command)
+        elif args.print_docker_run:
+            print(container_run(platform, docker_binary, [], True))
+        elif args.into_container:
+            container_run(platform, docker_binary, [], False, True)
         else:
             cmd = ["/work/mxnet/ci/docker/runtime_functions.sh", 
"build_{}".format(platform)]
             logging.info("No command specified, trying default build: %s", ' 
'.join(cmd))
             container_run(platform, docker_binary, cmd)
 
-    else:
+    elif args.all:
         platforms = get_platforms()
         logging.info("Building for all architectures: {}".format(platforms))
         logging.info("Artifacts will be produced in the build/ directory.")
         for platform in platforms:
             build_docker(platform, docker_binary)
+            if args.build_only:
+                continue
             cmd = ["/work/mxnet/ci/docker/runtime_functions.sh", 
"build_{}".format(platform)]
-            logging.info("No command specified, trying default build: %s", ' 
'.join(cmd))
+            shutil.rmtree(buildir(), ignore_errors=True)
             container_run(platform, docker_binary, cmd)
+            plat_buildir = os.path.join(get_mxnet_root(), 
"build_{}".format(platform))
+            shutil.move(buildir(), plat_buildir)
+            logging.info("Built files left in: %s", plat_buildir)
+
+
+    else:
+        parser.print_help()
+        list_platforms()
+        print("""
+Examples:
+
+./build.py -p armv7
+
+    Will build a docker container with cross compilation tools and build MXNet 
for armv7 by
+    running: ci/docker/runtime_functions.sh build_armv7 inside the container.
+
+./build.py -p armv7 ls
+
+    Will execute the given command inside the armv7 container
+
+./build.py -p armv7 --print-docker-run
+
+    Will print a docker run command to get inside the container in an 
interactive shell
+
+./build.py -p armv7 --into-container
+
+    Will execute a shell into the container
+
+./build.py -a
+
+    Builds for all platforms and leaves artifacts in build_<platform>
+
+    """)
+
 
     return 0
 

-- 
To stop receiving notification emails like this one, please contact
marcoab...@apache.org.

Reply via email to