This is an automated email from the ASF dual-hosted git repository.

lausen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 319e6c1  CI: Switch to cmake builds for majority of tests (#17645)
319e6c1 is described below

commit 319e6c180edef80d5535e5744a4ac6c84cc143e4
Author: Leonard Lausen <[email protected]>
AuthorDate: Fri Feb 28 11:59:22 2020 -0800

    CI: Switch to cmake builds for majority of tests (#17645)
    
    The following Makefile-based builds are preserved:
    1) staticbuild scripts
    2) Docs builds. Language-binding-specific build logic requires further
       changes.
    3) Jetson build. The Jetpack 3.3 toolchain, based on Cuda 9.0, causes
       'Internal Compiler Error (codegen): "there was an error in verifying
       the lgenfe output!"' errors with cmake. This appears to be a known
       issue in Cuda 9.0, and we need to update the Jetpack toolchain to work
       around it.
    4) MKL builds. Waiting for the fix of #17641.
    
    All Makefile-based builds are marked with a "Makefile" suffix in the title.
    
    Improvements to the CMake build (example configuration sketched below)
    - Enable -Werror for the RelWithDebInfo build, analogous to the "make DEV=1" build
    - Add a USE_LIBJPEG_TURBO option to the CMake build
    - Improve finding the Python 3 executable
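
    For illustration only (not part of the original commit): a minimal local
    sketch of configuring a build with the options touched here. The build
    directory, generator choice, and the USE_CUDA/USE_MKLDNN settings are
    assumptions; USE_LIBJPEG_TURBO and the -Werror behaviour of the
    RelWithDebInfo build type come from this change, and USE_LIBJPEG_TURBO
    additionally requires pkg-config and libjpeg-turbo to be installed.

        # Out-of-source CMake configure using options touched by this commit.
        # RelWithDebInfo now implies -Werror; USE_LIBJPEG_TURBO is a new option.
        mkdir -p build && cd build
        cmake \
            -DCMAKE_BUILD_TYPE=RelWithDebInfo \
            -DUSE_LIBJPEG_TURBO=ON \
            -DUSE_CUDA=OFF \
            -DUSE_MKLDNN=OFF \
            -G Ninja ..
        ninja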
    
    Changes to the CI setup
    - Install protobuf and zmq where missing
    - Install an up-to-date CMake on CentOS 7 (sketched below)
    - Don't use RelWithDebInfo on Android builds, as gcc 4.9 emits
      -Wdelete-non-virtual-dtor warnings
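
    For illustration only: a condensed sketch of how the CentOS 7 images obtain
    a recent CMake, taken from the ci/docker/install/centos7_*.sh changes below
    (CMake 3.13.2+ is required; the /opt/cmake prefix and the 3.13.5 installer
    match those scripts).

        # CentOS 7 ships an old CMake, so install 3.13.5 from the upstream installer.
        mkdir /opt/cmake && cd /opt/cmake
        wget -nv https://cmake.org/files/v3.13/cmake-3.13.5-Linux-x86_64.sh
        sh cmake-3.13.5-Linux-x86_64.sh --prefix=/opt/cmake --skip-license
        ln -s /opt/cmake/bin/cmake /usr/local/bin/cmake
        cmake --version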
    
    Code changes
    - Disable warnings introduced by GCC 7 via #pragma GCC diagnostic
---
 .github/workflows/os_x_staticbuild.yml  |   2 +-
 3rdparty/mshadow/CMakeLists.txt         |   9 +-
 3rdparty/mshadow/mshadow/base.h         |   9 +-
 CMakeLists.txt                          |  40 +--
 R-package/Makefile                      |   2 +-
 ci/docker/install/centos7_base.sh       |  15 +-
 ci/docker/install/centos7_core.sh       |  15 +-
 ci/docker/install/ubuntu_core.sh        |   2 +
 ci/docker/runtime_functions.sh          | 423 +++++++++++++++-----------------
 ci/jenkins/Jenkins_steps.groovy         | 186 ++++++++------
 ci/jenkins/Jenkinsfile_centos_cpu       |   5 +-
 ci/jenkins/Jenkinsfile_unix_cpu         |   2 +
 ci/jenkins/Jenkinsfile_unix_gpu         |   3 +-
 cmake/BuildCythonModules.cmake          |  27 +-
 config/distribution/darwin_cpu.cmake    |   1 +
 config/distribution/linux_cpu.cmake     |   1 +
 config/distribution/linux_cu100.cmake   |   1 +
 config/distribution/linux_cu101.cmake   |   1 +
 config/distribution/linux_cu102.cmake   |   1 +
 config/distribution/linux_cu75.cmake    |   1 +
 config/distribution/linux_cu80.cmake    |   1 +
 config/distribution/linux_cu90.cmake    |   1 +
 config/distribution/linux_cu91.cmake    |   1 +
 config/distribution/linux_cu92.cmake    |   1 +
 src/operator/mshadow_op.h               |  23 ++
 src/operator/numpy/linalg/np_norm-inl.h |  10 +
 26 files changed, 433 insertions(+), 350 deletions(-)

diff --git a/.github/workflows/os_x_staticbuild.yml b/.github/workflows/os_x_staticbuild.yml
index eabe88f..6e313a0 100644
--- a/.github/workflows/os_x_staticbuild.yml
+++ b/.github/workflows/os_x_staticbuild.yml
@@ -10,7 +10,7 @@ jobs:
         uses: actions/checkout@v2
       - name: Install Dependencies
         run: |
-          brew install nasm automake ninja libtool
+          brew install nasm automake ninja libtool cmake pkgconfig protobuf
       - name: Build project
         run: |
           git --version
diff --git a/3rdparty/mshadow/CMakeLists.txt b/3rdparty/mshadow/CMakeLists.txt
index 3b898a4..3a347fd 100644
--- a/3rdparty/mshadow/CMakeLists.txt
+++ b/3rdparty/mshadow/CMakeLists.txt
@@ -13,6 +13,12 @@ add_library(mshadow INTERFACE)
 file(GLOB_RECURSE MSHADOWSOURCE "mshadow/*.h")
 target_include_directories(mshadow INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}")
 target_sources(mshadow INTERFACE ${MSHADOWSOURCE})
+if(UNIX)
+  target_compile_options(mshadow INTERFACE
+    "$<$<COMPILE_LANGUAGE:CXX>:-Wno-unused-parameter>"
+    "$<$<COMPILE_LANGUAGE:CXX>:-Wno-unknown-pragmas>"
+    "$<$<COMPILE_LANGUAGE:CXX>:-Wno-unused-local-typedefs>")
+endif()
 
 if(USE_CUDA)
   enable_language(CUDA)
@@ -67,7 +73,8 @@ else()
 endif()
 
 set(mshadow_LINT_DIRS mshadow mshadow-ps)
+find_package(Python3)
 add_custom_target(mshadow_lint COMMAND ${CMAKE_COMMAND} -DMSVC=${MSVC}
-  -DPYTHON_EXECUTABLE=${PYTHON_EXECUTABLE} -DLINT_DIRS=${mshadow_LINT_DIRS}
+  -DPYTHON_EXECUTABLE=${Python3_EXECUTABLE} -DLINT_DIRS=${mshadow_LINT_DIRS}
   -DPROJECT_SOURCE_DIR=${PROJECT_SOURCE_DIR} -DPROJECT_NAME=mshadow
   -P ${PROJECT_SOURCE_DIR}/../dmlc-core/cmake/lint.cmake)
diff --git a/3rdparty/mshadow/mshadow/base.h b/3rdparty/mshadow/mshadow/base.h
index 28fbd86..a998384 100755
--- a/3rdparty/mshadow/mshadow/base.h
+++ b/3rdparty/mshadow/mshadow/base.h
@@ -18,12 +18,13 @@
 #define NOMINMAX
 #endif
 #endif
-#include <cmath>
-#include <cstdio>
+#include <algorithm>
 #include <cfloat>
 #include <climits>
-#include <algorithm>
+#include <cmath>
+#include <cstdio>
 #include <functional>
+#include <limits>
 #include <sstream>
 #include <string>
 
@@ -839,7 +840,7 @@ MSHADOW_XINLINE bool MaxValue<bool>(void) {
 /*! \brief maximum value of uint32_t */
 template<>
 MSHADOW_XINLINE uint32_t MaxValue<uint32_t>(void) {
-  return -1;
+  return std::numeric_limits<uint32_t>::max();
 }
 
 /*!
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4731663..526a1da 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -51,6 +51,7 @@ else()
 endif()
 option(USE_GPERFTOOLS "Build with GPerfTools support" OFF)
 option(USE_JEMALLOC "Build with Jemalloc support" OFF)
+option(USE_LIBJPEG_TURBO "Use libjpeg-turbo" OFF)
 option(USE_DIST_KVSTORE "Build with DIST_KVSTORE support" OFF)
 option(USE_PLUGINS_WARPCTC "Use WARPCTC Plugins" OFF)
 option(USE_PLUGIN_CAFFE "Use Caffe Plugin" OFF)
@@ -381,6 +382,16 @@ if(USE_JEMALLOC)
   endif()
 endif()
 
+if(USE_LIBJPEG_TURBO)
+  find_package(PkgConfig REQUIRED)
+  pkg_search_module(TURBOJPEG REQUIRED libturbojpeg)
+  include_directories(SYSTEM ${TURBOJPEG_INCLUDE_DIRS})
+  list(APPEND mxnet_LINKER_LIBS ${TURBOJPEG_LINK_LIBRARIES})
+  add_definitions(-DMXNET_USE_LIBJPEG_TURBO=1)
+else()
+  add_definitions(-DMXNET_USE_LIBJPEG_TURBO=0)
+endif()
+
 # ---[ OpenCV
 if(USE_OPENCV)
   find_package(OpenCV COMPONENTS core highgui imgproc imgcodecs)
@@ -661,7 +672,7 @@ add_subdirectory("3rdparty/mshadow")
 
 set(MXNET_INSTALL_TARGETS mxnet)
 if(UNIX)
-  string(APPEND CMAKE_CUDA_FLAGS "${CUDA_ARCH_FLAGS_SPACES}")
+  string(APPEND CMAKE_CUDA_FLAGS " ${CUDA_ARCH_FLAGS_SPACES}")
   # Create dummy file since we want an empty shared library before linking
   set(DUMMY_SOURCE ${CMAKE_BINARY_DIR}/dummy.c)
   file(WRITE ${DUMMY_SOURCE} "")
@@ -673,6 +684,15 @@ if(UNIX)
   target_link_libraries(mxnet PRIVATE mxnet_static)
   target_link_libraries(mxnet_static PUBLIC ${CMAKE_DL_LIBS})
   set_target_properties(mxnet_static PROPERTIES OUTPUT_NAME mxnet)
+  if(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")
+    target_compile_options(mxnet_static PRIVATE 
"$<$<COMPILE_LANGUAGE:CXX>:-Werror>")
+    # Ignore erroneous compiler warnings:
+    # 1) variables used in '#pragma omp parallel' are considered unused
+    target_compile_options(mxnet_static PRIVATE 
"$<$<COMPILE_LANGUAGE:CXX>:-Wno-error=unused-variable>")
+    if(USE_CUDA)
+      string(APPEND CMAKE_CUDA_FLAGS " -Werror cross-execution-space-call")
+    endif()
+  endif()
 elseif(MSVC)
   if(USE_CUDA)
     if(MSVC)
@@ -708,7 +728,7 @@ elseif(MSVC)
           COMMAND gen_warp $<TARGET_FILE:mxnet_${mxnet_first_arch}> 
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/ DEPENDS 
$<TARGET_FILE:mxnet_${mxnet_first_arch}>)
       else(USE_SPLIT_ARCH_DLL)
         string(REPLACE ";" " " NVCC_FLAGS_ARCH "${NVCC_FLAGS_ARCH}")
-        set(CMAKE_CUDA_FLAGS "${CUDA_ARCH_FLAGS_SPACES}")
+        set(CMAKE_CUDA_FLAGS " ${CUDA_ARCH_FLAGS_SPACES}")
         add_library(mxnet SHARED ${SOURCE})
         target_link_libraries(mxnet PUBLIC mshadow)
         target_compile_options(
@@ -778,14 +798,7 @@ endfunction()
 if(USE_TVM_OP)
   list(APPEND mxnet_LINKER_LIBS 
${CMAKE_CURRENT_BINARY_DIR}/3rdparty/tvm/libtvm_runtime.so)
   BuildTVMOP()
-  if(NOT Python3_EXECUTABLE)
-    find_package(PythonInterp 3 REQUIRED)
-    set(Python3_EXECUTABLE ${PYTHON_EXECUTABLE} CACHE FILEPATH "Path to the 
python3 executable")
-    if(NOT Python3_EXECUTABLE)
-      message(FATAL_ERROR "No python3 interpreter found to build TVM 
operators")
-    endif()
-  endif()
-
+  find_package(Python3 REQUIRED)
   set(TVM_OP_COMPILE_OPTIONS "-o${CMAKE_CURRENT_BINARY_DIR}" "--config" 
"${CMAKE_CURRENT_BINARY_DIR}/tvmop.conf" "-L" 
"${CMAKE_CURRENT_BINARY_DIR}/3rdparty/tvm")
   if(USE_CUDA)
     set(TVM_OP_COMPILE_OPTIONS "${TVM_OP_COMPILE_OPTIONS}" "--cuda-arch" 
"\"${CUDA_ARCH_FLAGS}\"")
@@ -904,13 +917,10 @@ endif()
 add_subdirectory(tests)
 
 # ---[ Linter target
-if(MSVC)
-  find_package(PythonInterp)
-  set(PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE} CACHE FILEPATH "Path to the 
python executable")
-endif()
+find_package(Python3)
 set(LINT_DIRS "include src plugin cpp-package tests")
 set(EXCLUDE_PATH "src/operator/contrib/ctc_include")
-add_custom_target(mxnet_lint COMMAND ${CMAKE_COMMAND} -DMSVC=${MSVC} 
-DPYTHON_EXECUTABLE=${PYTHON_EXECUTABLE} -DLINT_DIRS=${LINT_DIRS} 
-DPROJECT_SOURCE_DIR=${CMAKE_CURRENT_SOURCE_DIR} -DPROJECT_NAME=mxnet 
-DEXCLUDE_PATH=${EXCLUDE_PATH} -P 
${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/dmlc-core/cmake/lint.cmake)
+add_custom_target(mxnet_lint COMMAND ${CMAKE_COMMAND} -DMSVC=${MSVC} 
-DPYTHON_EXECUTABLE=${Python3_EXECUTABLE} -DLINT_DIRS=${LINT_DIRS} 
-DPROJECT_SOURCE_DIR=${CMAKE_CURRENT_SOURCE_DIR} -DPROJECT_NAME=mxnet 
-DEXCLUDE_PATH=${EXCLUDE_PATH} -P 
${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/dmlc-core/cmake/lint.cmake)
 
 if(BUILD_CYTHON_MODULES)
   include(cmake/BuildCythonModules.cmake)
diff --git a/R-package/Makefile b/R-package/Makefile
index 6824903..5a8bc42 100644
--- a/R-package/Makefile
+++ b/R-package/Makefile
@@ -1,5 +1,5 @@
 rcpplint:
-       3rdparty/dmlc-core/scripts/lint.py mxnet-rcpp ${LINT_LANG} R-package/src
+       ./3rdparty/dmlc-core/scripts/lint.py mxnet-rcpp all R-package/src
 
 rpkg:
        mkdir -p R-package/inst/libs
diff --git a/ci/docker/install/centos7_base.sh b/ci/docker/install/centos7_base.sh
index 3b84aeb..c5f860e 100755
--- a/ci/docker/install/centos7_base.sh
+++ b/ci/docker/install/centos7_base.sh
@@ -27,7 +27,20 @@ yum -y install epel-release
 yum -y install git
 yum -y install wget
 yum -y install make
-yum -y install cmake
 yum -y install unzip
 yum -y install ninja-build
 yum -y install gcc-gfortran
+yum -y install protobuf-compiler
+yum -y install protobuf-devel
+yum -y install zeromq-devel
+
+# Centos 7 only provides ninja-build
+ln -s /usr/bin/ninja-build /usr/bin/ninja
+
+# CMake 3.13.2+ is required
+mkdir /opt/cmake && cd /opt/cmake
+wget -nv https://cmake.org/files/v3.13/cmake-3.13.5-Linux-x86_64.sh
+sh cmake-3.13.5-Linux-x86_64.sh --prefix=/opt/cmake --skip-license
+ln -s /opt/cmake/bin/cmake /usr/local/bin/cmake
+rm cmake-3.13.5-Linux-x86_64.sh
+cmake --version
diff --git a/ci/docker/install/centos7_core.sh b/ci/docker/install/centos7_core.sh
index 577f9db..fbdb239 100755
--- a/ci/docker/install/centos7_core.sh
+++ b/ci/docker/install/centos7_core.sh
@@ -30,10 +30,23 @@ yum -y install atlas-devel # Provide clbas headerfiles
 yum -y install openblas-devel
 yum -y install lapack-devel
 yum -y install opencv-devel
+yum -y install protobuf-compiler
+yum -y install protobuf-devel
+yum -y install zeromq-devel
 yum -y install openssl-devel
 yum -y install gcc-c++-4.8.*
 yum -y install make
-yum -y install cmake
 yum -y install wget
 yum -y install unzip
 yum -y install ninja-build
+
+# Centos 7 only provides ninja-build
+ln -s /usr/bin/ninja-build /usr/bin/ninja
+
+# CMake 3.13.2+ is required
+mkdir /opt/cmake && cd /opt/cmake
+wget -nv https://cmake.org/files/v3.13/cmake-3.13.5-Linux-x86_64.sh
+sh cmake-3.13.5-Linux-x86_64.sh --prefix=/opt/cmake --skip-license
+ln -s /opt/cmake/bin/cmake /usr/local/bin/cmake
+rm cmake-3.13.5-Linux-x86_64.sh
+cmake --version
diff --git a/ci/docker/install/ubuntu_core.sh b/ci/docker/install/ubuntu_core.sh
index 2773aa2..9ff3ac7 100755
--- a/ci/docker/install/ubuntu_core.sh
+++ b/ci/docker/install/ubuntu_core.sh
@@ -47,6 +47,8 @@ apt-get install -y \
     zlib1g-dev \
     libedit-dev \
     libxml2-dev \
+    libprotobuf-dev \
+    protobuf-compiler \
     ninja-build \
     software-properties-common \
     sudo \
diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh
index 0c7630f..b4394c0 100755
--- a/ci/docker/runtime_functions.sh
+++ b/ci/docker/runtime_functions.sh
@@ -156,70 +156,50 @@ gather_licenses() {
 
 build_ubuntu_cpu_release() {
     set -ex
-
-    build_ccache_wrappers
-
-    make  \
-        DEV=0                         \
-        ENABLE_TESTCOVERAGE=0         \
-        USE_CPP_PACKAGE=0             \
-        USE_MKLDNN=0                  \
-        USE_BLAS=openblas             \
-        USE_SIGNAL_HANDLER=1          \
-        -j$(nproc)
+    cd /work/build
+    cmake \
+        -DUSE_MKL_IF_AVAILABLE=OFF \
+        -DUSE_MKLDNN=OFF \
+        -DUSE_CUDA=OFF \
+        -G Ninja /work/mxnet
+    ninja
 }
 
 build_ubuntu_cpu_mkldnn_release() {
     set -ex
-
-    build_ccache_wrappers
-
-    make  \
-        DEV=0                         \
-        ENABLE_TESTCOVERAGE=0         \
-        USE_CPP_PACKAGE=0             \
-        USE_MKLDNN=1                  \
-        USE_BLAS=openblas             \
-        USE_SIGNAL_HANDLER=1          \
-        -j$(nproc)
+    cd /work/build
+    cmake \
+        -DUSE_MKL_IF_AVAILABLE=OFF \
+        -DUSE_MKLDNN=ON \
+        -DUSE_CUDA=OFF \
+        -G Ninja /work/mxnet
+    ninja
 }
 
 build_ubuntu_gpu_release() {
     set -ex
-    # unfortunately this build has problems in 3rdparty dependencies with 
ccache and make
-    # build_ccache_wrappers
-
-    make \
-        DEV=0                                     \
-        ENABLE_TESTCOVERAGE=0                     \
-        USE_BLAS=openblas                         \
-        USE_MKLDNN=0                              \
-        USE_CUDA=1                                \
-        USE_CUDA_PATH=/usr/local/cuda             \
-        USE_CUDNN=1                               \
-        USE_CPP_PACKAGE=0                         \
-        USE_DIST_KVSTORE=1                        \
-        USE_SIGNAL_HANDLER=1                      \
-        -j$(nproc)
+    cd /work/build
+    cmake \
+        -DUSE_MKL_IF_AVAILABLE=OFF \
+        -DUSE_MKLDNN=OFF \
+        -DUSE_DIST_KVSTORE=ON \
+        -DUSE_CUDA=ON \
+        -DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
+        -G Ninja /work/mxnet
+    ninja
 }
 
 build_ubuntu_gpu_mkldnn_release() {
     set -ex
-    # unfortunately this build has problems in 3rdparty dependencies with 
ccache and make
-    # build_ccache_wrappers
-
-    make \
-        DEV=0                                     \
-        ENABLE_TESTCOVERAGE=0                     \
-        USE_BLAS=openblas                         \
-        USE_MKLDNN=1                              \
-        USE_CUDA=1                                \
-        USE_CUDA_PATH=/usr/local/cuda             \
-        USE_CUDNN=1                               \
-        USE_CPP_PACKAGE=0                         \
-        USE_DIST_KVSTORE=1                        \
-        USE_SIGNAL_HANDLER=1                      \
-        -j$(nproc)
+    cd /work/build
+    cmake \
+        -DUSE_MKL_IF_AVAILABLE=OFF \
+        -DUSE_MKLDNN=ON \
+        -DUSE_DIST_KVSTORE=ON \
+        -DUSE_CUDA=ON \
+        -DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
+        -G Ninja /work/mxnet
+    ninja
 }
 
 # Compiles the dynamic mxnet library
@@ -265,7 +245,6 @@ build_jetson() {
 
 build_armv6() {
     set -ex
-    pushd .
     cd /work/build
 
     # Lapack functionality will be included and statically linked to openblas.
@@ -291,12 +270,10 @@ build_armv6() {
 
     ninja
     build_wheel
-    popd
 }
 
 build_armv7() {
     set -ex
-    pushd .
     cd /work/build
 
     # Lapack functionality will be included and statically linked to openblas.
@@ -321,11 +298,11 @@ build_armv7() {
 
     ninja
     build_wheel
-    popd
 }
 
 build_armv8() {
     build_ccache_wrappers
+    cd /work/build
     cmake \
         -DUSE_CUDA=OFF\
         -DSUPPORT_F16C=OFF\
@@ -358,7 +335,6 @@ build_android_armv7() {
         -DUSE_OPENCV=OFF\
         -DUSE_OPENMP=OFF\
         -DUSE_SIGNAL_HANDLER=ON\
-        -DCMAKE_BUILD_TYPE=RelWithDebInfo\
         -DUSE_MKL_IF_AVAILABLE=OFF\
         -G Ninja /work/mxnet
     ninja
@@ -376,7 +352,6 @@ build_android_armv8() {
         -DUSE_OPENCV=OFF\
         -DUSE_OPENMP=OFF\
         -DUSE_SIGNAL_HANDLER=ON\
-        -DCMAKE_BUILD_TYPE=RelWithDebInfo\
         -DUSE_MKL_IF_AVAILABLE=OFF\
         -G Ninja /work/mxnet
     ninja
@@ -384,6 +359,19 @@ build_android_armv8() {
 
 build_centos7_cpu() {
     set -ex
+    cd /work/build
+    cmake \
+        -DCMAKE_BUILD_TYPE="RelWithDebInfo" \
+        -DUSE_MKL_IF_AVAILABLE=OFF \
+        -DUSE_MKLDNN=OFF \
+        -DUSE_DIST_KVSTORE=ON \
+        -DUSE_CUDA=OFF \
+        -G Ninja /work/mxnet
+    ninja
+}
+
+build_centos7_cpu_make() {
+    set -ex
     cd /work/mxnet
     export CC="ccache gcc"
     export CXX="ccache g++"
@@ -400,6 +388,7 @@ build_centos7_cpu() {
 }
 
 build_amzn_linux_cpu() {
+    set -ex
     cd /work/build
     build_ccache_wrappers
     cmake \
@@ -417,36 +406,28 @@ build_amzn_linux_cpu() {
 
 build_centos7_mkldnn() {
     set -ex
-    cd /work/mxnet
-    export CC="ccache gcc"
-    export CXX="ccache g++"
-    build_ccache_wrappers
-    make \
-        DEV=1 \
-        USE_LAPACK=1 \
-        USE_LAPACK_PATH=/usr/lib64/liblapack.so \
-        USE_BLAS=openblas \
-        USE_SIGNAL_HANDLER=1 \
-        -j$(nproc)
+    cd /work/build
+    cmake \
+        -DCMAKE_BUILD_TYPE="RelWithDebInfo" \
+        -DUSE_MKL_IF_AVAILABLE=OFF \
+        -DUSE_MKLDNN=ON \
+        -DUSE_CUDA=OFF \
+        -G Ninja /work/mxnet
+    ninja
 }
 
 build_centos7_gpu() {
     set -ex
-    cd /work/mxnet
-    # unfortunately this build has problems in 3rdparty dependencies with 
ccache and make
-    build_ccache_wrappers
-    make \
-        DEV=1                                     \
-        USE_LAPACK=1                              \
-        USE_LAPACK_PATH=/usr/lib64/liblapack.so   \
-        USE_BLAS=openblas                         \
-        USE_MKLDNN=0                              \
-        USE_CUDA=1                                \
-        USE_CUDA_PATH=/usr/local/cuda             \
-        USE_CUDNN=1                               \
-        USE_DIST_KVSTORE=1                        \
-        CUDA_ARCH="$CI_CUDA_COMPUTE_CAPABILITIES" \
-        -j$(nproc)
+    cd /work/build
+    cmake \
+        -DCMAKE_BUILD_TYPE="RelWithDebInfo" \
+        -DUSE_MKL_IF_AVAILABLE=OFF \
+        -DUSE_MKLDNN=ON \
+        -DUSE_CUDA=ON \
+        -DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
+        -DUSE_DIST_KVSTORE=ON\
+        -G Ninja /work/mxnet
+    ninja
 }
 
 build_ubuntu_cpu() {
@@ -455,6 +436,22 @@ build_ubuntu_cpu() {
 
 build_ubuntu_cpu_openblas() {
     set -ex
+    cd /work/build
+    cmake \
+        -DCMAKE_BUILD_TYPE="RelWithDebInfo" \
+        -DUSE_TVM_OP=ON \
+        -DUSE_CPP_PACKAGE=ON \
+        -DUSE_MKL_IF_AVAILABLE=OFF \
+        -DUSE_MKLDNN=OFF \
+        -DUSE_CUDA=OFF \
+        -DUSE_DIST_KVSTORE=ON \
+        -DBUILD_CYTHON_MODULES=ON \
+        -G Ninja /work/mxnet
+    ninja
+}
+
+build_ubuntu_cpu_openblas_make() {
+    set -ex
     export CC="gcc"
     export CXX="g++"
     build_ccache_wrappers
@@ -489,13 +486,11 @@ build_ubuntu_cpu_mkl() {
 
 build_ubuntu_cpu_cmake_debug() {
     set -ex
-    pushd .
     cd /work/build
     build_ccache_wrappers
     cmake \
         -DUSE_CUDA=OFF \
         -DUSE_TVM_OP=ON \
-        -DPython3_EXECUTABLE=/usr/bin/python3 \
         -DUSE_MKL_IF_AVAILABLE=OFF \
         -DUSE_OPENMP=OFF \
         -DUSE_OPENCV=ON \
@@ -503,20 +498,16 @@ build_ubuntu_cpu_cmake_debug() {
         -DCMAKE_BUILD_TYPE=Debug \
         -G Ninja \
         /work/mxnet
-
     ninja
-    popd
 }
 
 build_ubuntu_cpu_cmake_no_tvm_op() {
     set -ex
-    pushd .
     cd /work/build
     build_ccache_wrappers
     cmake \
         -DUSE_CUDA=OFF \
         -DUSE_TVM_OP=OFF \
-        -DPython3_EXECUTABLE=/usr/bin/python3 \
         -DUSE_MKL_IF_AVAILABLE=OFF \
         -DUSE_OPENMP=OFF \
         -DUSE_OPENCV=ON \
@@ -526,13 +517,11 @@ build_ubuntu_cpu_cmake_no_tvm_op() {
         /work/mxnet
 
     ninja
-    popd
 }
 
 build_ubuntu_cpu_cmake_asan() {
     set -ex
 
-    pushd .
     cd /work/build
     export CXX=g++-8
     export CC=gcc-8
@@ -555,99 +544,80 @@ build_ubuntu_cpu_cmake_asan() {
     ASAN_OPTIONS=detect_leaks=0 \
     LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libasan.so.5 \
     make -j $(nproc) mlp_cpu
-    popd
 }
 
 build_ubuntu_cpu_clang39() {
     set -ex
-    export CXX=clang++-3.9
-    export CC=clang-3.9
-    build_ccache_wrappers
-    make \
-        USE_CPP_PACKAGE=1             \
-        USE_BLAS=openblas             \
-        USE_MKLDNN=0                  \
-        USE_OPENMP=0                  \
-        USE_DIST_KVSTORE=1            \
-        -j$(nproc)
+    cd /work/build
+    CXX=clang++-3.9 CC=clang-3.9 cmake \
+        -DUSE_MKL_IF_AVAILABLE=OFF \
+        -DUSE_MKLDNN=OFF \
+        -DUSE_CUDA=OFF \
+        -DUSE_OPENMP=OFF \
+        -DUSE_DIST_KVSTORE=ON \
+        -DUSE_CPP_PACKAGE=ON \
+        -G Ninja /work/mxnet
+    ninja
 }
 
 build_ubuntu_cpu_clang60() {
     set -ex
-
-    export CXX=clang++-6.0
-    export CC=clang-6.0
-
-    build_ccache_wrappers
-
-    make  \
-        USE_CPP_PACKAGE=1             \
-        USE_BLAS=openblas             \
-        USE_MKLDNN=0                  \
-        USE_OPENMP=1                  \
-        USE_DIST_KVSTORE=1            \
-        -j$(nproc)
+    cd /work/build
+    CXX=clang++-6.0 CC=clang-6.0 cmake \
+       -DUSE_MKL_IF_AVAILABLE=OFF \
+       -DUSE_MKLDNN=OFF \
+       -DUSE_CUDA=OFF \
+       -DUSE_OPENMP=ON \
+       -DUSE_DIST_KVSTORE=ON \
+       -DUSE_CPP_PACKAGE=ON \
+       -G Ninja /work/mxnet
+    ninja
 }
 
 build_ubuntu_cpu_clang_tidy() {
     set -ex
-
-    export CXX=clang++-6.0
-    export CC=clang-6.0
-    export CLANG_TIDY=/usr/lib/llvm-6.0/share/clang/run-clang-tidy.py
-
-    pushd .
     cd /work/build
-    build_ccache_wrappers
-    cmake \
-        -DUSE_CUDA=OFF \
-        -DUSE_MKLDNN=OFF \
-        -DUSE_MKL_IF_AVAILABLE=OFF \
-        -DUSE_OPENCV=ON \
-        -DCMAKE_BUILD_TYPE=Debug \
-        -G Ninja \
-        -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
-        /work/mxnet
-
+    export CLANG_TIDY=/usr/lib/llvm-6.0/share/clang/run-clang-tidy.py
+    CXX=clang++-6.0 CC=clang-6.0 cmake \
+       -DUSE_MKL_IF_AVAILABLE=OFF \
+       -DUSE_MKLDNN=OFF \
+       -DUSE_CUDA=OFF \
+       -DCMAKE_BUILD_TYPE=Debug \
+       -DUSE_DIST_KVSTORE=ON \
+       -DUSE_CPP_PACKAGE=ON \
+       -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
+       -G Ninja /work/mxnet
     ninja
     cd /work/mxnet
     $CLANG_TIDY -p /work/build -j $(nproc) -clang-tidy-binary clang-tidy-6.0 
/work/mxnet/src
-    popd
 }
 
 build_ubuntu_cpu_clang39_mkldnn() {
     set -ex
-
-    export CXX=clang++-3.9
-    export CC=clang-3.9
-
-    build_ccache_wrappers
-
-    make \
-        USE_CPP_PACKAGE=1             \
-        USE_BLAS=openblas             \
-        USE_OPENMP=0                  \
-        USE_SIGNAL_HANDLER=1          \
-        -j$(nproc)
+    cd /work/build
+    CXX=clang++-3.9 CC=clang-3.9 cmake \
+       -DUSE_MKL_IF_AVAILABLE=OFF \
+       -DUSE_MKLDNN=ON \
+       -DUSE_CUDA=OFF \
+       -DUSE_CPP_PACKAGE=ON \
+       -DUSE_OPENMP=OFF \
+       -G Ninja /work/mxnet
+    ninja
 }
 
 build_ubuntu_cpu_clang60_mkldnn() {
     set -ex
-
-    export CXX=clang++-6.0
-    export CC=clang-6.0
-
-    build_ccache_wrappers
-
-    make \
-        USE_CPP_PACKAGE=1             \
-        USE_BLAS=openblas             \
-        USE_OPENMP=1                  \
-        USE_SIGNAL_HANDLER=1          \
-        -j$(nproc)
+    cd /work/build
+    CXX=clang++-6.0 CC=clang-6.0 cmake \
+       -DUSE_MKL_IF_AVAILABLE=OFF \
+       -DUSE_MKLDNN=ON \
+       -DUSE_CUDA=OFF \
+       -DUSE_CPP_PACKAGE=ON \
+       -G Ninja /work/mxnet
+    ninja
 }
 
-build_ubuntu_cpu_mkldnn() {
+build_ubuntu_cpu_mkldnn_make() {
     set -ex
 
     build_ccache_wrappers
@@ -661,9 +631,22 @@ build_ubuntu_cpu_mkldnn() {
         -j$(nproc)
 }
 
-build_ubuntu_cpu_mkldnn_mkl() {
+build_ubuntu_cpu_mkldnn() {
     set -ex
+    cd /work/build
+    cmake \
+        -DCMAKE_BUILD_TYPE="RelWithDebInfo" \
+       -DUSE_MKL_IF_AVAILABLE=OFF \
+       -DUSE_TVM_OP=ON \
+       -DUSE_MKLDNN=ON \
+       -DUSE_CUDA=OFF \
+       -DUSE_CPP_PACKAGE=ON \
+       -G Ninja /work/mxnet
+    ninja
+}
 
+build_ubuntu_cpu_mkldnn_mkl() {
+    set -ex
     build_ccache_wrappers
 
     make  \
@@ -735,41 +718,52 @@ build_ubuntu_gpu_tensorrt() {
 
 build_ubuntu_gpu_mkldnn() {
     set -ex
-
-    build_ccache_wrappers
-
-    make  \
-        DEV=1                                     \
-        USE_CPP_PACKAGE=1                         \
-        USE_BLAS=openblas                         \
-        USE_CUDA=1                                \
-        USE_CUDA_PATH=/usr/local/cuda             \
-        USE_CUDNN=1                               \
-        USE_TVM_OP=1                              \
-        CUDA_ARCH="$CI_CUDA_COMPUTE_CAPABILITIES" \
-        USE_SIGNAL_HANDLER=1                      \
-        -j$(nproc)
+    cd /work/build
+    cmake \
+        -DCMAKE_BUILD_TYPE="RelWithDebInfo" \
+        -DUSE_MKL_IF_AVAILABLE=OFF \
+        -DUSE_TVM_OP=ON \
+        -DUSE_CUDA=ON \
+        -DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
+        -DUSE_CPP_PACKAGE=ON \
+        -G Ninja /work/mxnet
+    ninja
 }
 
 build_ubuntu_gpu_mkldnn_nocudnn() {
     set -ex
-
-    build_ccache_wrappers
-
-    make  \
-        DEV=1                                     \
-        USE_BLAS=openblas                         \
-        USE_CUDA=1                                \
-        USE_CUDA_PATH=/usr/local/cuda             \
-        USE_CUDNN=0                               \
-        USE_TVM_OP=1                              \
-        CUDA_ARCH="$CI_CUDA_COMPUTE_CAPABILITIES" \
-        USE_SIGNAL_HANDLER=1                      \
-        -j$(nproc)
+    cd /work/build
+    cmake \
+        -DCMAKE_BUILD_TYPE="RelWithDebInfo" \
+        -DUSE_MKL_IF_AVAILABLE=OFF \
+        -DUSE_TVM_OP=ON \
+        -DUSE_CUDA=ON \
+        -DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
+        -DUSE_CUDNN=OFF \
+        -DUSE_CPP_PACKAGE=ON \
+        -G Ninja /work/mxnet
+    ninja
 }
 
 build_ubuntu_gpu_cuda101_cudnn7() {
     set -ex
+    cd /work/build
+    cmake \
+        -DCMAKE_BUILD_TYPE="RelWithDebInfo" \
+        -DUSE_MKL_IF_AVAILABLE=OFF \
+        -DUSE_TVM_OP=ON \
+        -DUSE_CUDA=ON \
+        -DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
+        -DUSE_CUDNN=ON \
+        -DUSE_MKLDNN=OFF \
+        -DUSE_CPP_PACKAGE=ON \
+        -DBUILD_CYTHON_MODULES=ON \
+        -G Ninja /work/mxnet
+    ninja
+}
+
+build_ubuntu_gpu_cuda101_cudnn7_make() {
+    set -ex
     build_ccache_wrappers
     make \
         DEV=1                                     \
@@ -809,22 +803,19 @@ build_ubuntu_gpu_cuda101_cudnn7_mkldnn_cpp_test() {
 
 build_ubuntu_gpu_cuda101_cudnn7_no_tvm_op() {
     set -ex
-    build_ccache_wrappers
-    make \
-        DEV=1                                     \
-        USE_BLAS=openblas                         \
-        USE_MKLDNN=0                              \
-        USE_CUDA=1                                \
-        USE_CUDA_PATH=/usr/local/cuda             \
-        USE_CUDNN=1                               \
-        USE_TVM_OP=0                              \
-        USE_CPP_PACKAGE=1                         \
-        USE_DIST_KVSTORE=1                        \
-        CUDA_ARCH="$CI_CUDA_COMPUTE_CAPABILITIES" \
-        USE_SIGNAL_HANDLER=1                      \
-        -j$(nproc)
-
-    make cython PYTHON=python3
+    cd /work/build
+    cmake \
+        -DCMAKE_BUILD_TYPE="RelWithDebInfo" \
+        -DUSE_MKL_IF_AVAILABLE=OFF \
+        -DUSE_TVM_OP=OFF \
+        -DUSE_CUDA=ON \
+        -DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
+        -DUSE_CUDNN=ON \
+        -DUSE_MKLDNN=OFF \
+        -DBUILD_CYTHON_MODULES=ON \
+        -DUSE_DIST_KVSTORE=ON \
+        -G Ninja /work/mxnet
+    ninja
 }
 
 build_ubuntu_amalgamation() {
@@ -846,25 +837,6 @@ build_ubuntu_amalgamation_min() {
         MIN=1
 }
 
-build_ubuntu_gpu_cmake_mkldnn() {
-    set -ex
-    cd /work/build
-    build_ccache_wrappers
-    cmake \
-        -DUSE_SIGNAL_HANDLER=ON                 \
-        -DUSE_CUDA=1                            \
-        -DUSE_CUDNN=1                           \
-        -DUSE_TVM_OP=1                          \
-        -DPython3_EXECUTABLE=/usr/bin/python3   \
-        -DUSE_MKLML_MKL=1                       \
-        -DCMAKE_BUILD_TYPE=Release              \
-        -DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-        -G Ninja                                \
-        /work/mxnet
-
-    ninja
-}
-
 build_ubuntu_gpu_cmake() {
     set -ex
     cd /work/build
@@ -874,7 +846,6 @@ build_ubuntu_gpu_cmake() {
         -DUSE_CUDA=ON                           \
         -DUSE_CUDNN=ON                          \
         -DUSE_TVM_OP=ON                         \
-        -DPython3_EXECUTABLE=/usr/bin/python3   \
         -DUSE_MKL_IF_AVAILABLE=OFF              \
         -DUSE_MKLML_MKL=OFF                     \
         -DUSE_MKLDNN=OFF                        \
@@ -897,7 +868,6 @@ build_ubuntu_gpu_cmake_no_rtc() {
         -DUSE_CUDA=ON                           \
         -DUSE_CUDNN=ON                          \
         -DUSE_TVM_OP=ON                         \
-        -DPython3_EXECUTABLE=/usr/bin/python3   \
         -DUSE_MKL_IF_AVAILABLE=OFF              \
         -DUSE_MKLML_MKL=OFF                     \
         -DUSE_MKLDNN=ON                         \
@@ -921,7 +891,6 @@ build_ubuntu_gpu_cmake_no_tvm_op() {
         -DUSE_CUDA=ON                           \
         -DUSE_CUDNN=ON                          \
         -DUSE_TVM_OP=OFF                        \
-        -DPython3_EXECUTABLE=/usr/bin/python3   \
         -DUSE_MKL_IF_AVAILABLE=OFF              \
         -DUSE_MKLML_MKL=OFF                     \
         -DUSE_MKLDNN=OFF                        \
@@ -961,7 +930,6 @@ build_ubuntu_gpu_large_tensor() {
         -DUSE_CUDA=ON                           \
         -DUSE_CUDNN=ON                          \
         -DUSE_TVM_OP=ON                         \
-        -DPython3_EXECUTABLE=/usr/bin/python3   \
         -DUSE_MKL_IF_AVAILABLE=OFF              \
         -DUSE_MKLML_MKL=OFF                     \
         -DUSE_MKLDNN=OFF                        \
@@ -984,7 +952,8 @@ build_ubuntu_blc() {
 sanity_check() {
     set -ex
     tools/license_header.py check
-    make cpplint rcpplint jnilint
+    make cpplint jnilint
+    make -f R-package/Makefile rcpplint
     make pylint
     nosetests-3.4 tests/tutorials/test_sanity_tutorials.py
 }
diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy
index 3f5fb25..f87a55f 100644
--- a/ci/jenkins/Jenkins_steps.groovy
+++ b/ci/jenkins/Jenkins_steps.groovy
@@ -23,25 +23,27 @@
 utils = load('ci/Jenkinsfile_utils.groovy')
 
 // mxnet libraries
-mx_lib = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, 
lib/libtvmop.so, lib/tvmop.conf, build/libcustomop_lib.so, 
build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 
3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a'
-mx_lib_cython = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, 
lib/libtvmop.so, lib/tvmop.conf, build/libcustomop_lib.so, 
build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 
3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a, 
python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so'
+mx_lib = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, 
build/libtvmop.so, build/tvmop.conf, build/libcustomop_lib.so, 
build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 
build/3rdparty/openmp/runtime/src/libomp.so'
+mx_lib_cython = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, 
build/libtvmop.so, build/tvmop.conf, build/libcustomop_lib.so, 
build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, python/mxnet/_cy3/*.so, 
build/3rdparty/openmp/runtime/src/libomp.so, python/mxnet/_ffi/_cy3/*.so'
+mx_lib_make = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, 
lib/libtvmop.so, lib/tvmop.conf, build/libcustomop_lib.so, 
build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 
3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a'
 
 // Python wheels
 mx_pip = 'build/*.whl'
 
 // mxnet cmake libraries, in cmake builds we do not produce a libnvvm static 
library by default.
-mx_cmake_lib = 'build/libmxnet.so, build/libmxnet.a, 
build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, 
build/3rdparty/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, 
build/3rdparty/openmp/runtime/src/libomp.so'
-mx_cmake_lib_no_tvm_op = 'build/libmxnet.so, build/libmxnet.a, 
build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, 
build/libsubgraph_lib.so, build/3rdparty/dmlc-core/libdmlc.a, 
build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so'
-mx_cmake_lib_cython = 'build/libmxnet.so, build/libmxnet.a, 
build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, 
build/3rdparty/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, 
build/3rdparty/openmp/runtime/src/libomp.so, python/mxnet/_cy3/*.so, 
python/mxnet/_ffi/_cy3/*.so'
+mx_cmake_lib = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, 
build/libtvmop.so, build/tvmop.conf, build/tests/mxnet_unit_tests, 
build/3rdparty/openmp/runtime/src/libomp.so'
+mx_cmake_lib_no_tvm_op = 'build/libmxnet.so, build/libcustomop_lib.so, 
build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 
build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so'
+mx_cmake_lib_cython = 'build/libmxnet.so, 
build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, 
build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so, 
python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so'
 // mxnet cmake libraries, in cmake builds we do not produce a libnvvm static 
library by default.
-mx_cmake_lib_debug = 'build/libmxnet.so, build/libmxnet.a, 
build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, 
build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, 
build/libsubgraph_lib.so, build/3rdparty/dmlc-core/libdmlc.a, 
build/tests/mxnet_unit_tests'
-mx_cmake_mkldnn_lib = 'build/libmxnet.so, build/libmxnet.a, 
build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, 
build/3rdparty/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, 
build/3rdparty/openmp/runtime/src/libomp.so'
-mx_mkldnn_lib = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, 
lib/libtvmop.so, lib/tvmop.conf, build/libcustomop_lib.so, 
build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 
3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a'
-mx_tensorrt_lib = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, 
build/libtvmop.so, build/tvmop.conf, lib/libnvonnxparser_runtime.so.0, 
lib/libnvonnxparser.so.0, lib/libonnx_proto.so, lib/libonnx.so'
-mx_lib_cpp_examples = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, 
lib/libtvmop.so, lib/tvmop.conf, build/libcustomop_lib.so, 
build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 
3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a, 
3rdparty/ps-lite/build/libps.a, deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, 
build/cpp-package/example/*, python/mxnet/_cy3/*.so, 
python/mxnet/_ffi/_cy3/*.so'
-mx_lib_cpp_capi = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, 
lib/libtvmop.so, lib/tvmop.conf, libsample_lib.so, lib/libmkldnn.so.1, 
lib/libmklml_intel.so, 3rdparty/dmlc-core/libdmlc.a, 
3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, 
deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, build/cpp-package/example/*, 
python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so, 
build/tests/cpp/mxnet_unit_tests'
-mx_lib_cpp_examples_no_tvm_op = 'lib/libmxnet.so, lib/libmxnet.a, 
build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, 
build/libsubgraph_lib.so, 3rdparty/dmlc-core/libdmlc.a, 
3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, 
deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, build/cpp-package/example/*, 
python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so'
-mx_lib_cpp_examples_cpu = 'build/libmxnet.so, 
build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, 
build/cpp-package/example/*'
+mx_cmake_lib_debug = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, 
build/libtvmop.so, build/tvmop.conf, build/libcustomop_lib.so, 
build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 
build/tests/mxnet_unit_tests'
+mx_mkldnn_lib = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, 
build/libtvmop.so, build/tvmop.conf, 
build/3rdparty/openmp/runtime/src/libomp.so, build/libcustomop_lib.so, 
build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so'
+mx_mkldnn_lib_make = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, 
lib/libtvmop.so, lib/tvmop.conf, build/libcustomop_lib.so, 
build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 
3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a'
+mx_tensorrt_lib = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, 
build/libtvmop.so, build/tvmop.conf, 
build/3rdparty/openmp/runtime/src/libomp.so, lib/libnvonnxparser_runtime.so.0, 
lib/libnvonnxparser.so.0, lib/libonnx_proto.so, lib/libonnx.so'
+mx_lib_cpp_examples = 'build/libmxnet.so, 
build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, 
build/3rdparty/openmp/runtime/src/libomp.so, build/libcustomop_lib.so, 
build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 
build/cpp-package/example/*, python/mxnet/_cy3/*.so, 
python/mxnet/_ffi/_cy3/*.so'
+mx_lib_cpp_examples_make = 'lib/libmxnet.so, lib/libmxnet.a, 
lib/libtvm_runtime.so, lib/libtvmop.so, lib/tvmop.conf, 
build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, 
build/libsubgraph_lib.so, 3rdparty/dmlc-core/libdmlc.a, 
3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, 
deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, build/cpp-package/example/*, 
python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so'
+mx_lib_cpp_capi_make = 'lib/libmxnet.so, lib/libmxnet.a, 
lib/libtvm_runtime.so, lib/libtvmop.so, lib/tvmop.conf, libsample_lib.so, 
lib/libmkldnn.so.1, lib/libmklml_intel.so, 3rdparty/dmlc-core/libdmlc.a, 
3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, 
deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, build/cpp-package/example/*, 
python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so, 
build/tests/cpp/mxnet_unit_tests'
+mx_lib_cpp_examples_no_tvm_op = 'build/libmxnet.so, build/libcustomop_lib.so, 
build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 
build/3rdparty/openmp/runtime/src/libomp.so,  build/cpp-package/example/*, 
python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so'
+mx_lib_cpp_examples_cpu = 'build/libmxnet.so, 
build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, 
build/3rdparty/openmp/runtime/src/libomp.so, build/cpp-package/example/*'
 
 // Python unittest for CPU
 // Python 3
@@ -102,6 +104,20 @@ def compile_unix_cpu_openblas() {
     }]
 }
 
+def compile_unix_cpu_openblas_make() {
+    return ['CPU: Openblas Makefile': {
+      node(NODE_LINUX_CPU) {
+        ws('workspace/build-cpu-openblas') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            utils.init_git()
+            utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_openblas_make', 
false)
+            utils.pack_lib('cpu_make', mx_lib_make)
+          }
+        }
+      }
+    }]
+}
+
 def compile_unix_openblas_debug_cpu() {
     return ['CPU: Openblas, cmake, debug': {
       node(NODE_LINUX_CPU) {
@@ -159,13 +175,13 @@ def compile_unix_int64_gpu() {
 }
 
 def compile_unix_mkl_cpu() {
-    return ['CPU: MKL': {
+    return ['CPU: MKL Makefile': {
       node(NODE_LINUX_CPU) {
         ws('workspace/build-cpu-mkl') {
           timeout(time: max_time, unit: 'MINUTES') {
             utils.init_git()
             utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_mkl', false)
-            utils.pack_lib('cpu_mkl', mx_mkldnn_lib)
+            utils.pack_lib('cpu_mkl', mx_mkldnn_lib_make)
           }
         }
       }
@@ -186,14 +202,28 @@ def compile_unix_mkldnn_cpu() {
     }]
 }
 
+def compile_unix_mkldnn_cpu_make() {
+    return ['CPU: MKLDNN Makefile': {
+      node(NODE_LINUX_CPU) {
+        ws('workspace/build-mkldnn-cpu') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            utils.init_git()
+            utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_mkldnn_make', 
false)
+            utils.pack_lib('mkldnn_cpu_make', mx_mkldnn_lib_make)
+          }
+        }
+      }
+    }]
+}
+
 def compile_unix_mkldnn_mkl_cpu() {
-    return ['CPU: MKLDNN_MKL': {
+    return ['CPU: MKLDNN_MKL Makefile': {
       node(NODE_LINUX_CPU) {
         ws('workspace/build-mkldnn-cpu') {
           timeout(time: max_time, unit: 'MINUTES') {
             utils.init_git()
             utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_mkldnn_mkl', 
false)
-            utils.pack_lib('mkldnn_mkl_cpu', mx_mkldnn_lib)
+            utils.pack_lib('mkldnn_mkl_cpu', mx_mkldnn_lib_make)
           }
         }
       }
@@ -242,42 +272,42 @@ def compile_unix_full_gpu() {
     }]
 }
 
-def compile_unix_full_gpu_mkldnn_cpp_test() {
-    return ['GPU: CUDA10.1+cuDNN7+MKLDNN+CPPTEST': {
+def compile_unix_full_gpu_make() {
+    return ['GPU: CUDA10.1+cuDNN7 Makefile': {
       node(NODE_LINUX_CPU) {
-        ws('workspace/build-gpu-mkldnn-cpp') {
+        ws('workspace/build-gpu') {
           timeout(time: max_time, unit: 'MINUTES') {
             utils.init_git()
-            utils.docker_run('ubuntu_build_cuda', 
'build_ubuntu_gpu_cuda101_cudnn7_mkldnn_cpp_test', false)
-            utils.pack_lib('gpu_mkldnn_cpp_test', mx_lib_cpp_capi)
+            utils.docker_run('ubuntu_build_cuda', 
'build_ubuntu_gpu_cuda101_cudnn7_make', false)
+            utils.pack_lib('gpu_make', mx_lib_cpp_examples_make)
           }
         }
       }
     }]
 }
 
-def compile_unix_full_gpu_no_tvm_op() {
-    return ['GPU: CUDA10.1+cuDNN7 TVM_OP OFF': {
+def compile_unix_full_gpu_mkldnn_cpp_test() {
+    return ['GPU: CUDA10.1+cuDNN7+MKLDNN+CPPTEST Makefile': {
       node(NODE_LINUX_CPU) {
-        ws('workspace/build-gpu-no-tvm-op') {
+        ws('workspace/build-gpu-mkldnn-cpp') {
           timeout(time: max_time, unit: 'MINUTES') {
             utils.init_git()
-            utils.docker_run('ubuntu_build_cuda', 
'build_ubuntu_gpu_cuda101_cudnn7_no_tvm_op', false)
-            utils.pack_lib('gpu_no_tvm_op', mx_lib_cpp_examples_no_tvm_op)
+            utils.docker_run('ubuntu_build_cuda', 
'build_ubuntu_gpu_cuda101_cudnn7_mkldnn_cpp_test', false)
+            utils.pack_lib('gpu_mkldnn_cpp_test_make', mx_lib_cpp_capi_make)
           }
         }
       }
     }]
 }
 
-def compile_unix_cmake_mkldnn_gpu() {
-    return ['GPU: CMake MKLDNN': {
+def compile_unix_full_gpu_no_tvm_op() {
+    return ['GPU: CUDA10.1+cuDNN7 TVM_OP OFF': {
       node(NODE_LINUX_CPU) {
-        ws('workspace/build-cmake-mkldnn-gpu') {
+        ws('workspace/build-gpu-no-tvm-op') {
           timeout(time: max_time, unit: 'MINUTES') {
             utils.init_git()
-            utils.docker_run('ubuntu_gpu_cu101', 
'build_ubuntu_gpu_cmake_mkldnn', false)
-            utils.pack_lib('cmake_mkldnn_gpu', mx_cmake_mkldnn_lib)
+            utils.docker_run('ubuntu_build_cuda', 
'build_ubuntu_gpu_cuda101_cudnn7_no_tvm_op', false)
+            utils.pack_lib('gpu_no_tvm_op', mx_lib_cpp_examples_no_tvm_op)
           }
         }
       }
@@ -352,6 +382,20 @@ def compile_centos7_cpu() {
     }]
 }
 
+def compile_centos7_cpu_make() {
+    return ['CPU: CentOS 7 Makefile': {
+      node(NODE_LINUX_CPU) {
+        ws('workspace/build-centos7-cpu') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            utils.init_git()
+            utils.docker_run('centos7_cpu', 'build_centos7_cpu_make', false)
+            utils.pack_lib('centos7_cpu_make', mx_lib_make)
+          }
+        }
+      }
+    }]
+}
+
 def compile_centos7_cpu_mkldnn() {
     return ['CPU: CentOS 7 MKLDNN': {
       node(NODE_LINUX_CPU) {
@@ -733,11 +777,11 @@ def test_unix_python3_cpu() {
 }
 
 def test_unix_python3_mkl_cpu() {
-    return ['Python3: MKL-CPU': {
+    return ['Python3: MKL-CPU Makefile': {
       node(NODE_LINUX_CPU) {
         ws('workspace/ut-python3-cpu') {
           try {
-            utils.unpack_and_init('cpu_mkl', mx_lib)
+            utils.unpack_and_init('cpu_mkl', mx_lib_make)
             python3_ut('ubuntu_cpu')
             utils.publish_test_coverage()
           } finally {
@@ -849,11 +893,11 @@ def test_unix_python3_mkldnn_cpu() {
 }
 
 def test_unix_python3_mkldnn_mkl_cpu() {
-    return ['Python3: MKLDNN-MKL-CPU': {
+    return ['Python3: MKLDNN-MKL-CPU Makefile': {
       node(NODE_LINUX_CPU) {
         ws('workspace/ut-python3-mkldnn-mkl-cpu') {
           try {
-            utils.unpack_and_init('mkldnn_mkl_cpu', mx_mkldnn_lib)
+            utils.unpack_and_init('mkldnn_mkl_cpu', mx_mkldnn_lib_make)
             python3_ut_mkldnn('ubuntu_cpu')
             utils.publish_test_coverage()
           } finally {
@@ -945,11 +989,11 @@ def test_unix_caffe_gpu() {
 }
 
 def test_unix_cpp_package_gpu() {
-    return ['cpp-package GPU': {
+    return ['cpp-package GPU Makefile': {
       node(NODE_LINUX_GPU) {
         ws('workspace/it-cpp-package') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('gpu', mx_lib_cpp_examples)
+            utils.unpack_and_init('gpu_make', mx_lib_cpp_examples_make)
             utils.docker_run('ubuntu_gpu_cu101', 
'integrationtest_ubuntu_gpu_cpp_package', true)
             utils.publish_test_coverage()
           }
@@ -959,11 +1003,11 @@ def test_unix_cpp_package_gpu() {
 }
 
 def test_unix_capi_cpp_package() {
-    return ['capi-cpp-package GPU': {
+    return ['capi-cpp-package GPU Makefile': {
       node(NODE_LINUX_GPU) {
         ws('workspace/it-capi-cpp-package') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('gpu_mkldnn_cpp_test', mx_lib_cpp_capi)
+            utils.unpack_and_init('gpu_mkldnn_cpp_test_make', 
mx_lib_cpp_capi_make)
             utils.docker_run('ubuntu_gpu_cu101', 
'integrationtest_ubuntu_gpu_capi_cpp_package', true)
             utils.publish_test_coverage()
           }
@@ -973,11 +1017,11 @@ def test_unix_capi_cpp_package() {
 }
 
 def test_unix_scala_cpu() {
-    return ['Scala: CPU': {
+    return ['Scala: CPU Makefile': {
       node(NODE_LINUX_CPU) {
         ws('workspace/ut-scala-cpu') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('cpu', mx_lib)
+            utils.unpack_and_init('cpu_make', mx_lib_make)
             utils.docker_run('ubuntu_cpu', 'integrationtest_ubuntu_cpu_scala', 
false)
             utils.publish_test_coverage()
           }
@@ -987,11 +1031,11 @@ def test_unix_scala_cpu() {
 }
 
 def test_unix_scala_mkldnn_cpu(){
-  return ['Scala: MKLDNN-CPU': {
+  return ['Scala: MKLDNN-CPU Makefile': {
       node(NODE_LINUX_CPU) {
         ws('workspace/ut-scala-mkldnn-cpu') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('mkldnn_cpu', mx_mkldnn_lib)
+            utils.unpack_and_init('mkldnn_cpu_make', mx_mkldnn_lib_make)
             utils.docker_run('ubuntu_cpu', 'integrationtest_ubuntu_cpu_scala', 
false)
             utils.publish_test_coverage()
           }
@@ -1001,11 +1045,11 @@ def test_unix_scala_mkldnn_cpu(){
 }
 
 def test_unix_scala_gpu() {
-    return ['Scala: GPU': {
+    return ['Scala: GPU Makefile': {
       node(NODE_LINUX_GPU) {
         ws('workspace/ut-scala-gpu') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('gpu', mx_lib)
+            utils.unpack_and_init('gpu_make', mx_lib_make)
             utils.docker_run('ubuntu_gpu_cu101', 
'integrationtest_ubuntu_gpu_scala', true)
             utils.publish_test_coverage()
           }
@@ -1015,11 +1059,11 @@ def test_unix_scala_gpu() {
 }
 
 def test_unix_clojure_cpu() {
-    return ['Clojure: CPU': {
+    return ['Clojure: CPU Makefile': {
       node(NODE_LINUX_CPU) {
         ws('workspace/ut-clojure-cpu') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('cpu', mx_lib)
+            utils.unpack_and_init('cpu_make', mx_lib_make)
             utils.docker_run('ubuntu_cpu', 'unittest_ubuntu_cpu_clojure', 
false)
             utils.publish_test_coverage()
           }
@@ -1029,11 +1073,11 @@ def test_unix_clojure_cpu() {
 }
 
 def test_unix_clojure_integration_cpu() {
-    return ['Clojure: CPU Integration': {
+    return ['Clojure: CPU Integration Makefile': {
       node(NODE_LINUX_CPU) {
         ws('workspace/ut-clojure-integration-cpu') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('cpu', mx_lib)
+            utils.unpack_and_init('cpu_make', mx_lib_make)
             utils.docker_run('ubuntu_cpu', 
'unittest_ubuntu_cpu_clojure_integration', false)
           }
         }
@@ -1070,11 +1114,11 @@ def test_unix_r_mkldnn_cpu() {
 }
 
 def test_unix_perl_cpu() {
-    return ['Perl: CPU': {
+    return ['Perl: CPU Makefile': {
       node(NODE_LINUX_CPU) {
         ws('workspace/ut-perl-cpu') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('cpu', mx_lib)
+            utils.unpack_and_init('cpu_make', mx_lib_make)
             utils.docker_run('ubuntu_cpu', 'unittest_ubuntu_cpugpu_perl', 
false)
             utils.publish_test_coverage()
           }
@@ -1097,20 +1141,6 @@ def test_unix_cpp_gpu() {
     }]
 }
 
-def test_unix_cpp_mkldnn_gpu() {
-    return ['Cpp: MKLDNN+GPU': {
-      node(NODE_LINUX_GPU) {
-        ws('workspace/ut-cpp-mkldnn-gpu') {
-          timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('cmake_mkldnn_gpu', mx_cmake_mkldnn_lib)
-            utils.docker_run('ubuntu_gpu_cu101', 'unittest_cpp', true)
-            utils.publish_test_coverage()
-          }
-        }
-      }
-    }]
-}
-
 def test_unix_cpp_cpu() {
     return ['Cpp: CPU': {
       node(NODE_LINUX_CPU) {
@@ -1126,11 +1156,11 @@ def test_unix_cpp_cpu() {
 }
 
 def test_unix_perl_gpu() {
-    return ['Perl: GPU': {
+    return ['Perl: GPU Makefile': {
       node(NODE_LINUX_GPU) {
         ws('workspace/ut-perl-gpu') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('gpu', mx_lib)
+            utils.unpack_and_init('gpu_make', mx_lib_make)
             utils.docker_run('ubuntu_gpu_cu101', 
'unittest_ubuntu_cpugpu_perl', true)
             utils.publish_test_coverage()
           }
@@ -1180,11 +1210,11 @@ def test_unix_julia10_cpu() {
 }
 
 def test_unix_onnx_cpu() {
-    return ['Onnx CPU': {
+    return ['Onnx: CPU Makefile': {
       node(NODE_LINUX_CPU) {
         ws('workspace/it-onnx-cpu') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('cpu', mx_lib)
+            utils.unpack_and_init('cpu_make', mx_lib_make)
             utils.docker_run('ubuntu_cpu', 'integrationtest_ubuntu_cpu_onnx', 
false)
             utils.publish_test_coverage()
           }
@@ -1259,11 +1289,11 @@ def test_centos7_python3_gpu() {
 }
 
 def test_centos7_scala_cpu() {
-    return ['Scala: CentOS CPU': {
+    return ['Scala: CentOS CPU Makefile': {
       node(NODE_LINUX_CPU) {
         ws('workspace/ut-scala-centos7-cpu') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('centos7_cpu', mx_lib)
+            utils.unpack_and_init('centos7_cpu_make', mx_lib_make)
             utils.docker_run('centos7_cpu', 'unittest_centos7_cpu_scala', 
false)
             utils.publish_test_coverage()
           }
@@ -1430,7 +1460,7 @@ def docs_python() {
       node(NODE_LINUX_CPU) {
         ws('workspace/docs') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('libmxnet', mx_lib, false)
+            utils.unpack_and_init('libmxnet', 'lib/libmxnet.so', false)
             utils.docker_run('ubuntu_cpu_python', 'build_python_docs', false)
             if (should_pack_website()) {
               utils.pack_lib('python-artifacts', 
'docs/_build/python-artifacts.tgz', false)
@@ -1466,7 +1496,7 @@ def docs_julia() {
       node(NODE_LINUX_CPU) {
         ws('workspace/docs') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('libmxnet', mx_lib, false)
+            utils.unpack_and_init('libmxnet', 'lib/libmxnet.so', false)
             utils.docker_run('ubuntu_cpu_julia', 'build_julia_docs', false)
             if (should_pack_website()) {
               utils.pack_lib('julia-artifacts', 
'docs/_build/julia-artifacts.tgz', false)
@@ -1484,7 +1514,7 @@ def docs_r() {
       node(NODE_LINUX_CPU) {
         ws('workspace/docs') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('libmxnet', mx_lib, false)
+            utils.unpack_and_init('libmxnet', 'lib/libmxnet.so', false)
             utils.docker_run('ubuntu_cpu_r', 'build_r_docs', false)
             if (should_pack_website()) {
               utils.pack_lib('r-artifacts', 'docs/_build/r-artifacts.tgz', 
false)
@@ -1503,7 +1533,7 @@ def docs_scala() {
       node(NODE_LINUX_CPU) {
         ws('workspace/docs') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('libmxnet', mx_lib, false)
+            utils.unpack_and_init('libmxnet', 'lib/libmxnet.so', false)
             utils.docker_run('ubuntu_cpu_scala', 'build_scala_docs', false)
             if (should_pack_website()) {
               utils.pack_lib('scala-artifacts', 
'docs/_build/scala-artifacts.tgz', false)
@@ -1522,7 +1552,7 @@ def docs_java() {
       node(NODE_LINUX_CPU) {
         ws('workspace/docs') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('libmxnet', mx_lib, false)
+            utils.unpack_and_init('libmxnet', 'lib/libmxnet.so', false)
             utils.docker_run('ubuntu_cpu_scala', 'build_java_docs', false)
             if (should_pack_website()) {
               utils.pack_lib('java-artifacts', 
'docs/_build/java-artifacts.tgz', false)
@@ -1541,7 +1571,7 @@ def docs_clojure() {
       node(NODE_LINUX_CPU) {
         ws('workspace/docs') {
           timeout(time: max_time, unit: 'MINUTES') {
-            utils.unpack_and_init('libmxnet', mx_lib, false)
+            utils.unpack_and_init('libmxnet', 'lib/libmxnet.so', false)
             utils.docker_run('ubuntu_cpu_scala', 'build_clojure_docs', false)
             if (should_pack_website()) {
               utils.pack_lib('clojure-artifacts', 
'docs/_build/clojure-artifacts.tgz', false)
diff --git a/ci/jenkins/Jenkinsfile_centos_cpu b/ci/jenkins/Jenkinsfile_centos_cpu
index a47ab3d..793d1f1 100644
--- a/ci/jenkins/Jenkinsfile_centos_cpu
+++ b/ci/jenkins/Jenkinsfile_centos_cpu
@@ -35,13 +35,14 @@ utils.main_wrapper(
 core_logic: {
   utils.parallel_stage('Build', [
     custom_steps.compile_centos7_cpu(),
+    custom_steps.compile_centos7_cpu_make(),
     custom_steps.compile_centos7_cpu_mkldnn()
-  ]) 
+  ])
 
   utils.parallel_stage('Tests', [
     custom_steps.test_centos7_python3_cpu(),
     custom_steps.test_centos7_scala_cpu()
-  ]) 
+  ])
 }
 ,
 failure_handler: {
diff --git a/ci/jenkins/Jenkinsfile_unix_cpu b/ci/jenkins/Jenkinsfile_unix_cpu
index 71917de..5bfad60 100644
--- a/ci/jenkins/Jenkinsfile_unix_cpu
+++ b/ci/jenkins/Jenkinsfile_unix_cpu
@@ -35,9 +35,11 @@ utils.main_wrapper(
 core_logic: {
   utils.parallel_stage('Build', [
     custom_steps.compile_unix_cpu_openblas(),
+    custom_steps.compile_unix_cpu_openblas_make(),
     custom_steps.compile_unix_openblas_debug_cpu(),
     custom_steps.compile_unix_mkl_cpu(),
     custom_steps.compile_unix_mkldnn_cpu(),
+    custom_steps.compile_unix_mkldnn_cpu_make(),
     custom_steps.compile_unix_mkldnn_mkl_cpu(),
     custom_steps.compile_unix_int64_cpu(),
     custom_steps.compile_unix_openblas_cpu_no_tvm_op(),
diff --git a/ci/jenkins/Jenkinsfile_unix_gpu b/ci/jenkins/Jenkinsfile_unix_gpu
index f8c28d5..66d3c13 100644
--- a/ci/jenkins/Jenkinsfile_unix_gpu
+++ b/ci/jenkins/Jenkinsfile_unix_gpu
@@ -37,7 +37,7 @@ core_logic: {
     custom_steps.compile_unix_mkldnn_gpu(),
     custom_steps.compile_unix_mkldnn_nocudnn_gpu(),
     custom_steps.compile_unix_full_gpu(),
-    custom_steps.compile_unix_cmake_mkldnn_gpu(),
+    custom_steps.compile_unix_full_gpu_make(),
     custom_steps.compile_unix_cmake_gpu(),
     custom_steps.compile_unix_tensorrt_gpu(),
     custom_steps.compile_unix_int64_gpu(),
@@ -56,7 +56,6 @@ core_logic: {
     custom_steps.test_unix_perl_gpu(),
     custom_steps.test_unix_r_gpu(),
     custom_steps.test_unix_cpp_gpu(),
-    custom_steps.test_unix_cpp_mkldnn_gpu(),
     custom_steps.test_unix_python3_integration_gpu(),
     custom_steps.test_unix_cpp_package_gpu(),
     custom_steps.test_unix_scala_gpu(),
diff --git a/cmake/BuildCythonModules.cmake b/cmake/BuildCythonModules.cmake
index d2c3a46..48c8d8d 100644
--- a/cmake/BuildCythonModules.cmake
+++ b/cmake/BuildCythonModules.cmake
@@ -16,23 +16,16 @@
 # under the License.
 
 function(add_cython_modules python_version)
-  unset(PYTHON_EXECUTABLE CACHE)
-  set(PYTHONINTERP_FOUND FALSE)
-  find_package(PythonInterp ${python_version} EXACT)
-  if(PYTHONINTERP_FOUND)
-    find_program(CYTHON_EXECUTABLE NAMES cython)
-    if(CYTHON_EXECUTABLE)
-      add_custom_command(COMMAND ${CMAKE_COMMAND} POST_BUILD
-                          -E env MXNET_LIBRARY_PATH=${CMAKE_BINARY_DIR}/libmxnet.so
-                          ${PYTHON_EXECUTABLE} setup.py build_ext --inplace --with-cython
-                          TARGET mxnet
-                          WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}/python")
-      message("-- Cython modules for python${python_version} will be built")
-      set(PYTHON${python_version}_FOUND 1 PARENT_SCOPE)
-    else()
-      message(FATAL_ERROR "-- Cython not found")
-    endif()
+  find_package(Python3)
+  find_program(CYTHON_EXECUTABLE NAMES cython cython.bat cython3)
+  if(CYTHON_EXECUTABLE AND Python3_EXECUTABLE)
+    add_custom_command(COMMAND ${CMAKE_COMMAND} POST_BUILD
+                        -E env MXNET_LIBRARY_PATH=${CMAKE_BINARY_DIR}/libmxnet.so
+                        ${Python3_EXECUTABLE} setup.py build_ext --inplace --with-cython
+                        TARGET mxnet
+                        WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}/python")
+    message("-- Cython modules will be built")
   else()
-    set(PYTHON${python_version}_FOUND 0 PARENT_SCOPE)
+    message(FATAL_ERROR "-- Cython not found")
   endif()
 endfunction()
diff --git a/config/distribution/darwin_cpu.cmake b/config/distribution/darwin_cpu.cmake
index a0c803c..790e183 100644
--- a/config/distribution/darwin_cpu.cmake
+++ b/config/distribution/darwin_cpu.cmake
@@ -30,3 +30,4 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
 set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
 set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
 set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support")
+set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo")
diff --git a/config/distribution/linux_cpu.cmake b/config/distribution/linux_cpu.cmake
index cad3485..15b4f5a 100644
--- a/config/distribution/linux_cpu.cmake
+++ b/config/distribution/linux_cpu.cmake
@@ -28,3 +28,4 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
 set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
 set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
 set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support")
+set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo")
diff --git a/config/distribution/linux_cu100.cmake b/config/distribution/linux_cu100.cmake
index d26b4d7..bdbec7e 100644
--- a/config/distribution/linux_cu100.cmake
+++ b/config/distribution/linux_cu100.cmake
@@ -29,6 +29,7 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
 set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
 set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
 set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support")
+set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo")
 
 set(CUDACXX "/usr/local/cuda-10.0/bin/nvcc" CACHE STRING "Cuda compiler")
 set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.5" CACHE STRING "Cuda architectures")
diff --git a/config/distribution/linux_cu101.cmake b/config/distribution/linux_cu101.cmake
index aaf76cc..fd773e8 100644
--- a/config/distribution/linux_cu101.cmake
+++ b/config/distribution/linux_cu101.cmake
@@ -31,6 +31,7 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
 set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
 set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
 set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support")
+set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo")
 
 set(CUDACXX "/usr/local/cuda-10.1/bin/nvcc" CACHE STRING "Cuda compiler")
 set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.5" CACHE STRING "Cuda architectures")
diff --git a/config/distribution/linux_cu102.cmake b/config/distribution/linux_cu102.cmake
index 6b57568..9f740f5 100644
--- a/config/distribution/linux_cu102.cmake
+++ b/config/distribution/linux_cu102.cmake
@@ -29,6 +29,7 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
 set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
 set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
 set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support")
+set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo")
 
 set(CUDACXX "/usr/local/cuda-10.2/bin/nvcc" CACHE STRING "Cuda compiler")
 set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.5" CACHE STRING "Cuda architectures")
diff --git a/config/distribution/linux_cu75.cmake b/config/distribution/linux_cu75.cmake
index 45ba2b9..91ef971 100644
--- a/config/distribution/linux_cu75.cmake
+++ b/config/distribution/linux_cu75.cmake
@@ -29,6 +29,7 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
 set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
 set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
 set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support")
+set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo")
 
 set(CUDACXX "/usr/local/cuda-7.5/bin/nvcc" CACHE STRING "Cuda compiler")
 set(MXNET_CUDA_ARCH "3.0;3.5;5.0;5.2" CACHE STRING "Cuda architectures")
diff --git a/config/distribution/linux_cu80.cmake b/config/distribution/linux_cu80.cmake
index ce8e008..6b98538 100644
--- a/config/distribution/linux_cu80.cmake
+++ b/config/distribution/linux_cu80.cmake
@@ -29,6 +29,7 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
 set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
 set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
 set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support")
+set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo")
 
 set(CUDACXX "/usr/local/cuda-8.0/bin/nvcc" CACHE STRING "Cuda compiler")
 set(MXNET_CUDA_ARCH "3.0;5.0;6.0;6.2" CACHE STRING "Cuda architectures")
diff --git a/config/distribution/linux_cu90.cmake b/config/distribution/linux_cu90.cmake
index 01097cb..1932a32 100644
--- a/config/distribution/linux_cu90.cmake
+++ b/config/distribution/linux_cu90.cmake
@@ -29,6 +29,7 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
 set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
 set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
 set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support")
+set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo")
 
 set(CUDACXX "/usr/local/cuda-9.0/bin/nvcc" CACHE STRING "Cuda compiler")
 set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.2" CACHE STRING "Cuda architectures")
diff --git a/config/distribution/linux_cu91.cmake b/config/distribution/linux_cu91.cmake
index f6301fa..36e10a6 100644
--- a/config/distribution/linux_cu91.cmake
+++ b/config/distribution/linux_cu91.cmake
@@ -29,6 +29,7 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
 set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
 set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
 set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support")
+set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo")
 
 set(CUDACXX "/usr/local/cuda-9.1/bin/nvcc" CACHE STRING "Cuda compiler")
 set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.2" CACHE STRING "Cuda architectures")
diff --git a/config/distribution/linux_cu92.cmake b/config/distribution/linux_cu92.cmake
index 63ab9fc..285dacc 100644
--- a/config/distribution/linux_cu92.cmake
+++ b/config/distribution/linux_cu92.cmake
@@ -29,6 +29,7 @@ set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
 set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
 set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
 set(USE_F16C OFF CACHE BOOL "Build with x86 F16C instruction support")
+set(USE_LIBJPEG_TURBO ON CACHE BOOL "Build with libjpeg-turbo")
 
 set(CUDACXX "/usr/local/cuda-9.2/bin/nvcc" CACHE STRING "Cuda compiler")
 set(MXNET_CUDA_ARCH "3.0;5.0;6.0;7.0;7.2" CACHE STRING "Cuda architectures")
diff --git a/src/operator/mshadow_op.h b/src/operator/mshadow_op.h
index 2d2a0de..9106ee2 100644
--- a/src/operator/mshadow_op.h
+++ b/src/operator/mshadow_op.h
@@ -347,6 +347,12 @@ struct mixed_rpower {
 };
 #endif
 
+
+#pragma GCC diagnostic push
+#if __GNUC__ >= 7
+#pragma GCC diagnostic ignored "-Wint-in-bool-context"
+#pragma GCC diagnostic ignored "-Wbool-compare"
+#endif
 MXNET_BINARY_MATH_OP_NC_WITH_BOOL(mul, a * b);
 
 MXNET_BINARY_MATH_OP_NC_WITH_BOOL(div, a / b);
@@ -354,6 +360,7 @@ MXNET_BINARY_MATH_OP_NC_WITH_BOOL(div, a / b);
 MXNET_BINARY_MATH_OP_NC_WITH_BOOL(plus, a + b);
 
 MXNET_BINARY_MATH_OP_NC_WITH_BOOL(minus, a - b);
+#pragma GCC diagnostic pop
 
 MXNET_UNARY_MATH_OP(negation, -a);
 
@@ -683,6 +690,10 @@ struct fix : public mxnet_op::tunable {
   }
 };
 
+#pragma GCC diagnostic push
+#if __GNUC__ >= 7
+#pragma GCC diagnostic ignored "-Wbool-compare"
+#endif
 /*! \brief used to determine whether a number is Not A Number*/
 struct isnan : public mxnet_op::tunable {
   template<typename DType>
@@ -722,6 +733,7 @@ struct isneginf : public mxnet_op::tunable {
     return IsInf(a) && a < 0;
   }
 };
+#pragma GCC diagnostic pop
 
 /*! \brief used for generate gradient of MAE loss*/
 MXNET_BINARY_MATH_OP_NC(minus_sign, a - b > DType(0) ? DType(1) : -DType(1));
@@ -1301,7 +1313,12 @@ struct nrm2 {
   /*! \brief finalize reduction result */
   template<typename DType>
   MSHADOW_XINLINE static void Finalize(volatile DType& sum_of_squares, volatile DType& scale) { // NOLINT(*)
+#pragma GCC diagnostic push
+#if __GNUC__ >= 7
+#pragma GCC diagnostic ignored "-Wint-in-bool-context"
+#endif
     sum_of_squares = scale * math::sqrt(sum_of_squares);
+#pragma GCC diagnostic pop
   }
   /*!
    *\brief calculate gradient of redres with respect to redsrc,
@@ -1395,6 +1412,11 @@ struct nanprod_grad : public mxnet_op::tunable {
   }
 };
 
+#pragma GCC diagnostic push
+#if __GNUC__ >= 7
+#pragma GCC diagnostic ignored "-Wint-in-bool-context"
+#pragma GCC diagnostic ignored "-Wbool-compare"
+#endif
 /*! \brief used for computing binary lowest common multiple */
 struct lcm : public mxnet_op::tunable {
   template<typename DType>
@@ -1436,6 +1458,7 @@ struct lcm : public mxnet_op::tunable {
     return DType(0.0f);
   }
 };
+#pragma GCC diagnostic pop
 
 }  // namespace mshadow_op
 }  // namespace op
diff --git a/src/operator/numpy/linalg/np_norm-inl.h b/src/operator/numpy/linalg/np_norm-inl.h
index 643554f..e244c65 100644
--- a/src/operator/numpy/linalg/np_norm-inl.h
+++ b/src/operator/numpy/linalg/np_norm-inl.h
@@ -71,6 +71,10 @@ struct nrmlp {
   /*! \brief do stable reduction into dst */
   template<typename AType, typename DType>
   MSHADOW_XINLINE void Reduce(volatile AType& sum_of_powers, volatile DType src, volatile DType& scale) { // NOLINT(*)
+#pragma GCC diagnostic push
+#if __GNUC__ >= 7
+#pragma GCC diagnostic ignored "-Wint-in-bool-context"
+#endif
     if (src != 0) {
       DType src_abs = abs::Map(src);
       if (scale < src_abs) {
@@ -81,6 +85,7 @@ struct nrmlp {
         sum_of_powers = sum_of_powers + AType(lp_power(static_cast<double>(src_abs / scale), lp));
       }
     }
+#pragma GCC diagnostic pop
   }
 
   /*! \brief combine the results of two reducers */
@@ -111,9 +116,14 @@ struct nrmlp {
   /*! \brief finalize reduction result */
   template<typename DType>
   MSHADOW_XINLINE void Finalize(volatile DType& sum_of_powers, volatile DType& scale) { // NOLINT(*)
+#pragma GCC diagnostic push
+#if __GNUC__ >= 7
+#pragma GCC diagnostic ignored "-Wint-in-bool-context"
+#endif
     if (lp != 0.0) {
       sum_of_powers = scale * DType(lp_power(static_cast<double>(sum_of_powers), 1.0 / lp));
     }
+#pragma GCC diagnostic pop
   }
 
   /*!
