marcoabreu closed pull request #9810: remove MKL_EXPERIMENTAL and update make 
files for MKL-DNN
URL: https://github.com/apache/incubator-mxnet/pull/9810
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

As this is a foreign pull request (from a fork), the diff is supplied
below (as GitHub would not otherwise display it for merged foreign pull requests):

diff --git a/Jenkinsfile b/Jenkinsfile
index 17d546c87f4..a20d9db545c 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -24,6 +24,7 @@
 mx_lib = 'lib/libmxnet.so, lib/libmxnet.a, dmlc-core/libdmlc.a, 
nnvm/lib/libnnvm.a'
 // mxnet cmake libraries, in cmake builds we do not produce a libnvvm static 
library by default.
 mx_cmake_lib = 'build/libmxnet.so, build/libmxnet.a, 
build/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, 
build/3rdparty/openmp/runtime/src/libomp.so'
+mx_cmake_mkldnn_lib = 'build/libmxnet.so, build/libmxnet.a, 
build/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, 
build/3rdparty/openmp/runtime/src/libomp.so, 
build/3rdparty/mkldnn/src/libmkldnn.so, 
build/3rdparty/mkldnn/src/libmkldnn.so.0'
 mx_mkldnn_lib = 'lib/libmxnet.so, lib/libmxnet.a, lib/libiomp5.so, 
lib/libmklml_gnu.so, lib/libmkldnn.so, lib/libmkldnn.so.0, 
lib/libmklml_intel.so, dmlc-core/libdmlc.a, nnvm/lib/libnnvm.a'
 // command to start a docker container
 docker_run = 'tests/ci_build/ci_build.sh'
@@ -260,6 +261,23 @@ try {
         }
       }
     },
+    'GPU: CMake MKLDNN': {
+      node('mxnetlinux-cpu') {
+        ws('workspace/build-cmake-mkldnn-gpu') {
+          init_git()
+          def defines = """            \
+            -DUSE_CUDA=1               \
+            -DUSE_CUDNN=1              \
+            -DUSE_MKLML_MKL=1          \
+            -DUSE_MKLDNN=1             \
+            -DCMAKE_BUILD_TYPE=Release \
+            """
+            def flag = "-v"
+            cmake("build_cuda", defines, flag)
+          pack_lib('cmake_mkldnn_gpu', mx_cmake_mkldnn_lib)
+        }
+      }
+    },
     'GPU: CMake': {
       node('mxnetlinux-cpu') {
         ws('workspace/build-cmake-gpu') {
diff --git a/MKL_README.md b/MKL_README.md
index 0f97416ac36..5374adb8e42 100644
--- a/MKL_README.md
+++ b/MKL_README.md
@@ -17,46 +17,3 @@ Installing and enabling the full MKL installation enables 
MKL support for all op
 
   5. Run 'sudo python setup.py install'
 
-# MKL2017 PLUGIN
-
-MKL2017 is an INTEL released library to accelerate Deep Neural Network (DNN) 
applications on Intel architecture.
-
-MKL2017_ML is a subset of MKL2017 and only contains DNN acceleration feature, 
MKL2017 release cycle is longer then MKL2017_ML and MKL2017_ML support latest 
feature
-
-This README shows the user how to setup and install MKL2017 library with mxnet.
-
-## Build/Install MXNet with MKL:
-
-  1. Enable USE_MKL2017=1 in make/config.mk
-
-    1.1 By default, MKL_2017_EXPRIEMENTAL=0. If setting 
MKL_2017_EXPRIEMENTAL=1, MKL buffer will be created and transferred between 
layers to achiever much higher performance.
-
-    1.2 By default, MKLML_ROOT=/usr/local, MKL2017_ML will be used
-
-      1.2.1 when excute make, Makefile will execute "prepare_mkl.sh" to 
download the MKL2017_ML library under <MKLML_ROOT>
-
-      1.2.2 manually steps for download MKL2017_ML problem
-
-        1.2.2.1 wget 
https://github.com/dmlc/web-data/raw/master/mxnet/mklml-release/mklml_lnx_<MKL 
VERSION>.tgz
-
-        1.2.2.2 tar zxvf mklml_lnx_<MKL VERSION>.tgz
-    
-        1.2.2.3 cp -rf mklml_lnx_<MKL VERSION>/* <MKLML_ROOT>/
-
-      1.2.3 Set LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$MKLML_ROOT/lib
-
-    1.3 If setting USE_BLAS=mkl
-
-      1.3.1 mshadow can also utilize mkl blas function in mklml package  
-
-    1.4 MKL version compatibility
-        
-        1.3.2.1 If you already have MKL installed and MKLROOT being set in 
your system, by default, it will not attempt to download the latest mklml 
package unless you unset MKLROOT. 
-
-  2. Run 'make -jX'
-       
-  3. Navigate into the python directory
-  
-  4. Run 'sudo python setup.py install'
-
-
diff --git a/docker_multiarch/arm.crosscompile.android.mk 
b/docker_multiarch/arm.crosscompile.android.mk
index 36b8e9bed79..0302c5cf25a 100644
--- a/docker_multiarch/arm.crosscompile.android.mk
+++ b/docker_multiarch/arm.crosscompile.android.mk
@@ -82,21 +82,6 @@ USE_OPENCV = 0
 # use openmp for parallelization
 USE_OPENMP = 1
 
-# MKL ML Library for Intel CPU/Xeon Phi
-# Please refer to MKL_README.md for details
-
-# MKL ML Library folder, need to be root for /usr/local
-# Change to User Home directory for standard user
-# For USE_BLAS!=mkl only
-MKLML_ROOT=/usr/local
-
-# whether use MKL2017 library
-USE_MKL2017 = 0
-
-# whether use MKL2017 experimental feature for high performance
-# Prerequisite USE_MKL2017=1
-USE_MKL2017_EXPERIMENTAL = 0
-
 # whether use NNPACK library
 USE_NNPACK = 0
 
@@ -115,13 +100,10 @@ USE_LAPACK_PATH =
 USE_INTEL_PATH = NONE
 
 # If use MKL only for BLAS, choose static link automatically to allow python 
wrapper
-ifeq ($(USE_MKL2017), 0)
+USE_STATIC_MKL = NONE
 ifeq ($(USE_BLAS), mkl)
 USE_STATIC_MKL = 1
 endif
-else
-USE_STATIC_MKL = NONE
-endif
 
 #----------------------------
 # distributed computing
diff --git a/docker_multiarch/arm.crosscompile.mk 
b/docker_multiarch/arm.crosscompile.mk
index da0552eb2fb..2bca9e396d7 100644
--- a/docker_multiarch/arm.crosscompile.mk
+++ b/docker_multiarch/arm.crosscompile.mk
@@ -82,21 +82,6 @@ USE_OPENCV = 0
 # use openmp for parallelization
 USE_OPENMP = 1
 
-# MKL ML Library for Intel CPU/Xeon Phi
-# Please refer to MKL_README.md for details
-
-# MKL ML Library folder, need to be root for /usr/local
-# Change to User Home directory for standard user
-# For USE_BLAS!=mkl only
-MKLML_ROOT=/usr/local
-
-# whether use MKL2017 library
-USE_MKL2017 = 0
-
-# whether use MKL2017 experimental feature for high performance
-# Prerequisite USE_MKL2017=1
-USE_MKL2017_EXPERIMENTAL = 0
-
 # whether use NNPACK library
 USE_NNPACK = 0
 
@@ -115,13 +100,10 @@ USE_LAPACK_PATH =
 USE_INTEL_PATH = NONE
 
 # If use MKL only for BLAS, choose static link automatically to allow python 
wrapper
-ifeq ($(USE_MKL2017), 0)
+USE_STATIC_MKL = NONE
 ifeq ($(USE_BLAS), mkl)
 USE_STATIC_MKL = 1
 endif
-else
-USE_STATIC_MKL = NONE
-endif
 
 #----------------------------
 # distributed computing
@@ -176,4 +158,4 @@ USE_CPP_PACKAGE = 0
 # whether to use sframe integration. This requires build sframe
 # g...@github.com:dato-code/SFrame.git
 # SFRAME_PATH = $(HOME)/SFrame
-# MXNET_PLUGINS += plugin/sframe/plugin.mk
\ No newline at end of file
+# MXNET_PLUGINS += plugin/sframe/plugin.mk
diff --git a/docs/faq/perf.md b/docs/faq/perf.md
index 519959810f3..e021f1e9a21 100644
--- a/docs/faq/perf.md
+++ b/docs/faq/perf.md
@@ -18,10 +18,7 @@ Performance is mainly affected by the following 4 factors:
 ## Intel CPU
 
 For using Intel Xeon CPUs for training and inference, we suggest enabling
-both `USE_MKL2017 = 1` and `USE_MKL2017_EXPERIMENTAL = 1` in
-`config.mk`. Check
-[MKL_README.md](https://github.com/dmlc/mxnet/blob/master/MKL_README.md) for
-details.
+`USE_MKLDNN = 1` in`config.mk`. 
 
 We also find that setting the following two environment variables can help:
 - `export KMP_AFFINITY=granularity=fine,compact,1,0` if there are two physical 
CPUs
diff --git a/example/image-classification/benchmark_score.py 
b/example/image-classification/benchmark_score.py
index b6d1d642c86..82903b63238 100644
--- a/example/image-classification/benchmark_score.py
+++ b/example/image-classification/benchmark_score.py
@@ -70,7 +70,7 @@ def score(network, dev, batch_size, num_batches):
 if __name__ == '__main__':
     networks = ['alexnet', 'vgg-16', 'inception-bn', 'inception-v3', 
'resnet-50', 'resnet-152']
     devs = [mx.gpu(0)] if len(get_gpus()) > 0 else []
-    # Enable USE_MKL2017_EXPERIMENTAL for better CPU performance
+    # Enable USE_MKLDNN for better CPU performance
     devs.append(mx.cpu())
 
     batch_sizes = [1, 2, 4, 8, 16, 32]
diff --git a/make/config.mk b/make/config.mk
index 1591d2ad60a..fa429f31f29 100644
--- a/make/config.mk
+++ b/make/config.mk
@@ -95,20 +95,8 @@ USE_LIBJPEG_TURBO_PATH = NONE
 # use openmp for parallelization
 USE_OPENMP = 1
 
-# MKL ML Library for Intel CPU/Xeon Phi
-# Please refer to MKL_README.md for details
-
-# MKL ML Library folder, need to be root for /usr/local
-# Change to User Home directory for standard user
-# For USE_BLAS!=mkl only
-MKLML_ROOT=/usr/local
-
-# whether use MKL2017 library
-USE_MKL2017 = 0
-
-# whether use MKL2017 experimental feature for high performance
-# Prerequisite USE_MKL2017=1
-USE_MKL2017_EXPERIMENTAL = 0
+# whether use MKL-DNN library
+USE_MKLDNN = 0
 
 # whether use NNPACK library
 USE_NNPACK = 0
diff --git a/make/osx.mk b/make/osx.mk
index 624677966e1..47f395bccf9 100644
--- a/make/osx.mk
+++ b/make/osx.mk
@@ -88,7 +88,7 @@ USE_BLAS = apple
 USE_LAPACK = 1
 
 # by default, disable lapack when using MKL
-# switch on when there is a full installation of MKL available (not just 
MKL2017/MKL_ML)
+# switch on when there is a full installation of MKL available (not just 
MKL_ML)
 ifeq ($(USE_BLAS), mkl)
 USE_LAPACK = 0
 endif
diff --git a/tests/python/cpu/test_mklml.py b/tests/python/cpu/test_mkldnn.py
similarity index 81%
rename from tests/python/cpu/test_mklml.py
rename to tests/python/cpu/test_mkldnn.py
index decd5b1c985..16b30039737 100644
--- a/tests/python/cpu/test_mklml.py
+++ b/tests/python/cpu/test_mkldnn.py
@@ -16,24 +16,24 @@
 # under the License.
 
 """
-MKLML related test cases
+MKL-DNN related test cases
 """
 
 import logging
 import os
 from sys import platform
 
-def test_mklml_install():
+def test_mkldnn_install():
     """
     This test will verify that MXNet is built/installed correctly when 
-    compiled with Intel MKLML library. The method will try to import 
-    the mxnet module and see if the mklml library is mapped to this 
+    compiled with Intel MKL-DNN library. The method will try to import 
+    the mxnet module and see if the mkldnn library is mapped to this 
     process's address space.
     """
     logging.basicConfig(level=logging.INFO)
     
     if not platform.startswith('linux'):
-        logging.info("Bypass mklml install test for non-Linux OS")
+        logging.info("Bypass mkldnn install test for non-Linux OS")
         return
 
     try:
@@ -45,14 +45,14 @@ def test_mklml_install():
 
     pid = os.getpid()
     rc = os.system("cat /proc/" + str(pid) + \
-                       "/maps | grep libmklml_ > /dev/null")
+                       "/maps | grep libmkldnn > /dev/null")
 
     if rc == 0:
-        logging.info("MXNet is built/installed correctly with MKLML")
+        logging.info("MXNet is built/installed correctly with MKL-DNN")
     else:
-        assert 0, "MXNet is built/installed incorrectly with MKLML, please " \
+        assert 0, "MXNet is built/installed incorrectly with MKL-DNN, please " 
\
                "double check your build/install steps or environment " \
                "variable settings"
 
 if __name__ == '__main__':
-    test_mklml_install()
+    test_mkldnn_install()


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

Reply via email to