anirudh2290 closed pull request #12058: MKLDNN can be turned off with env var
URL: https://github.com/apache/incubator-mxnet/pull/12058
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):

diff --git a/docs/faq/env_var.md b/docs/faq/env_var.md
index 6e9a3594168..017592d99c0 100644
--- a/docs/faq/env_var.md
+++ b/docs/faq/env_var.md
@@ -155,6 +155,11 @@ When USE_PROFILER is enabled in Makefile or CMake, the following environments ca
 * MXNET_HOME
   - Data directory in the filesystem for storage, for example when downloading gluon models.
   - Default in *nix is .mxnet APPDATA/mxnet in windows.
+  
+* MXNET_MKLDNN_ENABLED
+  - Values: 0, 1 ```(default=1)```
+  - Flag to enable or disable MKLDNN accelerator. On by default.
+  - Only applies to mxnet that has been compiled with MKLDNN (```pip install mxnet-mkl``` or built from source with ```USE_MKLDNN=1```)
 
 Settings for Minimum Memory Usage
 ---------------------------------
diff --git a/src/common/utils.h b/src/common/utils.h
index 96949a047fb..96569b31735 100644
--- a/src/common/utils.h
+++ b/src/common/utils.h
@@ -46,6 +46,9 @@
 #include <limits>
 
 #include "../operator/mxnet_op.h"
+#if MXNET_USE_MKLDNN == 1
+#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
+#endif
 
 namespace mxnet {
 namespace common {
@@ -468,6 +471,10 @@ inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
     "0 to suppress this warning.";
   os << "\nStorage type fallback detected:\n" << op_str << warning;
   LogOnce(os.str());
+#if MXNET_USE_MKLDNN == 1
+  if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
+                                       "You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
+#endif
 }
 
 // heuristic to dermine number of threads per GPU
diff --git a/src/executor/attach_op_execs_pass.cc b/src/executor/attach_op_execs_pass.cc
index 72919d90c62..c011c1d9ce0 100644
--- a/src/executor/attach_op_execs_pass.cc
+++ b/src/executor/attach_op_execs_pass.cc
@@ -261,7 +261,6 @@ void CreateOpExecs(const Graph& g, OpExecVector* p_ret, size_t i) {
   const auto& vshape = g.GetAttr<ShapeVector>("shape");
   const auto& vctx = g.GetAttr<ContextVector>("context");
   const auto& dispatch_modes = g.GetAttr<DispatchModeVector>("dispatch_mode");
-
   // get the graph
   const auto& idx = g.indexed_graph();
   OpExecVector& ret = *p_ret;
diff --git a/src/operator/nn/activation.cc b/src/operator/nn/activation.cc
index 3404b5b700a..277ca8e3013 100644
--- a/src/operator/nn/activation.cc
+++ b/src/operator/nn/activation.cc
@@ -116,6 +116,10 @@ inline static bool ActivationStorageType(const nnvm::NodeAttrs& attrs,
   if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)) {
     *dispatch_mode = DispatchMode::kFComputeEx;
   }
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+    return ret;
+  }
 #endif
   return ret;
 }
@@ -158,6 +162,10 @@ inline static bool BackwardActStorageType(const nnvm::NodeAttrs& attrs,
   if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)) {
     *dispatch_mode = DispatchMode::kFComputeEx;
   }
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+    return ret;
+  }
 #endif
   return ret;
 }
diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index 30fb665dd05..c7b1b609990 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -460,6 +460,9 @@ static inline bool BatchNormStorageType(const nnvm::NodeAttrs &attrs,
     dispatched = MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode,
                                    in_attrs, out_attrs);
   }
+  if (!MKLDNNEnvSet()) {
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+  }
 #else
   for (int& v : *in_attrs)
     if (v == - 1) v = kDefaultStorage;
diff --git a/src/operator/nn/concat.cc b/src/operator/nn/concat.cc
index 7c7f403d698..9df459e9224 100644
--- a/src/operator/nn/concat.cc
+++ b/src/operator/nn/concat.cc
@@ -194,6 +194,10 @@ inline static bool ConcatForwardInferStorageType(const nnvm::NodeAttrs& attrs,
   if (!dispatched) {
     dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
+#if MXNET_USE_MKLDNN == 1
+  if (!MKLDNNEnvSet())
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+#endif
   return dispatched;
 }
 
@@ -213,6 +217,10 @@ inline static bool BackwardConcatStorageType(const nnvm::NodeAttrs& attrs,
   else
 #endif
     wanted_mode = DispatchMode::kFCompute;
+#if MXNET_USE_MKLDNN == 1
+  if (!MKLDNNEnvSet())
+    wanted_mode = DispatchMode::kFComputeFallback;
+#endif
   return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                              dispatch_mode, wanted_mode);
 }
diff --git a/src/operator/nn/convolution.cc b/src/operator/nn/convolution.cc
index ef70ccd6ec1..681f3138dcb 100644
--- a/src/operator/nn/convolution.cc
+++ b/src/operator/nn/convolution.cc
@@ -300,7 +300,9 @@ inline static bool ConvStorageType(const nnvm::NodeAttrs& attrs,
 
   DispatchMode wanted_mode;
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask)
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
+    wanted_mode = DispatchMode::kFComputeFallback;
+  else if (dev_mask == mshadow::cpu::kDevMask)
     wanted_mode = DispatchMode::kFComputeEx;
   else
 #endif
@@ -322,7 +324,9 @@ inline static bool BackwardConvStorageType(const nnvm::NodeAttrs& attrs,
 
   DispatchMode wanted_mode;
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask)
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
+    wanted_mode = DispatchMode::kFComputeFallback;
+  else if (dev_mask == mshadow::cpu::kDevMask)
     wanted_mode = DispatchMode::kFComputeEx;
   else
 #endif
diff --git a/src/operator/nn/deconvolution.cc b/src/operator/nn/deconvolution.cc
index 9e0a70121bf..54b77aafda0 100644
--- a/src/operator/nn/deconvolution.cc
+++ b/src/operator/nn/deconvolution.cc
@@ -268,7 +268,9 @@ inline static bool DeconvStorageType(const nnvm::NodeAttrs& attrs,
 
   DispatchMode wanted_mode;
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask)
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
+    wanted_mode = DispatchMode::kFComputeFallback;
+  else if (dev_mask == mshadow::cpu::kDevMask)
     wanted_mode = DispatchMode::kFComputeEx;
   else
 #endif
@@ -289,7 +291,9 @@ inline static bool BackwardDeconvStorageType(const nnvm::NodeAttrs& attrs,
 
   DispatchMode wanted_mode;
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask)
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
+    wanted_mode = DispatchMode::kFComputeFallback;
+  else if (dev_mask == mshadow::cpu::kDevMask)
     wanted_mode = DispatchMode::kFComputeEx;
   else
 #endif
diff --git a/src/operator/nn/fully_connected.cc b/src/operator/nn/fully_connected.cc
index d1d84e97529..09a32fcb50e 100644
--- a/src/operator/nn/fully_connected.cc
+++ b/src/operator/nn/fully_connected.cc
@@ -193,6 +193,11 @@ inline static bool FCStorageType(const nnvm::NodeAttrs& attrs,
     dispatched = storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                                      dispatch_mode, DispatchMode::kFComputeEx);
   }
+#if MXNET_USE_MKLDNN == 1
+  if (!MKLDNNEnvSet())
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+#endif
+
   if (!dispatched) {
     dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
@@ -223,6 +228,10 @@ inline static bool BackwardFCStorageType(const nnvm::NodeAttrs& attrs,
     dispatched = storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                                      dispatch_mode, DispatchMode::kFCompute);
   }
+#if MXNET_USE_MKLDNN == 1
+  if (!MKLDNNEnvSet())
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+#endif
   return dispatched;
 }
 
diff --git a/src/operator/nn/lrn.cc b/src/operator/nn/lrn.cc
index 6b3d7c81837..7159902a6df 100644
--- a/src/operator/nn/lrn.cc
+++ b/src/operator/nn/lrn.cc
@@ -88,10 +88,12 @@ bool LRNForwardInferStorageType(const nnvm::NodeAttrs& attrs,
                                 std::vector<int> *out_attrs) {
   CHECK(!in_attrs->empty());
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask) {
-    storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                        dispatch_mode, DispatchMode::kFComputeFallback);
+  } else if (dev_mask == mshadow::cpu::kDevMask) {
+    return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                         dispatch_mode, DispatchMode::kFComputeEx);
-    return true;
   }
 #endif
   storage_type_assign(out_attrs, mxnet::kDefaultStorage,
@@ -106,15 +108,16 @@ bool LRNBackwardInferStorageType(const nnvm::NodeAttrs& attrs,
                                  std::vector<int> *out_attrs) {
   CHECK(!in_attrs->empty());
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask) {
-    storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                        dispatch_mode, DispatchMode::kFComputeFallback);
+  } else if (dev_mask == mshadow::cpu::kDevMask) {
+    return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                         dispatch_mode, DispatchMode::kFComputeEx);
-    return true;
   }
 #endif
-  storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+  return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                       dispatch_mode, DispatchMode::kFCompute);
-  return true;
 }
 
 #if MXNET_USE_MKLDNN == 1
diff --git a/src/operator/nn/mkldnn/mkldnn_base-inl.h b/src/operator/nn/mkldnn/mkldnn_base-inl.h
index bbfb873ee86..273afcd32dc 100644
--- a/src/operator/nn/mkldnn/mkldnn_base-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_base-inl.h
@@ -137,6 +137,11 @@ static inline bool SupportMKLDNN(const NDArray &input) {
       && SupportStorageMKLDNN(input.storage_type());
 }
 
+static inline bool MKLDNNEnvSet() {
+  static bool is_mkldnn_enabled = dmlc::GetEnv("MXNET_MKLDNN_ENABLED", true);
+  return is_mkldnn_enabled;
+}
+
 /*
  * This is to align address to a certain alignment.
  */
diff --git a/src/operator/nn/pooling.cc b/src/operator/nn/pooling.cc
index 9b6996d0feb..77445abc714 100644
--- a/src/operator/nn/pooling.cc
+++ b/src/operator/nn/pooling.cc
@@ -295,7 +295,10 @@ inline static bool PoolingStorageType(const nnvm::NodeAttrs &attrs,
 
 #if MXNET_USE_MKLDNN == 1
   const PoolingParam &param = nnvm::get<PoolingParam>(attrs.parsed);
-  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) {
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                        dispatch_mode, DispatchMode::kFComputeFallback);
+  } else if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) {
     return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                                dispatch_mode, DispatchMode::kFComputeEx);
   }
@@ -316,7 +319,10 @@ inline static bool BackwardPoolingStorageType(const nnvm::NodeAttrs &attrs,
   CHECK_EQ(out_attrs->size(), 1);
 
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) {
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                               dispatch_mode, DispatchMode::kFComputeFallback);
+  } else if (dev_mask == mshadow::cpu::kDevMask && 
SupportMKLDNNPooling(param)) {
     return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                                dispatch_mode, DispatchMode::kFComputeEx);
   }
diff --git a/src/operator/nn/softmax.cc b/src/operator/nn/softmax.cc
index e855608e7f2..c58f382bbad 100644
--- a/src/operator/nn/softmax.cc
+++ b/src/operator/nn/softmax.cc
@@ -63,7 +63,9 @@ inline static bool SoftmaxStorageType(const nnvm::NodeAttrs& attrs,
   DispatchMode wanted_mode;
 #if MXNET_USE_MKLDNN == 1
   // We only run MKLDNN op if it runs on CPU.
-  if (dev_mask == mshadow::cpu::kDevMask)
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
+    wanted_mode = DispatchMode::kFComputeFallback;
+  else if (dev_mask == mshadow::cpu::kDevMask)
     wanted_mode = DispatchMode::kFComputeEx;
   else
 #endif
diff --git a/src/operator/tensor/elemwise_binary_op_basic.cc b/src/operator/tensor/elemwise_binary_op_basic.cc
index 6fc1ebb76cf..884a1ddad21 100644
--- a/src/operator/tensor/elemwise_binary_op_basic.cc
+++ b/src/operator/tensor/elemwise_binary_op_basic.cc
@@ -62,7 +62,9 @@ static inline bool ElemwiseAddStorageType(const nnvm::NodeAttrs& attrs,
   bool ret = ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>(
                attrs, dev_mask, dispatch_mode, in_attrs, out_attrs);
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+  } else if (dev_mask == mshadow::cpu::kDevMask
       && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)
       && out_attrs->at(0) == kDefaultStorage) {
     *dispatch_mode = DispatchMode::kFComputeEx;
@@ -132,7 +134,9 @@ static inline bool ElemwiseAddBackwardStorageType(const nnvm::NodeAttrs& attrs,
   bool ret = ElemwiseStorageType<1, 2, true, true, true>(attrs, dev_mask, dispatch_mode,
                                                          in_attrs, out_attrs);
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask) {
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+  } else if (dev_mask == mshadow::cpu::kDevMask) {
     *dispatch_mode = DispatchMode::kFComputeEx;
   }
 #endif


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to