Posted to commits@mxnet.apache.org by an...@apache.org on 2018/08/17 00:02:02 UTC

[incubator-mxnet] branch master updated: MKLDNN can be turned off with env var (#12058)

This is an automated email from the ASF dual-hosted git repository.

anirudh2290 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 6843914  MKLDNN can be turned off with env var (#12058)
6843914 is described below

commit 6843914f642c8343aaa9a09db803b6af6f5d94a2
Author: Alexander Zai <az...@gmail.com>
AuthorDate: Thu Aug 16 20:01:50 2018 -0400

    MKLDNN can be turned off with env var (#12058)
    
    * fallback when env set
    
    * fix mkldnn default env
    
    * update docs to include desc about MXNET_MKLDNN_ENABLED
    
    * update env name to MXNET_MKLDNN_ENABLED
    
    * check dev_mask is cpu before fallback
    
    * log if flag is off
    
    * cache mkldnn check output
    
    * move logonce inside fallback
    
    * retrigger
---
 docs/faq/env_var.md                             |  5 +++++
 src/common/utils.h                              |  7 +++++++
 src/executor/attach_op_execs_pass.cc            |  1 -
 src/operator/nn/activation.cc                   |  8 ++++++++
 src/operator/nn/batch_norm.cc                   |  3 +++
 src/operator/nn/concat.cc                       |  8 ++++++++
 src/operator/nn/convolution.cc                  |  8 ++++++--
 src/operator/nn/deconvolution.cc                |  8 ++++++--
 src/operator/nn/fully_connected.cc              |  9 +++++++++
 src/operator/nn/lrn.cc                          | 19 +++++++++++--------
 src/operator/nn/mkldnn/mkldnn_base-inl.h        |  5 +++++
 src/operator/nn/pooling.cc                      | 10 ++++++++--
 src/operator/nn/softmax.cc                      |  4 +++-
 src/operator/tensor/elemwise_binary_op_basic.cc |  8 ++++++--
 14 files changed, 85 insertions(+), 18 deletions(-)
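
The per-operator changes below all apply one and the same guard: in each storage-type inference function, a CPU context with MXNET_MKLDNN_ENABLED turned off forces DispatchMode::kFComputeFallback before the usual MKLDNN kFComputeEx selection is considered. A condensed, self-contained sketch of that precedence (local stand-in types for illustration, not MXNet's actual headers):

    #include <iostream>

    // Local stand-ins for MXNet's dispatch machinery (illustration only).
    enum class DispatchMode { kFCompute, kFComputeEx, kFComputeFallback };
    const int kCpuDevMask = 1;  // stand-in for mshadow::cpu::kDevMask

    // Mirrors the guard added to ConvStorageType, PoolingStorageType, etc.
    DispatchMode ChooseDispatch(int dev_mask, bool op_supports_mkldnn,
                                bool mkldnn_env_enabled) {
      if (dev_mask == kCpuDevMask && !mkldnn_env_enabled)
        return DispatchMode::kFComputeFallback;  // env var turned MKLDNN off
      if (dev_mask == kCpuDevMask && op_supports_mkldnn)
        return DispatchMode::kFComputeEx;        // MKLDNN-accelerated path
      return DispatchMode::kFCompute;            // plain dense compute
    }

    int main() {
      // With the flag off, even an MKLDNN-capable op falls back.
      std::cout << static_cast<int>(
          ChooseDispatch(kCpuDevMask, /*op_supports_mkldnn=*/true,
                         /*mkldnn_env_enabled=*/false)) << "\n";
    }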

diff --git a/docs/faq/env_var.md b/docs/faq/env_var.md
index 15ba225..0664d79 100644
--- a/docs/faq/env_var.md
+++ b/docs/faq/env_var.md
@@ -167,6 +167,11 @@ When USE_PROFILER is enabled in Makefile or CMake, the following environments ca
 * MXNET_HOME
   - Data directory in the filesystem for storage, for example when downloading gluon models.
  - Default is .mxnet in *nix and APPDATA/mxnet in Windows.
+  
+* MXNET_MKLDNN_ENABLED
+  - Values: 0, 1 ```(default=1)```
+  - Flag to enable or disable the MKLDNN accelerator. Enabled by default.
+  - Only applies to MXNet builds compiled with MKLDNN support (```pip install mxnet-mkl``` or built from source with ```USE_MKLDNN=1```).
 
 Settings for Minimum Memory Usage
 ---------------------------------
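
In practice the flag is an ordinary environment variable, so turning MKLDNN off is usually just a matter of exporting MXNET_MKLDNN_ENABLED=0 before launching the process. A C++ program embedding libmxnet can also set it programmatically; a minimal sketch, assuming a POSIX system (setenv is standard POSIX, not an MXNet API), and keeping in mind that the value is cached on first use by MKLDNNEnvSet() further down in this diff:

    #include <cstdlib>   // setenv() -- POSIX

    int main() {
      // Disable the MKLDNN accelerator for this process. This must happen
      // before MXNet first evaluates MKLDNNEnvSet(), because the result is
      // cached in a function-local static after the first read.
      setenv("MXNET_MKLDNN_ENABLED", "0", /*overwrite=*/1);

      // ... initialize MXNet and run operators via the C/C++ API here ...
      return 0;
    }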
diff --git a/src/common/utils.h b/src/common/utils.h
index fcc3da8..2688979 100644
--- a/src/common/utils.h
+++ b/src/common/utils.h
@@ -46,6 +46,9 @@
 #include <limits>
 
 #include "../operator/mxnet_op.h"
+#if MXNET_USE_MKLDNN == 1
+#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
+#endif
 
 namespace mxnet {
 namespace common {
@@ -468,6 +471,10 @@ inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
     "0 to suppress this warning.";
   os << "\nStorage type fallback detected:\n" << op_str << warning;
   LogOnce(os.str());
+#if MXNET_USE_MKLDNN == 1
+  if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
+                                       "You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
+#endif
 }
 
 // heuristic to determine number of threads per GPU
diff --git a/src/executor/attach_op_execs_pass.cc b/src/executor/attach_op_execs_pass.cc
index 72919d9..c011c1d 100644
--- a/src/executor/attach_op_execs_pass.cc
+++ b/src/executor/attach_op_execs_pass.cc
@@ -261,7 +261,6 @@ void CreateOpExecs(const Graph& g, OpExecVector* p_ret, size_t i) {
   const auto& vshape = g.GetAttr<ShapeVector>("shape");
   const auto& vctx = g.GetAttr<ContextVector>("context");
   const auto& dispatch_modes = g.GetAttr<DispatchModeVector>("dispatch_mode");
-
   // get the graph
   const auto& idx = g.indexed_graph();
   OpExecVector& ret = *p_ret;
diff --git a/src/operator/nn/activation.cc b/src/operator/nn/activation.cc
index 3404b5b..277ca8e 100644
--- a/src/operator/nn/activation.cc
+++ b/src/operator/nn/activation.cc
@@ -116,6 +116,10 @@ inline static bool ActivationStorageType(const nnvm::NodeAttrs& attrs,
   if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)) {
     *dispatch_mode = DispatchMode::kFComputeEx;
   }
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+    return ret;
+  }
 #endif
   return ret;
 }
@@ -158,6 +162,10 @@ inline static bool BackwardActStorageType(const nnvm::NodeAttrs& attrs,
   if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)) {
     *dispatch_mode = DispatchMode::kFComputeEx;
   }
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+    return ret;
+  }
 #endif
   return ret;
 }
diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index 30fb665..c7b1b60 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -460,6 +460,9 @@ static inline bool BatchNormStorageType(const nnvm::NodeAttrs &attrs,
     dispatched = MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode,
                                    in_attrs, out_attrs);
   }
+  if (!MKLDNNEnvSet()) {
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+  }
 #else
   for (int& v : *in_attrs)
     if (v == - 1) v = kDefaultStorage;
diff --git a/src/operator/nn/concat.cc b/src/operator/nn/concat.cc
index 7c7f403..9df459e 100644
--- a/src/operator/nn/concat.cc
+++ b/src/operator/nn/concat.cc
@@ -194,6 +194,10 @@ inline static bool ConcatForwardInferStorageType(const nnvm::NodeAttrs& attrs,
   if (!dispatched) {
     dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
+#if MXNET_USE_MKLDNN == 1
+  if (!MKLDNNEnvSet())
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+#endif
   return dispatched;
 }
 
@@ -213,6 +217,10 @@ inline static bool BackwardConcatStorageType(const nnvm::NodeAttrs& attrs,
   else
 #endif
     wanted_mode = DispatchMode::kFCompute;
+#if MXNET_USE_MKLDNN == 1
+  if (!MKLDNNEnvSet())
+    wanted_mode = DispatchMode::kFComputeFallback;
+#endif
   return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                              dispatch_mode, wanted_mode);
 }
diff --git a/src/operator/nn/convolution.cc b/src/operator/nn/convolution.cc
index e879623..18c0132 100644
--- a/src/operator/nn/convolution.cc
+++ b/src/operator/nn/convolution.cc
@@ -300,7 +300,9 @@ inline static bool ConvStorageType(const nnvm::NodeAttrs& attrs,
 
   DispatchMode wanted_mode;
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask)
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
+    wanted_mode = DispatchMode::kFComputeFallback;
+  else if (dev_mask == mshadow::cpu::kDevMask)
     wanted_mode = DispatchMode::kFComputeEx;
   else
 #endif
@@ -322,7 +324,9 @@ inline static bool BackwardConvStorageType(const nnvm::NodeAttrs& attrs,
 
   DispatchMode wanted_mode;
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask)
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
+    wanted_mode = DispatchMode::kFComputeFallback;
+  else if (dev_mask == mshadow::cpu::kDevMask)
     wanted_mode = DispatchMode::kFComputeEx;
   else
 #endif
diff --git a/src/operator/nn/deconvolution.cc b/src/operator/nn/deconvolution.cc
index 9e0a701..54b77aa 100644
--- a/src/operator/nn/deconvolution.cc
+++ b/src/operator/nn/deconvolution.cc
@@ -268,7 +268,9 @@ inline static bool DeconvStorageType(const nnvm::NodeAttrs& attrs,
 
   DispatchMode wanted_mode;
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask)
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
+    wanted_mode = DispatchMode::kFComputeFallback;
+  else if (dev_mask == mshadow::cpu::kDevMask)
     wanted_mode = DispatchMode::kFComputeEx;
   else
 #endif
@@ -289,7 +291,9 @@ inline static bool BackwardDeconvStorageType(const nnvm::NodeAttrs& attrs,
 
   DispatchMode wanted_mode;
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask)
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
+    wanted_mode = DispatchMode::kFComputeFallback;
+  else if (dev_mask == mshadow::cpu::kDevMask)
     wanted_mode = DispatchMode::kFComputeEx;
   else
 #endif
diff --git a/src/operator/nn/fully_connected.cc b/src/operator/nn/fully_connected.cc
index f720a10..eb881d2 100644
--- a/src/operator/nn/fully_connected.cc
+++ b/src/operator/nn/fully_connected.cc
@@ -193,6 +193,11 @@ inline static bool FCStorageType(const nnvm::NodeAttrs& attrs,
     dispatched = storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                                      dispatch_mode, DispatchMode::kFComputeEx);
   }
+#if MXNET_USE_MKLDNN == 1
+  if (!MKLDNNEnvSet())
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+#endif
+
   if (!dispatched) {
     dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
@@ -223,6 +228,10 @@ inline static bool BackwardFCStorageType(const nnvm::NodeAttrs& attrs,
     dispatched = storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                                      dispatch_mode, DispatchMode::kFCompute);
   }
+#if MXNET_USE_MKLDNN == 1
+  if (!MKLDNNEnvSet())
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+#endif
   return dispatched;
 }
 
diff --git a/src/operator/nn/lrn.cc b/src/operator/nn/lrn.cc
index 30a7523..4433519 100644
--- a/src/operator/nn/lrn.cc
+++ b/src/operator/nn/lrn.cc
@@ -88,10 +88,12 @@ bool LRNForwardInferStorageType(const nnvm::NodeAttrs& attrs,
                                 std::vector<int> *out_attrs) {
   CHECK(!in_attrs->empty());
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask) {
-    storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                        dispatch_mode, DispatchMode::kFComputeFallback);
+  } else if (dev_mask == mshadow::cpu::kDevMask) {
+    return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                         dispatch_mode, DispatchMode::kFComputeEx);
-    return true;
   }
 #endif
   storage_type_assign(out_attrs, mxnet::kDefaultStorage,
@@ -106,15 +108,16 @@ bool LRNBackwardInferStorageType(const nnvm::NodeAttrs& attrs,
                                  std::vector<int> *out_attrs) {
   CHECK(!in_attrs->empty());
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask) {
-    storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                        dispatch_mode, DispatchMode::kFComputeFallback);
+  } else if (dev_mask == mshadow::cpu::kDevMask) {
+    return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                         dispatch_mode, DispatchMode::kFComputeEx);
-    return true;
   }
 #endif
-  storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+  return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                       dispatch_mode, DispatchMode::kFCompute);
-  return true;
 }
 
 #if MXNET_USE_MKLDNN == 1
diff --git a/src/operator/nn/mkldnn/mkldnn_base-inl.h b/src/operator/nn/mkldnn/mkldnn_base-inl.h
index bbfb873..273afcd 100644
--- a/src/operator/nn/mkldnn/mkldnn_base-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_base-inl.h
@@ -137,6 +137,11 @@ static inline bool SupportMKLDNN(const NDArray &input) {
       && SupportStorageMKLDNN(input.storage_type());
 }
 
+static inline bool MKLDNNEnvSet() {
+  static bool is_mkldnn_enabled = dmlc::GetEnv("MXNET_MKLDNN_ENABLED", true);
+  return is_mkldnn_enabled;
+}
+
 /*
  * This is to align address to a certain alignment.
  */
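
One consequence of the function-local static above is that dmlc::GetEnv is evaluated only once per process: the first call to MKLDNNEnvSet() fixes the value, and changing the environment variable afterwards has no effect. A stand-alone illustration of that read-once behavior (standard library only, not MXNet code; the default-to-enabled parsing is simplified):

    #include <cstdlib>
    #include <iostream>
    #include <string>

    static bool FlagEnabled() {
      // Initialized exactly once, on the first call (thread-safe since C++11).
      static const bool enabled = [] {
        const char *v = std::getenv("MXNET_MKLDNN_ENABLED");
        return v == nullptr || std::string(v) != "0";  // unset => enabled
      }();
      return enabled;
    }

    int main() {
      std::cout << FlagEnabled() << "\n";      // first call reads the variable
      setenv("MXNET_MKLDNN_ENABLED", "0", 1);  // too late: value already cached
      std::cout << FlagEnabled() << "\n";      // prints the same value as before
    }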
diff --git a/src/operator/nn/pooling.cc b/src/operator/nn/pooling.cc
index 2380f0f..7cb1450 100644
--- a/src/operator/nn/pooling.cc
+++ b/src/operator/nn/pooling.cc
@@ -295,7 +295,10 @@ inline static bool PoolingStorageType(const nnvm::NodeAttrs &attrs,
 
 #if MXNET_USE_MKLDNN == 1
   const PoolingParam &param = nnvm::get<PoolingParam>(attrs.parsed);
-  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) {
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                        dispatch_mode, DispatchMode::kFComputeFallback);
+  } else if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) {
     return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                                dispatch_mode, DispatchMode::kFComputeEx);
   }
@@ -316,7 +319,10 @@ inline static bool BackwardPoolingStorageType(const nnvm::NodeAttrs &attrs,
   CHECK_EQ(out_attrs->size(), 1);
 
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) {
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                               dispatch_mode, DispatchMode::kFComputeFallback);
+  } else if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) {
     return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                                dispatch_mode, DispatchMode::kFComputeEx);
   }
diff --git a/src/operator/nn/softmax.cc b/src/operator/nn/softmax.cc
index e855608..c58f382 100644
--- a/src/operator/nn/softmax.cc
+++ b/src/operator/nn/softmax.cc
@@ -63,7 +63,9 @@ inline static bool SoftmaxStorageType(const nnvm::NodeAttrs& attrs,
   DispatchMode wanted_mode;
 #if MXNET_USE_MKLDNN == 1
   // We only run MKLDNN op if it runs on CPU.
-  if (dev_mask == mshadow::cpu::kDevMask)
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
+    wanted_mode = DispatchMode::kFComputeFallback;
+  else if (dev_mask == mshadow::cpu::kDevMask)
     wanted_mode = DispatchMode::kFComputeEx;
   else
 #endif
diff --git a/src/operator/tensor/elemwise_binary_op_basic.cc b/src/operator/tensor/elemwise_binary_op_basic.cc
index 6fc1ebb..884a1dd 100644
--- a/src/operator/tensor/elemwise_binary_op_basic.cc
+++ b/src/operator/tensor/elemwise_binary_op_basic.cc
@@ -62,7 +62,9 @@ static inline bool ElemwiseAddStorageType(const nnvm::NodeAttrs& attrs,
   bool ret = ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>(
                attrs, dev_mask, dispatch_mode, in_attrs, out_attrs);
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+  } else if (dev_mask == mshadow::cpu::kDevMask
       && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)
       && out_attrs->at(0) == kDefaultStorage) {
     *dispatch_mode = DispatchMode::kFComputeEx;
@@ -132,7 +134,9 @@ static inline bool ElemwiseAddBackwardStorageType(const nnvm::NodeAttrs& attrs,
   bool ret = ElemwiseStorageType<1, 2, true, true, true>(attrs, dev_mask, dispatch_mode,
                                                          in_attrs, out_attrs);
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask) {
+  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+  } else if (dev_mask == mshadow::cpu::kDevMask) {
     *dispatch_mode = DispatchMode::kFComputeEx;
   }
 #endif