You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by jx...@apache.org on 2018/01/19 00:25:45 UTC

[incubator-mxnet] branch master updated: refactor logging in infer storage pass (#9464)

This is an automated email from the ASF dual-hosted git repository.

jxie pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 92d79f2  refactor logging in infer storage pass (#9464)
92d79f2 is described below

commit 92d79f26a5c2914f2087ca1360c0b2090f22014f
Author: Haibin Lin <li...@gmail.com>
AuthorDate: Thu Jan 18 16:25:40 2018 -0800

    refactor logging in infer storage pass (#9464)
    
    * refactor log message in infer storage
    
    * fix bug. support imperative
    
    * remove common:: prefix
    
    * fix gpu build
    
    * address review comments
    
    * use char* and ostringstream
---
 src/common/utils.h                                 | 63 +++++++++++++++++++
 src/executor/infer_graph_attr_pass.cc              | 11 +++-
 src/imperative/imperative_utils.h                  | 16 +++--
 src/operator/operator_common.h                     | 70 +++-------------------
 src/operator/optimizer_op-inl.h                    | 13 ++--
 src/operator/tensor/broadcast_reduce_op.h          | 11 +---
 src/operator/tensor/cast_storage-inl.h             |  6 +-
 src/operator/tensor/dot-inl.h                      | 16 ++---
 src/operator/tensor/elemwise_binary_op.cc          | 14 ++---
 src/operator/tensor/elemwise_binary_op.h           | 15 ++---
 src/operator/tensor/elemwise_binary_scalar_op.h    |  2 +-
 .../tensor/elemwise_binary_scalar_op_basic.cc      | 12 ++--
 src/operator/tensor/elemwise_scatter_op.cc         | 16 +----
 src/operator/tensor/elemwise_sum.cc                | 12 ++--
 src/operator/tensor/elemwise_sum.cu                |  9 ++-
 src/operator/tensor/elemwise_unary_op.h            |  4 +-
 src/operator/tensor/elemwise_unary_op_basic.cc     |  5 +-
 src/operator/tensor/indexing_op.h                  | 18 ++----
 src/operator/tensor/init_op.h                      |  7 +--
 src/operator/tensor/matrix_op-inl.h                |  7 +--
 src/operator/tensor/matrix_op.cc                   |  5 +-
 src/operator/tensor/sparse_retain-inl.h            | 12 +---
 src/operator/tensor/square_sum-inl.h               | 18 ++----
 23 files changed, 156 insertions(+), 206 deletions(-)

diff --git a/src/common/utils.h b/src/common/utils.h
index 4bb8024..73ba6db 100644
--- a/src/common/utils.h
+++ b/src/common/utils.h
@@ -364,6 +364,50 @@ inline std::string dev_type_string(const int dev_type) {
   return "unknown";
 }
 
+/*! \brief get string representation of the operator stypes */
+inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
+                                         const int dev_mask,
+                                         const std::vector<int>& in_attrs,
+                                         const std::vector<int>& out_attrs) {
+  std::ostringstream os;
+  os << "operator = " << attrs.op->name
+     << "\ninput storage types = [";
+  for (const int attr : in_attrs) {
+    os << stype_string(attr) << ", ";
+  }
+  os << "]\n"
+     << "output storage types = [";
+  for (const int attr : out_attrs) {
+    os << stype_string(attr) << ", ";
+  }
+  os << "]\n"
+     << "params = {";
+  for (auto kv : attrs.dict) {
+    os << "\"" << kv.first << "\" : " << kv.second << ", ";
+  }
+  os << "}\n"
+     << "context.dev_mask = " << dev_type_string(dev_mask);
+  return os.str();
+}
+
+/*! \brief get string representation of the operator */
+inline std::string operator_string(const nnvm::NodeAttrs& attrs,
+                                  const OpContext& ctx,
+                                  const std::vector<NDArray>& inputs,
+                                  const std::vector<OpReqType>& req,
+                                  const std::vector<NDArray>& outputs) {
+  std::string result = "";
+  std::vector<int> in_stypes;
+  std::vector<int> out_stypes;
+  in_stypes.reserve(inputs.size());
+  out_stypes.reserve(outputs.size());
+  auto xform = [](const NDArray arr) -> int { return arr.storage_type(); };
+  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
+  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
+  result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
+  return result;
+}
+
 /*! \brief log message once. Intended for storage fallback warning messages. */
 inline void LogOnce(const std::string& message) {
   typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
@@ -374,6 +418,25 @@ inline void LogOnce(const std::string& message) {
   }
 }
 
+/*! \brief log storage fallback event
+ */
+inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
+                               const int dev_mask,
+                               const std::vector<int>* in_attrs,
+                               const std::vector<int>* out_attrs) {
+  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
+  if (!log) return;
+  const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
+  std::ostringstream os;
+  const char* warning = "\nThe operator with default storage type will be dispatched "
+    "for execution. You're seeing this warning message because the operator above is unable "
+    "to process the given ndarrays with specified storage types, context and parameter. "
+    "Temporary dense ndarrays are generated in order to execute the operator. "
+    "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
+    "0 to suppress this warning.";
+  os << "\nStorage type fallback detected:\n" << op_str << warning;
+  LogOnce(os.str());
+}
 
 // heuristic to determine number of threads per GPU
 inline int GetNumThreadPerGPU() {
diff --git a/src/executor/infer_graph_attr_pass.cc b/src/executor/infer_graph_attr_pass.cc
index 67e61aa..73a34c8 100644
--- a/src/executor/infer_graph_attr_pass.cc
+++ b/src/executor/infer_graph_attr_pass.cc
@@ -50,7 +50,15 @@ bool ApplyOpInferAttr<int, FInferStorageType>(const nnvm::Graph& g,
                                               std::vector<int>* out_attrs,
                                               DispatchMode* dispatch_mode) {
   const DevMaskVector& dev_masks = g.GetAttr<DevMaskVector>("dev_mask");
-  return finfer(attrs, dev_masks[nid], dispatch_mode, in_attrs, out_attrs);
+  const bool success = finfer(attrs, dev_masks[nid], dispatch_mode, in_attrs, out_attrs);
+  if (!success) {
+    LOG(FATAL) << "Operator not implemented: "
+               << common::operator_stype_string(attrs, dev_masks[nid], *in_attrs, *out_attrs);
+  }
+  if (*dispatch_mode == DispatchMode::kFComputeFallback) {
+    common::LogStorageFallback(attrs, dev_masks[nid], in_attrs, out_attrs);
+  }
+  return true;
 }
 
 /*!\brief
@@ -357,7 +365,6 @@ inline bool DefaultStorageType(const nnvm::NodeAttrs& attrs,
   if (*dispatch_mode == DispatchMode::kUndefined) {
     if (fallback) {
       *dispatch_mode = DispatchMode::kFComputeFallback;
-      op::LogStorageFallback(attrs, dev_mask, iattr, oattr);
     } else {
       *dispatch_mode = DispatchMode::kFCompute;
     }
diff --git a/src/imperative/imperative_utils.h b/src/imperative/imperative_utils.h
index add568d..fc28f50 100644
--- a/src/imperative/imperative_utils.h
+++ b/src/imperative/imperative_utils.h
@@ -138,15 +138,21 @@ inline void SetShapeType(const Context& ctx,
   for (auto& i : outputs) {
     out_storage_types.push_back(i->storage_type());
   }
+  bool infer_stype_success;
   if (inferstorage.count(attrs.op)) {
-    CHECK(inferstorage[attrs.op](attrs, ctx.dev_mask(), dispatch_mode,
-                                 &in_storage_types, &out_storage_types));
+    infer_stype_success = inferstorage[attrs.op](attrs, ctx.dev_mask(), dispatch_mode,
+                                                 &in_storage_types, &out_storage_types);
   } else {
     // if infer storage attr is not present, apply the default infer storage function
-    bool success = exec::DefaultStorageType(attrs, ctx.dev_mask(), dispatch_mode,
-                                            &in_storage_types, &out_storage_types);
-    CHECK(success);
+    infer_stype_success = exec::DefaultStorageType(attrs, ctx.dev_mask(), dispatch_mode,
+                                                   &in_storage_types, &out_storage_types);
   }
+  CHECK(infer_stype_success) << "Operator not implemented: "
+     << common::operator_stype_string(attrs, ctx.dev_mask(), in_storage_types, out_storage_types);
+  if (*dispatch_mode == DispatchMode::kFComputeFallback) {
+    common::LogStorageFallback(attrs, ctx.dev_mask(), &in_storage_types, &out_storage_types);
+  }
+
   CHECK_EQ(out_storage_types.size(), outputs.size());
   CHECK(*dispatch_mode != DispatchMode::kUndefined);
 
diff --git a/src/operator/operator_common.h b/src/operator/operator_common.h
index 329db02..10581d1 100644
--- a/src/operator/operator_common.h
+++ b/src/operator/operator_common.h
@@ -350,11 +350,12 @@ inline bool storage_type_assign(StorageTypeVector* stypes,
 
 /*! \brief update the stype vector to default storage and dispatch_mode to fallback
  */
-inline void dispatch_fallback(StorageTypeVector* stypes, DispatchMode* dispatch) {
+inline bool dispatch_fallback(StorageTypeVector* stypes, DispatchMode* dispatch) {
   for (auto& stype : *stypes) {
     type_assign(&stype, kDefaultStorage);
   }
   DISPATCH_MODE_ASSIGN_CHECK(dispatch, 0, DispatchMode::kFComputeFallback);
+  return true;
 }
 
 // make a new node with operator op_name. Inputs are not filled.
@@ -479,66 +480,13 @@ inline void ParamParser(nnvm::NodeAttrs* attrs) {
           << ") == " << param << ".shape[0] (" << rsp.shape()[0] << ").";          \
   }
 
-/*! \brief get string representation of the operator stypes */
-inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
-                                         const int dev_mask,
-                                         const std::vector<int>& in_attrs,
-                                         const std::vector<int>& out_attrs) {
-  std::string result = "";
-  result += "operator = " + attrs.op->name + "\n";
-  result += "input storage types = [";
-  for (const auto attr : in_attrs) {
-    result += common::stype_string(attr) + ", ";
-  }
-  result += "]\n";
-  result += "output storage types = [";
-  for (const auto attr : out_attrs) {
-    result += common::stype_string(attr) + ", ";
-  }
-  result += "]\n";
-  result += "params = {";
-  for (auto kv : attrs.dict) {
-    result += "\"" + kv.first + "\" : " + kv.second + ", ";
-  }
-  result += "}\n";
-  result += "context.dev_mask = " + common::dev_type_string(dev_mask);
-  return result;
-}
-
-/*! \brief get string representation of the operator */
-inline std::string operator_string(const nnvm::NodeAttrs& attrs,
-                                  const OpContext& ctx,
-                                  const std::vector<NDArray>& inputs,
-                                  const std::vector<OpReqType>& req,
-                                  const std::vector<NDArray>& outputs) {
-  std::string result = "";
-  std::vector<int> in_stypes;
-  std::vector<int> out_stypes;
-  auto xform = [](const NDArray arr) -> int { return arr.storage_type(); };
-  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
-  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
-  result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
-  return result;
-}
-
-/*! \brief log storage fallback event
- */
-inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
-                               const int dev_mask,
-                               const std::vector<int>* in_attrs,
-                               const std::vector<int>* out_attrs) {
-  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
-  if (!log) return;
-  const std::string op_str = op::operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
-  std::ostringstream os;
-  os << "\nStorage type fallback detected:\n" << op_str
-     << "\nThe operator with default storage type will be dispatched for execution. "
-     << "You're seeing this warning message because the operator above is unable to "
-     << "process the given ndarrays with specified storage types, context and parameter. "
-     << "Temporary dense ndarrays are generated in order to execute the operator. "
-     << "You can set environment variable "
-     << "MXNET_STORAGE_FALLBACK_LOG_VERBOSE to 0 to suppress this warning.";
-  common::LogOnce(os.str());
+inline void LogUnimplementedOp(const nnvm::NodeAttrs& attrs,
+                               const OpContext &ctx,
+                               const std::vector<NDArray> &inputs,
+                               const std::vector<OpReqType> &req,
+                               const std::vector<NDArray> &outputs) {
+    using common::operator_string;
+    LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
 }
 
 }  // namespace op
diff --git a/src/operator/optimizer_op-inl.h b/src/operator/optimizer_op-inl.h
index 0e0dd7f..60981aa 100644
--- a/src/operator/optimizer_op-inl.h
+++ b/src/operator/optimizer_op-inl.h
@@ -197,7 +197,7 @@ inline void SGDUpdateEx(const nnvm::NodeAttrs& attrs,
     NDArray out = outputs[0];
     SGDUpdateRspRspImpl<xpu>(param, ctx, inputs[0], inputs[1], req[0], &out);
   } else {
-    LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
 }
 
@@ -496,10 +496,9 @@ inline bool StdOptStorageType(const nnvm::NodeAttrs& attrs,
   }
 
   if (!dispatched) {
-    dispatch_fallback(out_attrs, dispatch_mode);
-    LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
+    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
-  return true;
+  return dispatched;
 }
 
 template<int req>
@@ -584,7 +583,7 @@ inline void SGDMomUpdateEx(const nnvm::NodeAttrs& attrs,
              out_stype == kRowSparseStorage) {
     SGDMomStdUpdateRspRspDnsImpl<xpu>(param, ctx, weight, grad, mom, req[0], &out);
   } else {
-    LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
 }
 
@@ -948,7 +947,7 @@ inline void AdamUpdateEx(const nnvm::NodeAttrs& attrs,
      AdamStdUpdateRspRspRspImpl<xpu>(param, ctx, inputs[0], inputs[1], inputs[2],
                                      inputs[3], req[0], &out);
   } else {
-    LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
 }
 
@@ -1343,7 +1342,7 @@ inline void FtrlUpdateEx(const nnvm::NodeAttrs& attrs,
      FtrlUpdateRspRspRspImpl<xpu>(param, ctx, inputs[0], inputs[1], inputs[2],
                                   inputs[3], req[0], &out);
   } else {
-    LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
 }
 
diff --git a/src/operator/tensor/broadcast_reduce_op.h b/src/operator/tensor/broadcast_reduce_op.h
index 6e92c8a..2ae409f 100644
--- a/src/operator/tensor/broadcast_reduce_op.h
+++ b/src/operator/tensor/broadcast_reduce_op.h
@@ -374,13 +374,9 @@ inline bool SumOpForwardInferStorageType(const nnvm::NodeAttrs& attrs,
   if (!dispatched) {
     // If input is csr, but keepdims or exclude is set or summing along a axis
     // different from 0 or 1
-    dispatch_fallback(out_attrs, dispatch_mode);
+    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
-  if (*dispatch_mode == DispatchMode::kFComputeFallback) {
-    LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
-  }
-
-  return true;
+  return dispatched;
 }
 
 template<typename xpu, typename reducer>
@@ -683,8 +679,7 @@ void SumOpForwardEx(const nnvm::NodeAttrs& attrs, const OpContext& ctx,
     NDArray output = outputs[0];
     SumCsrImpl<xpu, normalize>(attrs, s, ctx, inputs[0], req[0], &output);
   } else {
-    LOG(FATAL) << "Not implemented: "
-               << operator_string(attrs, ctx, inputs, req, outputs);
+    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
 }
 
diff --git a/src/operator/tensor/cast_storage-inl.h b/src/operator/tensor/cast_storage-inl.h
index ebe19d4..ed20027 100644
--- a/src/operator/tensor/cast_storage-inl.h
+++ b/src/operator/tensor/cast_storage-inl.h
@@ -397,11 +397,7 @@ inline bool CastStorageInferStorageType(const nnvm::NodeAttrs& attrs,
     dispatched = storage_type_assign(out_attrs, param_stype,
                                      dispatch_mode, DispatchMode::kFComputeEx);
   }
-  if (!dispatched) {
-    LOG(FATAL) << "Not implemented: "
-               << operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
-  }
-  return true;
+  return dispatched;
 }
 
 template<typename xpu>
diff --git a/src/operator/tensor/dot-inl.h b/src/operator/tensor/dot-inl.h
index 349b136..c5f278e 100644
--- a/src/operator/tensor/dot-inl.h
+++ b/src/operator/tensor/dot-inl.h
@@ -244,12 +244,9 @@ inline bool DotForwardInferStorageType(const nnvm::NodeAttrs& attrs,
                                      dispatch_ex);
   }
   if (!dispatched) {
-    dispatch_fallback(out_attrs, dispatch_mode);
+    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
-  if (static_cast<DispatchMode>(*dispatch_mode) == DispatchMode::kFComputeFallback) {
-    LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
-  }
-  return true;
+  return dispatched;
 }
 
 inline bool DotBackwardInferStorageType(const nnvm::NodeAttrs& attrs,
@@ -294,10 +291,9 @@ inline bool DotBackwardInferStorageType(const nnvm::NodeAttrs& attrs,
     }
   }
   if (!dispatched) {
-    dispatch_fallback(out_attrs, dispatch_mode);
-    LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
+    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
-  return true;
+  return dispatched;
 }
 
 /*!
@@ -1070,7 +1066,7 @@ void DotForwardEx(const nnvm::NodeAttrs& attrs,
     NDArray ret = outputs[0];
     DotDnsCsrCsrImpl<xpu>(ctx, inputs[0].data(), inputs[1], req[0], &ret);
   } else {
-    LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
 }
 
@@ -1112,7 +1108,7 @@ void DotBackwardEx(const nnvm::NodeAttrs& attrs,
     TBlob ret = outputs[1].data();
     DotCsrRspDnsImpl(ctx, xpu(), inputs[1], inputs[0], req[1], !param.transpose_a, &ret);
   } else {
-    LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
 }
 
diff --git a/src/operator/tensor/elemwise_binary_op.cc b/src/operator/tensor/elemwise_binary_op.cc
index 931132b..e8ba2fa 100644
--- a/src/operator/tensor/elemwise_binary_op.cc
+++ b/src/operator/tensor/elemwise_binary_op.cc
@@ -46,12 +46,9 @@ bool ElemwiseBinaryOp::SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
     dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, dispatch_ex);
   }
   if (!dispatched) {
-    dispatch_fallback(out_attrs, dispatch_mode);
+    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
-  if (*dispatch_mode == DispatchMode::kFComputeFallback) {
-    LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
-  }
-  return true;
+  return dispatched;
 }
 
 bool ElemwiseBinaryOp::BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
@@ -78,12 +75,9 @@ bool ElemwiseBinaryOp::BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
     }
   }
   if (!dispatched) {
-    dispatch_fallback(out_attrs, dispatch_mode);
-  }
-  if (*dispatch_mode == DispatchMode::kFComputeFallback) {
-    LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
+    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
-  return true;
+  return dispatched;
 }
 
 }  // namespace op
diff --git a/src/operator/tensor/elemwise_binary_op.h b/src/operator/tensor/elemwise_binary_op.h
index 6fc4107..9a151d3 100644
--- a/src/operator/tensor/elemwise_binary_op.h
+++ b/src/operator/tensor/elemwise_binary_op.h
@@ -300,12 +300,9 @@ class ElemwiseBinaryOp : public OpBase {
       }
     }
     if (!dispatched) {
-      dispatch_fallback(out_attrs, dispatch_mode);
+      dispatched = dispatch_fallback(out_attrs, dispatch_mode);
     }
-    if (*dispatch_mode == DispatchMode::kFComputeFallback) {
-      LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
-    }
-    return true;
+    return dispatched;
   }
 
   /*!
@@ -403,7 +400,7 @@ class ElemwiseBinaryOp : public OpBase {
         });
       });
     } else {
-      LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
     }
   }
 
@@ -445,7 +442,7 @@ class ElemwiseBinaryOp : public OpBase {
     } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
       ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
     } else {
-      LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
     }
   }
 
@@ -490,7 +487,7 @@ class ElemwiseBinaryOp : public OpBase {
         DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
         UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
       } else {
-        LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+        LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
       }
     }
     // rhs grad
@@ -501,7 +498,7 @@ class ElemwiseBinaryOp : public OpBase {
         DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
         UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
       } else {
-        LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+        LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
       }
     }
   }
diff --git a/src/operator/tensor/elemwise_binary_scalar_op.h b/src/operator/tensor/elemwise_binary_scalar_op.h
index 0419e99..0fcfe70 100644
--- a/src/operator/tensor/elemwise_binary_scalar_op.h
+++ b/src/operator/tensor/elemwise_binary_scalar_op.h
@@ -269,7 +269,7 @@ class BinaryScalarOp : public UnaryOp {
         });
       });
     } else {
-      LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
     }
   }
 
diff --git a/src/operator/tensor/elemwise_binary_scalar_op_basic.cc b/src/operator/tensor/elemwise_binary_scalar_op_basic.cc
index 6792379..8c12218 100644
--- a/src/operator/tensor/elemwise_binary_scalar_op_basic.cc
+++ b/src/operator/tensor/elemwise_binary_scalar_op_basic.cc
@@ -74,12 +74,9 @@ static bool BinaryScalarStorageTypeWithDenseResultStorageType(const NodeAttrs& a
       dispatch_mode, dispatch_ex);
   }
   if (!dispatched) {
-    dispatch_fallback(out_attrs, dispatch_mode);
+    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
-  if (static_cast<DispatchMode>(*dispatch_mode) == DispatchMode::kFComputeFallback) {
-    LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
-  }
-  return true;
+  return dispatched;
 }
 
 static bool BinaryScalarStorageType(const nnvm::NodeAttrs& attrs,
@@ -118,10 +115,9 @@ static bool BinaryScalarStorageType(const nnvm::NodeAttrs& attrs,
     }
   }
   if (!dispatched) {
-    dispatch_fallback(out_attrs, dispatch_mode);
-    LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
+    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
-  return true;
+  return dispatched;
 }
 
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SCALAR_SUPPORT_WITH_DENSE_RESULT(_plus_scalar)
diff --git a/src/operator/tensor/elemwise_scatter_op.cc b/src/operator/tensor/elemwise_scatter_op.cc
index 2f0883d..959faf7 100644
--- a/src/operator/tensor/elemwise_scatter_op.cc
+++ b/src/operator/tensor/elemwise_scatter_op.cc
@@ -23,18 +23,6 @@
 namespace mxnet {
 namespace op {
 
-static bool fail_storage_type_inference(const NodeAttrs& attrs,
-                                        const int dev_mask,
-                                        DispatchMode* dispatch_mode,
-                                        std::vector<int>* in_attrs,
-                                        std::vector<int>* out_attrs) {
-  dispatch_fallback(out_attrs, dispatch_mode);
-  if (*dispatch_mode == DispatchMode::kFComputeFallback) {
-    LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
-  }
-  return true;
-}
-
 static bool StorageTypeRspOrDenseOutput(const NodeAttrs& attrs,
                                         const int dev_mask,
                                         DispatchMode* dispatch_mode,
@@ -57,7 +45,7 @@ static bool StorageTypeRspOrDenseOutput(const NodeAttrs& attrs,
       return true;
     }
   }
-  return fail_storage_type_inference(attrs, dev_mask, dispatch_mode, in_attrs, out_attrs);
+  return dispatch_fallback(out_attrs, dispatch_mode);
 }
 
 static bool StorageTypeScatteredScalarOp(const NodeAttrs& attrs,
@@ -74,7 +62,7 @@ static bool StorageTypeScatteredScalarOp(const NodeAttrs& attrs,
                                                   : DispatchMode::kFComputeEx)) {
     return true;
   }
-  return fail_storage_type_inference(attrs, dev_mask, dispatch_mode, in_attrs, out_attrs);
+  return dispatch_fallback(out_attrs, dispatch_mode);
 }
 
 /*! \brief _scatter_elemwise_div */
diff --git a/src/operator/tensor/elemwise_sum.cc b/src/operator/tensor/elemwise_sum.cc
index 041a0be..b31dbb2 100644
--- a/src/operator/tensor/elemwise_sum.cc
+++ b/src/operator/tensor/elemwise_sum.cc
@@ -84,7 +84,7 @@ bool ElementWiseSumForwardInferStorageType(const nnvm::NodeAttrs& attrs,
 }
 
 void ElementWiseSumComputeExCPU(const nnvm::NodeAttrs& attrs,
-                                const OpContext& op_ctx,
+                                const OpContext& ctx,
                                 const std::vector<NDArray>& inputs,
                                 const std::vector<OpReqType>& req,
                                 const std::vector<NDArray>& outputs) {
@@ -94,20 +94,20 @@ void ElementWiseSumComputeExCPU(const nnvm::NodeAttrs& attrs,
   if (req[0] == kNullOp) return;
   CHECK_EQ(req[0], kWriteTo) << "ElementWiseSumComputeExCPU only supports req = kWriteTo";
   if (inputs[0].storage_type() == kRowSparseStorage) {
-    mshadow::Stream<cpu>* s = op_ctx.get_stream<cpu>();
-    Resource rsc = ResourceManager::Get()->Request(op_ctx.run_ctx.get_ctx(),
+    mshadow::Stream<cpu>* s = ctx.get_stream<cpu>();
+    Resource rsc = ResourceManager::Get()->Request(ctx.run_ctx.get_ctx(),
         ResourceRequest(ResourceRequest::kTempSpace));
     NDArray out_nd = outputs[0];
     mxnet::ndarray::ElementwiseSum<cpu>(s, rsc, inputs, &out_nd);
   } else {
-    LOG(FATAL) << "Not implemented: " << operator_string(attrs, op_ctx, inputs, req, outputs);
+    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
 }
 
 NNVM_REGISTER_OP(add_n)
+MXNET_ADD_SPARSE_OP_ALIAS(add_n)
+MXNET_ADD_SPARSE_OP_ALIAS(ElementWiseSum)
 .add_alias("ElementWiseSum")
-.add_alias("_sparse_add_n")
-.add_alias("_sparse_ElementWiseSum")
 .describe(R"doc(Adds all input arguments element-wise.
 
 .. math::
diff --git a/src/operator/tensor/elemwise_sum.cu b/src/operator/tensor/elemwise_sum.cu
index 21a80f6..820c8d1 100644
--- a/src/operator/tensor/elemwise_sum.cu
+++ b/src/operator/tensor/elemwise_sum.cu
@@ -29,7 +29,7 @@ namespace mxnet {
 namespace op {
 
 void ElementWiseSumComputeExGPU(const nnvm::NodeAttrs& attrs,
-                                const OpContext& op_ctx,
+                                const OpContext& ctx,
                                 const std::vector<NDArray>& inputs,
                                 const std::vector<OpReqType>& req,
                                 const std::vector<NDArray>& outputs) {
@@ -39,12 +39,11 @@ void ElementWiseSumComputeExGPU(const nnvm::NodeAttrs& attrs,
   if (req[0] == kNullOp) return;
   CHECK_EQ(req[0], kWriteTo) << "ElementWiseSumComputeExGPU only supports req = kWriteTo";
   if (inputs[0].storage_type() == kRowSparseStorage) {
-    mshadow::Stream<gpu>* s = op_ctx.get_stream<gpu>();
+    mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
     NDArray out_nd = outputs[0];
-    mxnet::ndarray::ElementwiseSum<gpu>(s, op_ctx.requested[0], inputs, &out_nd);
+    mxnet::ndarray::ElementwiseSum<gpu>(s, ctx.requested[0], inputs, &out_nd);
   } else {
-    LOG(FATAL) << "Not implemented: "
-               << operator_string(attrs, op_ctx, inputs, req, outputs);
+    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
 }
 
diff --git a/src/operator/tensor/elemwise_unary_op.h b/src/operator/tensor/elemwise_unary_op.h
index 3472d87..b064d34 100644
--- a/src/operator/tensor/elemwise_unary_op.h
+++ b/src/operator/tensor/elemwise_unary_op.h
@@ -318,7 +318,7 @@ class UnaryOp : public OpBase {
     if (in_stype == out_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
       MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, IdentityCompute<xpu>);
     } else {
-      LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
     }
   }
 
@@ -338,7 +338,7 @@ class UnaryOp : public OpBase {
       // csr, _ -> csr, or rsp, _ -> rsp
       OpBase::CopyNDArray(ctx.get_stream<xpu>(), &outputs[0], req[0], inputs[0]);
     } else {
-      LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
     }
   }
 };
diff --git a/src/operator/tensor/elemwise_unary_op_basic.cc b/src/operator/tensor/elemwise_unary_op_basic.cc
index 079a33e..13a58d0 100644
--- a/src/operator/tensor/elemwise_unary_op_basic.cc
+++ b/src/operator/tensor/elemwise_unary_op_basic.cc
@@ -62,10 +62,9 @@ static bool IdentityAttrLikeRhsStorageType(const nnvm::NodeAttrs& attrs,
                                      dispatch_mode, DispatchMode::kFComputeEx);
   }
   if (!dispatched) {
-    dispatch_fallback(out_attrs, dispatch_mode);
-    LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
+    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
-  return true;
+  return dispatched;
 }
 
 // relu
diff --git a/src/operator/tensor/indexing_op.h b/src/operator/tensor/indexing_op.h
index 7323f81..1888a41 100644
--- a/src/operator/tensor/indexing_op.h
+++ b/src/operator/tensor/indexing_op.h
@@ -194,12 +194,7 @@ inline bool SparseEmbeddingOpForwardStorageType(const nnvm::NodeAttrs& attrs,
     dispatched = storage_type_assign(&out_stype, kDefaultStorage,
                                      dispatch_mode, DispatchMode::kFComputeEx);
   }
-  if (!dispatched) {
-    // nothing to fallback on
-    LOG(FATAL) << "Not implemented: "
-               << operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
-  }
-  return true;
+  return dispatched;
 }
 
 
@@ -224,12 +219,7 @@ inline bool SparseEmbeddingOpBackwardStorageType(const nnvm::NodeAttrs& attrs,
       dispatched = true;
     }
   }
-  if (!dispatched) {
-    // nothing to fallback on
-    LOG(FATAL) << "Not implemented: "
-               << operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
-  }
-  return true;
+  return dispatched;
 }
 /*! \brief name the struct Take instead of take
  * to avoid conflict with the take function in mshadow
@@ -410,7 +400,7 @@ void SparseEmbeddingOpForwardEx(const nnvm::NodeAttrs& attrs,
       out_stype == kDefaultStorage) {
     SparseEmbeddingOpForwardRspImpl<xpu>(ctx, data.data(), weight, req[0], out.data());
   } else {
-    LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
 }
 
@@ -597,7 +587,7 @@ void SparseEmbeddingOpBackwardEx(const nnvm::NodeAttrs& attrs,
     SparseEmbeddingOpBackwardRspImpl<xpu>(ctx, ograd.data(), data.data(),
                                           req[embedding::kWeight], weight_grad);
   } else {
-    LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
 }
 
diff --git a/src/operator/tensor/init_op.h b/src/operator/tensor/init_op.h
index 3f5014d..7ddd0e8 100644
--- a/src/operator/tensor/init_op.h
+++ b/src/operator/tensor/init_op.h
@@ -187,10 +187,9 @@ inline bool InitStorageType(const nnvm::NodeAttrs& attrs,
                                      dispatch_mode, DispatchMode::kFComputeEx);
   }
   if (!dispatched) {
-    dispatch_fallback(out_attrs, dispatch_mode);
-    LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
+    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
-  return true;
+  return dispatched;
 }
 
 /*!
@@ -345,7 +344,7 @@ void FillComputeZerosEx(const nnvm::NodeAttrs& attrs,
   } else if (stype == kCSRStorage) {
     FillZerosCsrImpl(s, outputs[0]);
   } else {
-    LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
 }
 
diff --git a/src/operator/tensor/matrix_op-inl.h b/src/operator/tensor/matrix_op-inl.h
index 51cffb1..c1ecc06 100644
--- a/src/operator/tensor/matrix_op-inl.h
+++ b/src/operator/tensor/matrix_op-inl.h
@@ -417,13 +417,10 @@ inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs,
   }
 
   if (!dispatched) {
-    dispatch_fallback(out_attrs, dispatch_mode);
-  }
-  if (*dispatch_mode == DispatchMode::kFComputeFallback) {
-    LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
+    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
   }
 
-  return true;
+  return dispatched;
 }
 
 // slice the indptr of a csr
diff --git a/src/operator/tensor/matrix_op.cc b/src/operator/tensor/matrix_op.cc
index e8fdce4..9167fcf 100644
--- a/src/operator/tensor/matrix_op.cc
+++ b/src/operator/tensor/matrix_op.cc
@@ -516,11 +516,10 @@ parameter values:
       // otherwise, output is dense (print warning anyway)
       if (!storage_type_assign(&(*out_attrs)[0], kDefaultStorage,
                               dispatch_mode, DispatchMode::kFComputeFallback)) {
-        dispatch_fallback(out_attrs, dispatch_mode);
+        dispatched = dispatch_fallback(out_attrs, dispatch_mode);
       }
-      LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
     }
-    return true;
+    return dispatched;
   })
 .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{ "_backward_clip" })
 .add_argument("data", "NDArray-or-Symbol", "Input array.")
diff --git a/src/operator/tensor/sparse_retain-inl.h b/src/operator/tensor/sparse_retain-inl.h
index 8caa65e..b230abf 100644
--- a/src/operator/tensor/sparse_retain-inl.h
+++ b/src/operator/tensor/sparse_retain-inl.h
@@ -85,11 +85,7 @@ inline bool SparseRetainForwardInferStorageType(const nnvm::NodeAttrs& attrs,
     dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
                                      dispatch_mode, DispatchMode::kFComputeEx);
   }
-  if (!dispatched) {
-    LOG(FATAL) << "Not implemented: "
-               << operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
-  }
-  return true;
+  return dispatched;
 }
 
 inline bool SparseRetainBackwardInferStorageType(const nnvm::NodeAttrs& attrs,
@@ -111,11 +107,7 @@ inline bool SparseRetainBackwardInferStorageType(const nnvm::NodeAttrs& attrs,
       dispatched = true;
     }
   }
-  if (!dispatched) {
-    LOG(FATAL) << "Not implemented: "
-               << operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
-  }
-  return true;
+  return dispatched;
 }
 
 /*!
diff --git a/src/operator/tensor/square_sum-inl.h b/src/operator/tensor/square_sum-inl.h
index fcc0215..0067ae1 100644
--- a/src/operator/tensor/square_sum-inl.h
+++ b/src/operator/tensor/square_sum-inl.h
@@ -63,12 +63,7 @@ inline bool SquareSumForwardInferStorageType(const nnvm::NodeAttrs& attrs,
       dispatched = storage_type_assign(&out_stype, kDefaultStorage,
                                        dispatch_mode, DispatchMode::kFComputeEx);
   }
-  if (!dispatched) {
-    // nothing to fallback on
-    LOG(FATAL) << "Not implemented: "
-               << operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
-  }
-  return true;
+  return dispatched;
 }
 
 // infer storage function for _backward_square_sum operator on cpu
@@ -88,12 +83,7 @@ inline bool SquareSumBackwardInferStorageType(const nnvm::NodeAttrs& attrs,
     dispatched = storage_type_assign(&grad_stype, kRowSparseStorage,
                                      dispatch_mode, DispatchMode::kFComputeEx);
   }
-  if (!dispatched) {
-    // nothing to fallback on
-    LOG(FATAL) << "Not implemented: "
-               << operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
-  }
-  return true;
+  return dispatched;
 }
 
 /*!
@@ -493,7 +483,7 @@ void SquareSumOpForwardEx(const nnvm::NodeAttrs& attrs,
     NDArray output = outputs[0];
     SquareSumRspImpl(attrs, s, inputs[0], req[0], &output);
   } else {
-    LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
 }
 
@@ -515,7 +505,7 @@ void SquareSumOpBackwardEx(const nnvm::NodeAttrs& attrs,
     NDArray output = outputs[0];
     SquareSumRspGradImpl<xpu>(attrs, ctx, inputs[0], inputs[1], req[0], &output);
   } else {
-    LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
+    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
   }
 }
 

-- 
To stop receiving notification emails like this one, please contact
['"commits@mxnet.apache.org" <co...@mxnet.apache.org>'].