Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/01/18 19:40:29 UTC

[GitHub] cjolivier01 commented on a change in pull request #9479: add norm operator for sparse ndarray

URL: https://github.com/apache/incubator-mxnet/pull/9479#discussion_r162447097
 
 

 ##########
 File path: src/operator/tensor/broadcast_reduce_op.h
 ##########
 @@ -821,20 +821,93 @@ struct ReduceGrad {
   }
 };
 
+inline bool L2NormStorageType(const nnvm::NodeAttrs& attrs,
+                              const int dev_mask,
+                              DispatchMode* dispatch_mode,
+                              std::vector<int>* in_attrs,
+                              std::vector<int>* out_attrs) {
+  CHECK_EQ(in_attrs->size(), 1U);
+  CHECK_EQ(out_attrs->size(), 1U);
+  const int in_stype = in_attrs->at(0);
+  int& out_stype = out_attrs->at(0);
+  bool dispatched = false;
+  if (!dispatched && in_stype == kDefaultStorage) {
+    // dns -> dns
+    dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode,
+                                     DispatchMode::kFCompute);
+  }
+  if (!dispatched && (in_stype == kCSRStorage || in_stype == kRowSparseStorage)) {
+    // csr/rsp -> dns
+    dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode,
+                                     DispatchMode::kFComputeEx);
+  }
+  if (!dispatched) {
+    // no direct kernel applies: fall back to dense storage and record it
+    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
+  }
+  if (*dispatch_mode == DispatchMode::kFComputeFallback) {
+    LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
+  }
+  return dispatched;
+}
+
+template<typename xpu>
+void L2NormComputeImpl(mshadow::Stream<xpu> *s,
+                       const TBlob& input,
+                       const OpReqType req,
+                       const TBlob& output) {
+  MSHADOW_REAL_TYPE_SWITCH(output.type_flag_, DType, {
+    mshadow::Tensor<xpu, 1, DType> out = output.get<xpu, 1, DType>(s);
+    // view the input as a flat 1-D tensor so a single dot product covers it
+    mshadow::Tensor<xpu, 1, DType> in = input.get_with_shape<xpu, 1, DType>(
+      mshadow::Shape1(input.shape_.Size()), s);
+    // dot(in, in) accumulates the sum of squares (the squared L2 norm)
+    mshadow::VectorDot(out, in, in);
 
 Review comment:
   I thought we were trying to get away from mshadow stuff?
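   For reference, the direction elsewhere in the codebase has been the `mxnet_op::Kernel<OP, xpu>::Launch` idiom rather than mshadow expression templates. A minimal sketch of just the squaring step written that way (the `square_kernel` struct and the `temp` workspace blob are hypothetical, and a scalar L2 norm would still need a sum-reduce plus a sqrt on top of this):

   ```cpp
   // Hypothetical non-mshadow version of the element-wise step:
   // thread i squares one element of the flattened input.
   struct square_kernel {
     template<typename DType>
     MSHADOW_XINLINE static void Map(int i, DType* out, const DType* in) {
       out[i] = in[i] * in[i];
     }
   };

   // launched over all elements; 'temp' is an assumed scratch TBlob
   mxnet_op::Kernel<square_kernel, xpu>::Launch(
       s, input.shape_.Size(), temp.dptr<DType>(), input.dptr<DType>());
   ```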

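   As a side note on the storage-type function in this diff: it would be attached through the usual nnvm attribute registration. A sketch, assuming the PR wires it onto the existing `norm` op and that the sparse entry point is named `L2NormComputeEx` (both names are my assumption, not taken from the diff):

   ```cpp
   NNVM_REGISTER_OP(norm)
   // picks FCompute for dns->dns and FComputeEx for csr/rsp->dns,
   // per L2NormStorageType above
   .set_attr<FInferStorageType>("FInferStorageType", L2NormStorageType)
   // assumed entry point handling the sparse (csr/rsp) inputs
   .set_attr<FComputeEx>("FComputeEx<cpu>", L2NormComputeEx<cpu>);
   ```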