You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2022/08/04 15:22:50 UTC

[GitHub] [incubator-mxnet] bartekkuncer commented on a diff in pull request #21112: [BUGFIX] _npi_repeats with swap

bartekkuncer commented on code in PR #21112:
URL: https://github.com/apache/incubator-mxnet/pull/21112#discussion_r937911565


##########
src/operator/numpy/np_repeat_op-inl.h:
##########
@@ -102,6 +103,14 @@ inline bool RepeatsOpShape(const nnvm::NodeAttrs& attrs,
     return true;
   }
 
+  mxnet::Tuple<int> repts = param.repeats.value();

Review Comment:
   Could you use more descriptive variable names, without abbreviations if possible?



##########
src/operator/numpy/np_repeat_op-inl.h:
##########
@@ -153,26 +162,27 @@ struct repeat_axis_fwd {
 };
 
 template <typename xpu>
-void NumpyRepeatsOpForward(const nnvm::NodeAttrs& attrs,
-                           const OpContext& ctx,
-                           const std::vector<TBlob>& inputs,
-                           const std::vector<OpReqType>& req,
-                           const std::vector<TBlob>& outputs) {
+void NumpyRepeatsAxisZeroOpForward(const nnvm::NodeAttrs& attrs,

Review Comment:
   Please add a brief description of this function.



##########
src/operator/numpy/np_repeat_op-inl.h:
##########
@@ -153,26 +162,27 @@ struct repeat_axis_fwd {
 };
 
 template <typename xpu>
-void NumpyRepeatsOpForward(const nnvm::NodeAttrs& attrs,
-                           const OpContext& ctx,
-                           const std::vector<TBlob>& inputs,
-                           const std::vector<OpReqType>& req,
-                           const std::vector<TBlob>& outputs) {
+void NumpyRepeatsAxisZeroOpForward(const nnvm::NodeAttrs& attrs,
+                                   const OpContext& ctx,
+                                   const std::vector<TBlob>& inputs,
+                                   const std::vector<OpReqType>& req,
+                                   const std::vector<TBlob>& outputs,
+                                   int* ind) {
   using namespace mshadow;
   const TBlob& iTBlob         = inputs[0];
   const mxnet::TShape& ishape = iTBlob.shape_;
-  if (!shape_is_known(ishape))
-    return;
-  Stream<xpu>* s = ctx.get_stream<xpu>();
-
-  int repeats = 0;
+  int repeats                 = 0;
   dmlc::optional<int> axisOpt;
-  int axis                  = -1;
+  // axis is always set to zero, because before calling this function target axis of the repeat
+  // function is swapped with 0th axis
+  int axis                  = 0;

Review Comment:
   Consider declaring `axis` as `constexpr`, since it is always zero here.



##########
src/operator/swapaxis-inl.h:
##########
@@ -156,7 +156,7 @@ void SwapAxisCompute(const nnvm::NodeAttrs& attrs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& out_data) {
   using namespace mshadow;
-  MSHADOW_TYPE_SWITCH(in_data[swapaxisenum::kData].type_flag_, DType, {
+  MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(in_data[swapaxisenum::kData].type_flag_, DType, {

Review Comment:
   Maybe it is a good time to change `swapaxisenum` to just `swapaxis`, as is done for other ops?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org