You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2021/10/25 14:25:39 UTC

[GitHub] [incubator-mxnet] piotrwolinski-intel opened a new pull request #20699: Piotrwolinski/merge adaptive pooling operator

piotrwolinski-intel opened a new pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699


   Merged MKLDNN adaptive pooling with traditional pooling implementation,
   so that the code redundancy is eliminated
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel removed a comment on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel removed a comment on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-957943221


   Generally, it looks good! [v1.x] has not been re-formatted yet. Please don't sort headers and don't re-format files here


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] bartekkuncer commented on a change in pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
bartekkuncer commented on a change in pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#discussion_r738380467



##########
File path: src/operator/contrib/adaptive_avg_pooling-inl.h
##########
@@ -21,125 +21,112 @@
  * \file adaptive_avg_pooling-inl.h
  * \brief adaptive average pooling operator
  * \author Hang Zhang
-*/
+ */
 #ifndef MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 #define MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 
 #include <dmlc/logging.h>
 #include <dmlc/parameter.h>
-#include <mxnet/operator.h>
 #include <mxnet/ndarray.h>
+#include <mxnet/operator.h>
+
 #include <map>
-#include <vector>
 #include <string>
 #include <utility>
+#include <vector>
 /* contrib
 #include "../ndarray/ndarray_function.h"
-#include "./operator_common.h"
-#include "./mxnet_op.h"
 #include "./mshadow_op.h"
+#include "./mxnet_op.h"
+#include "./operator_common.h"
 */
 #include "../../ndarray/ndarray_function.h"
-#include "../operator_common.h"
-#include "../mxnet_op.h"
 #include "../mshadow_op.h"
-#if MXNET_USE_MKLDNN == 1
-#include "../nn/mkldnn/mkldnn_adaptive_pooling-inl.h"
-#endif
+#include "../mxnet_op.h"
+#include "../nn/pooling-inl.h"
+#include "../operator_common.h"
 
 namespace mxnet {
 namespace op {
 
-struct AdaptiveAvgPoolParam : public dmlc::Parameter<AdaptiveAvgPoolParam> {
-  mxnet::Tuple<int> output_size;
-  DMLC_DECLARE_PARAMETER(AdaptiveAvgPoolParam) {
-    DMLC_DECLARE_FIELD(output_size).set_default(mxnet::Tuple<int>())
-    .describe("int (output size) or a tuple of int for output (height, width).");
-  }
-  bool operator==(const AdaptiveAvgPoolParam &other) const {
-    return this->output_size == other.output_size;
-  }
-};
-
 static inline bool IsWriting(const OpReqType ort) {
   return ort == kWriteTo || ort == kWriteInplace;
 }
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 
 #if MXNET_USE_CUDA
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 #endif  // MXNET_USE_CUDA
 
 template <typename xpu>
 inline void AdaptiveAvgPoolOpForward(const nnvm::NodeAttrs& attrs,
-                                     const OpContext &ctx,
-                                     const std::vector<TBlob> &inputs,
-                                     const std::vector<OpReqType> &req,
-                                     const std::vector<TBlob> &outputs) {
+                                     const OpContext& ctx,
+                                     const std::vector<TBlob>& inputs,
+                                     const std::vector<OpReqType>& req,
+                                     const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateOutput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
-
 template <typename xpu>
 inline void AdaptiveAvgPoolOpBackward(const nnvm::NodeAttrs& attrs,
-                                      const OpContext &ctx,
-                                      const std::vector<TBlob> &inputs,
-                                      const std::vector<OpReqType> &req,
-                                      const std::vector<TBlob> &outputs) {
+                                      const OpContext& ctx,
+                                      const std::vector<TBlob>& inputs,
+                                      const std::vector<OpReqType>& req,
+                                      const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   if (IsWriting(req[0])) {
     // zero grad before backwarding
-    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
-      Fill<false>(s, outputs[0], kWriteTo, 0);
-    })
+    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { Fill<false>(s, outputs[0], kWriteTo, 0); })
   }
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateGradInput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
 static bool AdaptiveAvgPoolOpInferShape(const nnvm::NodeAttrs& attrs,
-                                       mxnet::ShapeVector *in_shape,
-                                       mxnet::ShapeVector *out_shape) {
+                                        mxnet::ShapeVector* in_shape,
+                                        mxnet::ShapeVector* out_shape) {
   using namespace mshadow;
   CHECK_EQ(in_shape->size(), 1U) << "Input:[data]";
   CHECK_EQ(out_shape->size(), 1U) << "Output:[data]";
-  const AdaptiveAvgPoolParam& param = nnvm::get<AdaptiveAvgPoolParam>(attrs.parsed);
+  const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
+
   mxnet::TShape dshape(in_shape->at(0));
-  if (mxnet::op::shape_is_none(dshape)) return false;
-  if (param.output_size.ndim() == 0) {
+  if (mxnet::op::shape_is_none(dshape))
+    return false;
+  if (param.output_size.value().ndim() == 0) {
     dshape[2] = 1;
     dshape[3] = 1;

Review comment:
       With changed initialization:
   ```
   if (param.output_size.has_value()) {
     if (param.output_size.value().ndim() == 1) {
       dshape[2] = param.output_size.value()[0];
       dshape[3] = param.output_size.value()[0];
     } else if (param.output_size.value().ndim() == 2) {
       dshape[2] = param.output_size.value()[0];
       dshape[3] = param.output_size.value()[1];
     }
   }
   ```




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel commented on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel commented on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-957943221






-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel commented on a change in pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel commented on a change in pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#discussion_r741259666



##########
File path: src/operator/contrib/adaptive_avg_pooling-inl.h
##########
@@ -21,125 +21,115 @@
  * \file adaptive_avg_pooling-inl.h
  * \brief adaptive average pooling operator
  * \author Hang Zhang
-*/
+ */
 #ifndef MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 #define MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 
 #include <dmlc/logging.h>
 #include <dmlc/parameter.h>
-#include <mxnet/operator.h>
 #include <mxnet/ndarray.h>
+#include <mxnet/operator.h>
+
 #include <map>
-#include <vector>
 #include <string>
 #include <utility>
+#include <vector>
 /* contrib
 #include "../ndarray/ndarray_function.h"
-#include "./operator_common.h"
-#include "./mxnet_op.h"
 #include "./mshadow_op.h"
+#include "./mxnet_op.h"
+#include "./operator_common.h"
 */
 #include "../../ndarray/ndarray_function.h"
-#include "../operator_common.h"
-#include "../mxnet_op.h"
 #include "../mshadow_op.h"
-#if MXNET_USE_MKLDNN == 1
-#include "../nn/mkldnn/mkldnn_adaptive_pooling-inl.h"
-#endif
+#include "../mxnet_op.h"
+#include "../nn/pooling-inl.h"
+#include "../operator_common.h"
 
 namespace mxnet {
 namespace op {
 
-struct AdaptiveAvgPoolParam : public dmlc::Parameter<AdaptiveAvgPoolParam> {
-  mxnet::Tuple<int> output_size;
-  DMLC_DECLARE_PARAMETER(AdaptiveAvgPoolParam) {
-    DMLC_DECLARE_FIELD(output_size).set_default(mxnet::Tuple<int>())
-    .describe("int (output size) or a tuple of int for output (height, width).");
-  }
-  bool operator==(const AdaptiveAvgPoolParam &other) const {
-    return this->output_size == other.output_size;
-  }
-};
-
 static inline bool IsWriting(const OpReqType ort) {
   return ort == kWriteTo || ort == kWriteInplace;
 }
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 
 #if MXNET_USE_CUDA
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 #endif  // MXNET_USE_CUDA
 
 template <typename xpu>
 inline void AdaptiveAvgPoolOpForward(const nnvm::NodeAttrs& attrs,
-                                     const OpContext &ctx,
-                                     const std::vector<TBlob> &inputs,
-                                     const std::vector<OpReqType> &req,
-                                     const std::vector<TBlob> &outputs) {
+                                     const OpContext& ctx,
+                                     const std::vector<TBlob>& inputs,
+                                     const std::vector<OpReqType>& req,
+                                     const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateOutput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
-
 template <typename xpu>
 inline void AdaptiveAvgPoolOpBackward(const nnvm::NodeAttrs& attrs,
-                                      const OpContext &ctx,
-                                      const std::vector<TBlob> &inputs,
-                                      const std::vector<OpReqType> &req,
-                                      const std::vector<TBlob> &outputs) {
+                                      const OpContext& ctx,
+                                      const std::vector<TBlob>& inputs,
+                                      const std::vector<OpReqType>& req,
+                                      const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   if (IsWriting(req[0])) {
     // zero grad before backwarding
-    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
-      Fill<false>(s, outputs[0], kWriteTo, 0);
-    })
+    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { Fill<false>(s, outputs[0], kWriteTo, 0); })
   }
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateGradInput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
 static bool AdaptiveAvgPoolOpInferShape(const nnvm::NodeAttrs& attrs,
-                                       mxnet::ShapeVector *in_shape,
-                                       mxnet::ShapeVector *out_shape) {
+                                        mxnet::ShapeVector* in_shape,
+                                        mxnet::ShapeVector* out_shape) {
   using namespace mshadow;
   CHECK_EQ(in_shape->size(), 1U) << "Input:[data]";
   CHECK_EQ(out_shape->size(), 1U) << "Output:[data]";
-  const AdaptiveAvgPoolParam& param = nnvm::get<AdaptiveAvgPoolParam>(attrs.parsed);
+  const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
+
   mxnet::TShape dshape(in_shape->at(0));
-  if (mxnet::op::shape_is_none(dshape)) return false;
-  if (param.output_size.ndim() == 0) {
-    dshape[2] = 1;
-    dshape[3] = 1;
-  } else if (param.output_size.ndim() == 1) {
-    dshape[2] = param.output_size[0];
-    dshape[3] = param.output_size[0];
-  } else if (param.output_size.ndim() == 2) {
-    dshape[2] = param.output_size[0];
-    dshape[3] = param.output_size[1];
+
+  if (mxnet::op::shape_is_none(dshape))
+    return false;

Review comment:
       Please use {}

##########
File path: src/operator/contrib/adaptive_avg_pooling-inl.h
##########
@@ -21,125 +21,115 @@
  * \file adaptive_avg_pooling-inl.h
  * \brief adaptive average pooling operator
  * \author Hang Zhang
-*/
+ */
 #ifndef MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 #define MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 
 #include <dmlc/logging.h>
 #include <dmlc/parameter.h>
-#include <mxnet/operator.h>
 #include <mxnet/ndarray.h>
+#include <mxnet/operator.h>
+
 #include <map>
-#include <vector>
 #include <string>
 #include <utility>
+#include <vector>
 /* contrib
 #include "../ndarray/ndarray_function.h"
-#include "./operator_common.h"
-#include "./mxnet_op.h"
 #include "./mshadow_op.h"
+#include "./mxnet_op.h"
+#include "./operator_common.h"
 */
 #include "../../ndarray/ndarray_function.h"
-#include "../operator_common.h"
-#include "../mxnet_op.h"
 #include "../mshadow_op.h"
-#if MXNET_USE_MKLDNN == 1
-#include "../nn/mkldnn/mkldnn_adaptive_pooling-inl.h"
-#endif
+#include "../mxnet_op.h"
+#include "../nn/pooling-inl.h"
+#include "../operator_common.h"

Review comment:
       Please don't sort the headers.

##########
File path: src/operator/quantization/mkldnn/mkldnn_quantized_pooling.cc
##########
@@ -39,7 +39,7 @@ static void MKLDNNQuantizedPoolingForward(const nnvm::NodeAttrs& attrs,
       << "mkldnn_quantized_pooling op only supports uint8 and int8 as input "
          "type";
   const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
-  MKLDNNPoolingCompute(ctx, param, in_data[0], req[0], out_data[0], nullptr);
+  MKLDNNPoolingCompute(ctx, param, in_data[0], req[0], out_data[0], nullptr, false);

Review comment:
       Could you please add a comment before false?
   ```suggestion
     MKLDNNPoolingCompute(ctx, param, in_data[0], req[0], out_data[0], nullptr, /*use_adaptive*/ false);
   ```

##########
File path: src/operator/nn/mkldnn/mkldnn_pooling.cc
##########
@@ -224,7 +226,7 @@ MKLDNNPoolingFwd& GetPoolingFwd(const PoolingParam& param,
       pooling_fwds;
 #endif
 
-  bool with_workspace = is_train && MKLDNNRequireWorkspace(param);
+  const bool with_workspace = is_train && (MKLDNNRequireWorkspace(param) || use_adaptive_pooling);

Review comment:
       Could you please tell me why you placed `use_adaptive_pooling` here?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel commented on a change in pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel commented on a change in pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#discussion_r741259666



##########
File path: src/operator/contrib/adaptive_avg_pooling-inl.h
##########
@@ -21,125 +21,115 @@
  * \file adaptive_avg_pooling-inl.h
  * \brief adaptive average pooling operator
  * \author Hang Zhang
-*/
+ */
 #ifndef MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 #define MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 
 #include <dmlc/logging.h>
 #include <dmlc/parameter.h>
-#include <mxnet/operator.h>
 #include <mxnet/ndarray.h>
+#include <mxnet/operator.h>
+
 #include <map>
-#include <vector>
 #include <string>
 #include <utility>
+#include <vector>
 /* contrib
 #include "../ndarray/ndarray_function.h"
-#include "./operator_common.h"
-#include "./mxnet_op.h"
 #include "./mshadow_op.h"
+#include "./mxnet_op.h"
+#include "./operator_common.h"
 */
 #include "../../ndarray/ndarray_function.h"
-#include "../operator_common.h"
-#include "../mxnet_op.h"
 #include "../mshadow_op.h"
-#if MXNET_USE_MKLDNN == 1
-#include "../nn/mkldnn/mkldnn_adaptive_pooling-inl.h"
-#endif
+#include "../mxnet_op.h"
+#include "../nn/pooling-inl.h"
+#include "../operator_common.h"
 
 namespace mxnet {
 namespace op {
 
-struct AdaptiveAvgPoolParam : public dmlc::Parameter<AdaptiveAvgPoolParam> {
-  mxnet::Tuple<int> output_size;
-  DMLC_DECLARE_PARAMETER(AdaptiveAvgPoolParam) {
-    DMLC_DECLARE_FIELD(output_size).set_default(mxnet::Tuple<int>())
-    .describe("int (output size) or a tuple of int for output (height, width).");
-  }
-  bool operator==(const AdaptiveAvgPoolParam &other) const {
-    return this->output_size == other.output_size;
-  }
-};
-
 static inline bool IsWriting(const OpReqType ort) {
   return ort == kWriteTo || ort == kWriteInplace;
 }
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 
 #if MXNET_USE_CUDA
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 #endif  // MXNET_USE_CUDA
 
 template <typename xpu>
 inline void AdaptiveAvgPoolOpForward(const nnvm::NodeAttrs& attrs,
-                                     const OpContext &ctx,
-                                     const std::vector<TBlob> &inputs,
-                                     const std::vector<OpReqType> &req,
-                                     const std::vector<TBlob> &outputs) {
+                                     const OpContext& ctx,
+                                     const std::vector<TBlob>& inputs,
+                                     const std::vector<OpReqType>& req,
+                                     const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateOutput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
-
 template <typename xpu>
 inline void AdaptiveAvgPoolOpBackward(const nnvm::NodeAttrs& attrs,
-                                      const OpContext &ctx,
-                                      const std::vector<TBlob> &inputs,
-                                      const std::vector<OpReqType> &req,
-                                      const std::vector<TBlob> &outputs) {
+                                      const OpContext& ctx,
+                                      const std::vector<TBlob>& inputs,
+                                      const std::vector<OpReqType>& req,
+                                      const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   if (IsWriting(req[0])) {
     // zero grad before backwarding
-    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
-      Fill<false>(s, outputs[0], kWriteTo, 0);
-    })
+    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { Fill<false>(s, outputs[0], kWriteTo, 0); })
   }
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateGradInput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
 static bool AdaptiveAvgPoolOpInferShape(const nnvm::NodeAttrs& attrs,
-                                       mxnet::ShapeVector *in_shape,
-                                       mxnet::ShapeVector *out_shape) {
+                                        mxnet::ShapeVector* in_shape,
+                                        mxnet::ShapeVector* out_shape) {
   using namespace mshadow;
   CHECK_EQ(in_shape->size(), 1U) << "Input:[data]";
   CHECK_EQ(out_shape->size(), 1U) << "Output:[data]";
-  const AdaptiveAvgPoolParam& param = nnvm::get<AdaptiveAvgPoolParam>(attrs.parsed);
+  const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
+
   mxnet::TShape dshape(in_shape->at(0));
-  if (mxnet::op::shape_is_none(dshape)) return false;
-  if (param.output_size.ndim() == 0) {
-    dshape[2] = 1;
-    dshape[3] = 1;
-  } else if (param.output_size.ndim() == 1) {
-    dshape[2] = param.output_size[0];
-    dshape[3] = param.output_size[0];
-  } else if (param.output_size.ndim() == 2) {
-    dshape[2] = param.output_size[0];
-    dshape[3] = param.output_size[1];
+
+  if (mxnet::op::shape_is_none(dshape))
+    return false;

Review comment:
       Please use {}

##########
File path: src/operator/contrib/adaptive_avg_pooling-inl.h
##########
@@ -21,125 +21,115 @@
  * \file adaptive_avg_pooling-inl.h
  * \brief adaptive average pooling operator
  * \author Hang Zhang
-*/
+ */
 #ifndef MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 #define MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 
 #include <dmlc/logging.h>
 #include <dmlc/parameter.h>
-#include <mxnet/operator.h>
 #include <mxnet/ndarray.h>
+#include <mxnet/operator.h>
+
 #include <map>
-#include <vector>
 #include <string>
 #include <utility>
+#include <vector>
 /* contrib
 #include "../ndarray/ndarray_function.h"
-#include "./operator_common.h"
-#include "./mxnet_op.h"
 #include "./mshadow_op.h"
+#include "./mxnet_op.h"
+#include "./operator_common.h"
 */
 #include "../../ndarray/ndarray_function.h"
-#include "../operator_common.h"
-#include "../mxnet_op.h"
 #include "../mshadow_op.h"
-#if MXNET_USE_MKLDNN == 1
-#include "../nn/mkldnn/mkldnn_adaptive_pooling-inl.h"
-#endif
+#include "../mxnet_op.h"
+#include "../nn/pooling-inl.h"
+#include "../operator_common.h"

Review comment:
       Please don't sort the headers.

##########
File path: src/operator/quantization/mkldnn/mkldnn_quantized_pooling.cc
##########
@@ -39,7 +39,7 @@ static void MKLDNNQuantizedPoolingForward(const nnvm::NodeAttrs& attrs,
       << "mkldnn_quantized_pooling op only supports uint8 and int8 as input "
          "type";
   const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
-  MKLDNNPoolingCompute(ctx, param, in_data[0], req[0], out_data[0], nullptr);
+  MKLDNNPoolingCompute(ctx, param, in_data[0], req[0], out_data[0], nullptr, false);

Review comment:
       Could you please add a comment before false?
   ```suggestion
     MKLDNNPoolingCompute(ctx, param, in_data[0], req[0], out_data[0], nullptr, /*use_adaptive*/ false);
   ```

##########
File path: src/operator/nn/mkldnn/mkldnn_pooling.cc
##########
@@ -224,7 +226,7 @@ MKLDNNPoolingFwd& GetPoolingFwd(const PoolingParam& param,
       pooling_fwds;
 #endif
 
-  bool with_workspace = is_train && MKLDNNRequireWorkspace(param);
+  const bool with_workspace = is_train && (MKLDNNRequireWorkspace(param) || use_adaptive_pooling);

Review comment:
       Could you please tell me why you placed `use_adaptive_pooling` here?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel removed a comment on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel removed a comment on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-957943221


   Generally, it looks good! [v1.x] has not been re-formatted yet. Please don't sort headers and don't re-format files here


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] bartekkuncer commented on a change in pull request #20699: Piotrwolinski/merge adaptive pooling operator

Posted by GitBox <gi...@apache.org>.
bartekkuncer commented on a change in pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#discussion_r735669754



##########
File path: src/operator/nn/mkldnn/mkldnn_pooling-inl.h
##########
@@ -87,6 +87,26 @@ class MKLDNNPoolingBwd {
   const mkldnn::pooling_backward::primitive_desc& GetPd();
 };
 
+template <typename T = mkldnn::memory::dims>
+void useAdaptivePaddingKernel(T* kernel,

Review comment:
       UseAdaptivePaddingKernel




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mxnet-bot commented on pull request #20699: Piotrwolinski/merge adaptive pooling operator

Posted by GitBox <gi...@apache.org>.
mxnet-bot commented on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-950984630


   Hey @piotrwolinski-intel , Thanks for submitting the PR 
   All tests are already queued to run once. If tests fail, you can trigger one or more tests again with the following commands: 
   - To trigger all jobs: @mxnet-bot run ci [all] 
   - To trigger specific jobs: @mxnet-bot run ci [job1, job2] 
   *** 
   **CI supported jobs**: [sanity, windows-cpu, website, centos-gpu, unix-cpu, clang, miscellaneous, edge, windows-gpu, unix-gpu, centos-cpu]
   *** 
   _Note_: 
    Only following 3 categories can trigger CI :PR Author, MXNet Committer, Jenkins Admin. 
   All CI tests must pass before the PR can be merged. 
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] piotrwolinski-intel commented on a change in pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
piotrwolinski-intel commented on a change in pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#discussion_r740967333



##########
File path: src/operator/contrib/adaptive_avg_pooling-inl.h
##########
@@ -21,125 +21,112 @@
  * \file adaptive_avg_pooling-inl.h
  * \brief adaptive average pooling operator
  * \author Hang Zhang
-*/
+ */
 #ifndef MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 #define MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 
 #include <dmlc/logging.h>
 #include <dmlc/parameter.h>
-#include <mxnet/operator.h>
 #include <mxnet/ndarray.h>
+#include <mxnet/operator.h>
+
 #include <map>
-#include <vector>
 #include <string>
 #include <utility>
+#include <vector>
 /* contrib
 #include "../ndarray/ndarray_function.h"
-#include "./operator_common.h"
-#include "./mxnet_op.h"
 #include "./mshadow_op.h"
+#include "./mxnet_op.h"
+#include "./operator_common.h"
 */
 #include "../../ndarray/ndarray_function.h"
-#include "../operator_common.h"
-#include "../mxnet_op.h"
 #include "../mshadow_op.h"
-#if MXNET_USE_MKLDNN == 1
-#include "../nn/mkldnn/mkldnn_adaptive_pooling-inl.h"
-#endif
+#include "../mxnet_op.h"
+#include "../nn/pooling-inl.h"
+#include "../operator_common.h"
 
 namespace mxnet {
 namespace op {
 
-struct AdaptiveAvgPoolParam : public dmlc::Parameter<AdaptiveAvgPoolParam> {
-  mxnet::Tuple<int> output_size;
-  DMLC_DECLARE_PARAMETER(AdaptiveAvgPoolParam) {
-    DMLC_DECLARE_FIELD(output_size).set_default(mxnet::Tuple<int>())
-    .describe("int (output size) or a tuple of int for output (height, width).");
-  }
-  bool operator==(const AdaptiveAvgPoolParam &other) const {
-    return this->output_size == other.output_size;
-  }
-};
-
 static inline bool IsWriting(const OpReqType ort) {
   return ort == kWriteTo || ort == kWriteInplace;
 }
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 
 #if MXNET_USE_CUDA
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 #endif  // MXNET_USE_CUDA
 
 template <typename xpu>
 inline void AdaptiveAvgPoolOpForward(const nnvm::NodeAttrs& attrs,
-                                     const OpContext &ctx,
-                                     const std::vector<TBlob> &inputs,
-                                     const std::vector<OpReqType> &req,
-                                     const std::vector<TBlob> &outputs) {
+                                     const OpContext& ctx,
+                                     const std::vector<TBlob>& inputs,
+                                     const std::vector<OpReqType>& req,
+                                     const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateOutput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
-
 template <typename xpu>
 inline void AdaptiveAvgPoolOpBackward(const nnvm::NodeAttrs& attrs,
-                                      const OpContext &ctx,
-                                      const std::vector<TBlob> &inputs,
-                                      const std::vector<OpReqType> &req,
-                                      const std::vector<TBlob> &outputs) {
+                                      const OpContext& ctx,
+                                      const std::vector<TBlob>& inputs,
+                                      const std::vector<OpReqType>& req,
+                                      const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   if (IsWriting(req[0])) {
     // zero grad before backwarding
-    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
-      Fill<false>(s, outputs[0], kWriteTo, 0);
-    })
+    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { Fill<false>(s, outputs[0], kWriteTo, 0); })
   }
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateGradInput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
 static bool AdaptiveAvgPoolOpInferShape(const nnvm::NodeAttrs& attrs,
-                                       mxnet::ShapeVector *in_shape,
-                                       mxnet::ShapeVector *out_shape) {
+                                        mxnet::ShapeVector* in_shape,
+                                        mxnet::ShapeVector* out_shape) {
   using namespace mshadow;
   CHECK_EQ(in_shape->size(), 1U) << "Input:[data]";
   CHECK_EQ(out_shape->size(), 1U) << "Output:[data]";
-  const AdaptiveAvgPoolParam& param = nnvm::get<AdaptiveAvgPoolParam>(attrs.parsed);
+  const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
+
   mxnet::TShape dshape(in_shape->at(0));
-  if (mxnet::op::shape_is_none(dshape)) return false;
-  if (param.output_size.ndim() == 0) {
+  if (mxnet::op::shape_is_none(dshape))
+    return false;
+  if (param.output_size.value().ndim() == 0) {
     dshape[2] = 1;
     dshape[3] = 1;

Review comment:
       Done




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel commented on a change in pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel commented on a change in pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#discussion_r741259666



##########
File path: src/operator/contrib/adaptive_avg_pooling-inl.h
##########
@@ -21,125 +21,115 @@
  * \file adaptive_avg_pooling-inl.h
  * \brief adaptive average pooling operator
  * \author Hang Zhang
-*/
+ */
 #ifndef MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 #define MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 
 #include <dmlc/logging.h>
 #include <dmlc/parameter.h>
-#include <mxnet/operator.h>
 #include <mxnet/ndarray.h>
+#include <mxnet/operator.h>
+
 #include <map>
-#include <vector>
 #include <string>
 #include <utility>
+#include <vector>
 /* contrib
 #include "../ndarray/ndarray_function.h"
-#include "./operator_common.h"
-#include "./mxnet_op.h"
 #include "./mshadow_op.h"
+#include "./mxnet_op.h"
+#include "./operator_common.h"
 */
 #include "../../ndarray/ndarray_function.h"
-#include "../operator_common.h"
-#include "../mxnet_op.h"
 #include "../mshadow_op.h"
-#if MXNET_USE_MKLDNN == 1
-#include "../nn/mkldnn/mkldnn_adaptive_pooling-inl.h"
-#endif
+#include "../mxnet_op.h"
+#include "../nn/pooling-inl.h"
+#include "../operator_common.h"
 
 namespace mxnet {
 namespace op {
 
-struct AdaptiveAvgPoolParam : public dmlc::Parameter<AdaptiveAvgPoolParam> {
-  mxnet::Tuple<int> output_size;
-  DMLC_DECLARE_PARAMETER(AdaptiveAvgPoolParam) {
-    DMLC_DECLARE_FIELD(output_size).set_default(mxnet::Tuple<int>())
-    .describe("int (output size) or a tuple of int for output (height, width).");
-  }
-  bool operator==(const AdaptiveAvgPoolParam &other) const {
-    return this->output_size == other.output_size;
-  }
-};
-
 static inline bool IsWriting(const OpReqType ort) {
   return ort == kWriteTo || ort == kWriteInplace;
 }
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 
 #if MXNET_USE_CUDA
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 #endif  // MXNET_USE_CUDA
 
 template <typename xpu>
 inline void AdaptiveAvgPoolOpForward(const nnvm::NodeAttrs& attrs,
-                                     const OpContext &ctx,
-                                     const std::vector<TBlob> &inputs,
-                                     const std::vector<OpReqType> &req,
-                                     const std::vector<TBlob> &outputs) {
+                                     const OpContext& ctx,
+                                     const std::vector<TBlob>& inputs,
+                                     const std::vector<OpReqType>& req,
+                                     const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateOutput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
-
 template <typename xpu>
 inline void AdaptiveAvgPoolOpBackward(const nnvm::NodeAttrs& attrs,
-                                      const OpContext &ctx,
-                                      const std::vector<TBlob> &inputs,
-                                      const std::vector<OpReqType> &req,
-                                      const std::vector<TBlob> &outputs) {
+                                      const OpContext& ctx,
+                                      const std::vector<TBlob>& inputs,
+                                      const std::vector<OpReqType>& req,
+                                      const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   if (IsWriting(req[0])) {
     // zero grad before backwarding
-    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
-      Fill<false>(s, outputs[0], kWriteTo, 0);
-    })
+    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { Fill<false>(s, outputs[0], kWriteTo, 0); })
   }
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateGradInput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
 static bool AdaptiveAvgPoolOpInferShape(const nnvm::NodeAttrs& attrs,
-                                       mxnet::ShapeVector *in_shape,
-                                       mxnet::ShapeVector *out_shape) {
+                                        mxnet::ShapeVector* in_shape,
+                                        mxnet::ShapeVector* out_shape) {
   using namespace mshadow;
   CHECK_EQ(in_shape->size(), 1U) << "Input:[data]";
   CHECK_EQ(out_shape->size(), 1U) << "Output:[data]";
-  const AdaptiveAvgPoolParam& param = nnvm::get<AdaptiveAvgPoolParam>(attrs.parsed);
+  const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
+
   mxnet::TShape dshape(in_shape->at(0));
-  if (mxnet::op::shape_is_none(dshape)) return false;
-  if (param.output_size.ndim() == 0) {
-    dshape[2] = 1;
-    dshape[3] = 1;
-  } else if (param.output_size.ndim() == 1) {
-    dshape[2] = param.output_size[0];
-    dshape[3] = param.output_size[0];
-  } else if (param.output_size.ndim() == 2) {
-    dshape[2] = param.output_size[0];
-    dshape[3] = param.output_size[1];
+
+  if (mxnet::op::shape_is_none(dshape))
+    return false;

Review comment:
       Please use {}

##########
File path: src/operator/contrib/adaptive_avg_pooling-inl.h
##########
@@ -21,125 +21,115 @@
  * \file adaptive_avg_pooling-inl.h
  * \brief adaptive average pooling operator
  * \author Hang Zhang
-*/
+ */
 #ifndef MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 #define MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 
 #include <dmlc/logging.h>
 #include <dmlc/parameter.h>
-#include <mxnet/operator.h>
 #include <mxnet/ndarray.h>
+#include <mxnet/operator.h>
+
 #include <map>
-#include <vector>
 #include <string>
 #include <utility>
+#include <vector>
 /* contrib
 #include "../ndarray/ndarray_function.h"
-#include "./operator_common.h"
-#include "./mxnet_op.h"
 #include "./mshadow_op.h"
+#include "./mxnet_op.h"
+#include "./operator_common.h"
 */
 #include "../../ndarray/ndarray_function.h"
-#include "../operator_common.h"
-#include "../mxnet_op.h"
 #include "../mshadow_op.h"
-#if MXNET_USE_MKLDNN == 1
-#include "../nn/mkldnn/mkldnn_adaptive_pooling-inl.h"
-#endif
+#include "../mxnet_op.h"
+#include "../nn/pooling-inl.h"
+#include "../operator_common.h"

Review comment:
       Please don't sort the headers.

##########
File path: src/operator/quantization/mkldnn/mkldnn_quantized_pooling.cc
##########
@@ -39,7 +39,7 @@ static void MKLDNNQuantizedPoolingForward(const nnvm::NodeAttrs& attrs,
       << "mkldnn_quantized_pooling op only supports uint8 and int8 as input "
          "type";
   const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
-  MKLDNNPoolingCompute(ctx, param, in_data[0], req[0], out_data[0], nullptr);
+  MKLDNNPoolingCompute(ctx, param, in_data[0], req[0], out_data[0], nullptr, false);

Review comment:
       Could you please add a comment before false?
   ```suggestion
     MKLDNNPoolingCompute(ctx, param, in_data[0], req[0], out_data[0], nullptr, /*use_adaptive*/ false);
   ```

##########
File path: src/operator/nn/mkldnn/mkldnn_pooling.cc
##########
@@ -224,7 +226,7 @@ MKLDNNPoolingFwd& GetPoolingFwd(const PoolingParam& param,
       pooling_fwds;
 #endif
 
-  bool with_workspace = is_train && MKLDNNRequireWorkspace(param);
+  const bool with_workspace = is_train && (MKLDNNRequireWorkspace(param) || use_adaptive_pooling);

Review comment:
       Could you please tell me why you placed `use_adaptive_pooling` here?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel edited a comment on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel edited a comment on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-957943221






-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel edited a comment on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel edited a comment on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-957943221


   Generally, it looks good! [v1.x] has not been re-formatted yet. Please don't sort headers and don't re-format files here


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] piotrwolinski-intel commented on a change in pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
piotrwolinski-intel commented on a change in pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#discussion_r740967333



##########
File path: src/operator/contrib/adaptive_avg_pooling-inl.h
##########
@@ -21,125 +21,112 @@
  * \file adaptive_avg_pooling-inl.h
  * \brief adaptive average pooling operator
  * \author Hang Zhang
-*/
+ */
 #ifndef MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 #define MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 
 #include <dmlc/logging.h>
 #include <dmlc/parameter.h>
-#include <mxnet/operator.h>
 #include <mxnet/ndarray.h>
+#include <mxnet/operator.h>
+
 #include <map>
-#include <vector>
 #include <string>
 #include <utility>
+#include <vector>
 /* contrib
 #include "../ndarray/ndarray_function.h"
-#include "./operator_common.h"
-#include "./mxnet_op.h"
 #include "./mshadow_op.h"
+#include "./mxnet_op.h"
+#include "./operator_common.h"
 */
 #include "../../ndarray/ndarray_function.h"
-#include "../operator_common.h"
-#include "../mxnet_op.h"
 #include "../mshadow_op.h"
-#if MXNET_USE_MKLDNN == 1
-#include "../nn/mkldnn/mkldnn_adaptive_pooling-inl.h"
-#endif
+#include "../mxnet_op.h"
+#include "../nn/pooling-inl.h"
+#include "../operator_common.h"
 
 namespace mxnet {
 namespace op {
 
-struct AdaptiveAvgPoolParam : public dmlc::Parameter<AdaptiveAvgPoolParam> {
-  mxnet::Tuple<int> output_size;
-  DMLC_DECLARE_PARAMETER(AdaptiveAvgPoolParam) {
-    DMLC_DECLARE_FIELD(output_size).set_default(mxnet::Tuple<int>())
-    .describe("int (output size) or a tuple of int for output (height, width).");
-  }
-  bool operator==(const AdaptiveAvgPoolParam &other) const {
-    return this->output_size == other.output_size;
-  }
-};
-
 static inline bool IsWriting(const OpReqType ort) {
   return ort == kWriteTo || ort == kWriteInplace;
 }
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 
 #if MXNET_USE_CUDA
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 #endif  // MXNET_USE_CUDA
 
 template <typename xpu>
 inline void AdaptiveAvgPoolOpForward(const nnvm::NodeAttrs& attrs,
-                                     const OpContext &ctx,
-                                     const std::vector<TBlob> &inputs,
-                                     const std::vector<OpReqType> &req,
-                                     const std::vector<TBlob> &outputs) {
+                                     const OpContext& ctx,
+                                     const std::vector<TBlob>& inputs,
+                                     const std::vector<OpReqType>& req,
+                                     const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateOutput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
-
 template <typename xpu>
 inline void AdaptiveAvgPoolOpBackward(const nnvm::NodeAttrs& attrs,
-                                      const OpContext &ctx,
-                                      const std::vector<TBlob> &inputs,
-                                      const std::vector<OpReqType> &req,
-                                      const std::vector<TBlob> &outputs) {
+                                      const OpContext& ctx,
+                                      const std::vector<TBlob>& inputs,
+                                      const std::vector<OpReqType>& req,
+                                      const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   if (IsWriting(req[0])) {
     // zero grad before backwarding
-    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
-      Fill<false>(s, outputs[0], kWriteTo, 0);
-    })
+    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { Fill<false>(s, outputs[0], kWriteTo, 0); })
   }
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateGradInput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
 static bool AdaptiveAvgPoolOpInferShape(const nnvm::NodeAttrs& attrs,
-                                       mxnet::ShapeVector *in_shape,
-                                       mxnet::ShapeVector *out_shape) {
+                                        mxnet::ShapeVector* in_shape,
+                                        mxnet::ShapeVector* out_shape) {
   using namespace mshadow;
   CHECK_EQ(in_shape->size(), 1U) << "Input:[data]";
   CHECK_EQ(out_shape->size(), 1U) << "Output:[data]";
-  const AdaptiveAvgPoolParam& param = nnvm::get<AdaptiveAvgPoolParam>(attrs.parsed);
+  const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
+
   mxnet::TShape dshape(in_shape->at(0));
-  if (mxnet::op::shape_is_none(dshape)) return false;
-  if (param.output_size.ndim() == 0) {
+  if (mxnet::op::shape_is_none(dshape))
+    return false;
+  if (param.output_size.value().ndim() == 0) {
     dshape[2] = 1;
     dshape[3] = 1;

Review comment:
       Done




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mxnet-bot commented on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mxnet-bot commented on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-960819138


   Jenkins CI successfully triggered : [unix-cpu]


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel commented on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel commented on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-961101327


   @szha Could you help with review/merge?


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel edited a comment on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel edited a comment on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-957943221






-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel commented on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel commented on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-957943221


   Generally, it looks good! This pull request should solve one specific problem. Please don't sort headers and don't re-format files here.


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel commented on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel commented on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-957943221






-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] szha merged pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
szha merged pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699


   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] piotrwolinski-intel commented on a change in pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
piotrwolinski-intel commented on a change in pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#discussion_r742590164



##########
File path: src/operator/nn/mkldnn/mkldnn_pooling.cc
##########
@@ -224,7 +226,7 @@ MKLDNNPoolingFwd& GetPoolingFwd(const PoolingParam& param,
       pooling_fwds;
 #endif
 
-  bool with_workspace = is_train && MKLDNNRequireWorkspace(param);
+  const bool with_workspace = is_train && (MKLDNNRequireWorkspace(param) || use_adaptive_pooling);

Review comment:
       Because this logic is merged from the adaptive pooling, and the condition there was:
   `bool with_workspace = is_train;`




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] piotrwolinski-intel commented on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
piotrwolinski-intel commented on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-960818987


   @mxnet-bot run ci [unix-cpu]


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] piotrwolinski-intel commented on a change in pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
piotrwolinski-intel commented on a change in pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#discussion_r742638456



##########
File path: src/operator/contrib/adaptive_avg_pooling-inl.h
##########
@@ -21,125 +21,115 @@
  * \file adaptive_avg_pooling-inl.h
  * \brief adaptive average pooling operator
  * \author Hang Zhang
-*/
+ */
 #ifndef MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 #define MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 
 #include <dmlc/logging.h>
 #include <dmlc/parameter.h>
-#include <mxnet/operator.h>
 #include <mxnet/ndarray.h>
+#include <mxnet/operator.h>
+
 #include <map>
-#include <vector>
 #include <string>
 #include <utility>
+#include <vector>
 /* contrib
 #include "../ndarray/ndarray_function.h"
-#include "./operator_common.h"
-#include "./mxnet_op.h"
 #include "./mshadow_op.h"
+#include "./mxnet_op.h"
+#include "./operator_common.h"
 */
 #include "../../ndarray/ndarray_function.h"
-#include "../operator_common.h"
-#include "../mxnet_op.h"
 #include "../mshadow_op.h"
-#if MXNET_USE_MKLDNN == 1
-#include "../nn/mkldnn/mkldnn_adaptive_pooling-inl.h"
-#endif
+#include "../mxnet_op.h"
+#include "../nn/pooling-inl.h"
+#include "../operator_common.h"
 
 namespace mxnet {
 namespace op {
 
-struct AdaptiveAvgPoolParam : public dmlc::Parameter<AdaptiveAvgPoolParam> {
-  mxnet::Tuple<int> output_size;
-  DMLC_DECLARE_PARAMETER(AdaptiveAvgPoolParam) {
-    DMLC_DECLARE_FIELD(output_size).set_default(mxnet::Tuple<int>())
-    .describe("int (output size) or a tuple of int for output (height, width).");
-  }
-  bool operator==(const AdaptiveAvgPoolParam &other) const {
-    return this->output_size == other.output_size;
-  }
-};
-
 static inline bool IsWriting(const OpReqType ort) {
   return ort == kWriteTo || ort == kWriteInplace;
 }
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 
 #if MXNET_USE_CUDA
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 #endif  // MXNET_USE_CUDA
 
 template <typename xpu>
 inline void AdaptiveAvgPoolOpForward(const nnvm::NodeAttrs& attrs,
-                                     const OpContext &ctx,
-                                     const std::vector<TBlob> &inputs,
-                                     const std::vector<OpReqType> &req,
-                                     const std::vector<TBlob> &outputs) {
+                                     const OpContext& ctx,
+                                     const std::vector<TBlob>& inputs,
+                                     const std::vector<OpReqType>& req,
+                                     const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateOutput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
-
 template <typename xpu>
 inline void AdaptiveAvgPoolOpBackward(const nnvm::NodeAttrs& attrs,
-                                      const OpContext &ctx,
-                                      const std::vector<TBlob> &inputs,
-                                      const std::vector<OpReqType> &req,
-                                      const std::vector<TBlob> &outputs) {
+                                      const OpContext& ctx,
+                                      const std::vector<TBlob>& inputs,
+                                      const std::vector<OpReqType>& req,
+                                      const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   if (IsWriting(req[0])) {
     // zero grad before backwarding
-    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
-      Fill<false>(s, outputs[0], kWriteTo, 0);
-    })
+    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { Fill<false>(s, outputs[0], kWriteTo, 0); })
   }
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateGradInput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
 static bool AdaptiveAvgPoolOpInferShape(const nnvm::NodeAttrs& attrs,
-                                       mxnet::ShapeVector *in_shape,
-                                       mxnet::ShapeVector *out_shape) {
+                                        mxnet::ShapeVector* in_shape,
+                                        mxnet::ShapeVector* out_shape) {
   using namespace mshadow;
   CHECK_EQ(in_shape->size(), 1U) << "Input:[data]";
   CHECK_EQ(out_shape->size(), 1U) << "Output:[data]";
-  const AdaptiveAvgPoolParam& param = nnvm::get<AdaptiveAvgPoolParam>(attrs.parsed);
+  const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
+
   mxnet::TShape dshape(in_shape->at(0));
-  if (mxnet::op::shape_is_none(dshape)) return false;
-  if (param.output_size.ndim() == 0) {
-    dshape[2] = 1;
-    dshape[3] = 1;
-  } else if (param.output_size.ndim() == 1) {
-    dshape[2] = param.output_size[0];
-    dshape[3] = param.output_size[0];
-  } else if (param.output_size.ndim() == 2) {
-    dshape[2] = param.output_size[0];
-    dshape[3] = param.output_size[1];
+
+  if (mxnet::op::shape_is_none(dshape))
+    return false;

Review comment:
       Done




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] bartekkuncer commented on a change in pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
bartekkuncer commented on a change in pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#discussion_r738377700



##########
File path: src/operator/contrib/adaptive_avg_pooling-inl.h
##########
@@ -21,125 +21,112 @@
  * \file adaptive_avg_pooling-inl.h
  * \brief adaptive average pooling operator
  * \author Hang Zhang
-*/
+ */
 #ifndef MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 #define MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 
 #include <dmlc/logging.h>
 #include <dmlc/parameter.h>
-#include <mxnet/operator.h>
 #include <mxnet/ndarray.h>
+#include <mxnet/operator.h>
+
 #include <map>
-#include <vector>
 #include <string>
 #include <utility>
+#include <vector>
 /* contrib
 #include "../ndarray/ndarray_function.h"
-#include "./operator_common.h"
-#include "./mxnet_op.h"
 #include "./mshadow_op.h"
+#include "./mxnet_op.h"
+#include "./operator_common.h"
 */
 #include "../../ndarray/ndarray_function.h"
-#include "../operator_common.h"
-#include "../mxnet_op.h"
 #include "../mshadow_op.h"
-#if MXNET_USE_MKLDNN == 1
-#include "../nn/mkldnn/mkldnn_adaptive_pooling-inl.h"
-#endif
+#include "../mxnet_op.h"
+#include "../nn/pooling-inl.h"
+#include "../operator_common.h"
 
 namespace mxnet {
 namespace op {
 
-struct AdaptiveAvgPoolParam : public dmlc::Parameter<AdaptiveAvgPoolParam> {
-  mxnet::Tuple<int> output_size;
-  DMLC_DECLARE_PARAMETER(AdaptiveAvgPoolParam) {
-    DMLC_DECLARE_FIELD(output_size).set_default(mxnet::Tuple<int>())
-    .describe("int (output size) or a tuple of int for output (height, width).");
-  }
-  bool operator==(const AdaptiveAvgPoolParam &other) const {
-    return this->output_size == other.output_size;
-  }
-};
-
 static inline bool IsWriting(const OpReqType ort) {
   return ort == kWriteTo || ort == kWriteInplace;
 }
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 
 #if MXNET_USE_CUDA
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 #endif  // MXNET_USE_CUDA
 
 template <typename xpu>
 inline void AdaptiveAvgPoolOpForward(const nnvm::NodeAttrs& attrs,
-                                     const OpContext &ctx,
-                                     const std::vector<TBlob> &inputs,
-                                     const std::vector<OpReqType> &req,
-                                     const std::vector<TBlob> &outputs) {
+                                     const OpContext& ctx,
+                                     const std::vector<TBlob>& inputs,
+                                     const std::vector<OpReqType>& req,
+                                     const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateOutput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
-
 template <typename xpu>
 inline void AdaptiveAvgPoolOpBackward(const nnvm::NodeAttrs& attrs,
-                                      const OpContext &ctx,
-                                      const std::vector<TBlob> &inputs,
-                                      const std::vector<OpReqType> &req,
-                                      const std::vector<TBlob> &outputs) {
+                                      const OpContext& ctx,
+                                      const std::vector<TBlob>& inputs,
+                                      const std::vector<OpReqType>& req,
+                                      const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   if (IsWriting(req[0])) {
     // zero grad before backwarding
-    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
-      Fill<false>(s, outputs[0], kWriteTo, 0);
-    })
+    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { Fill<false>(s, outputs[0], kWriteTo, 0); })
   }
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateGradInput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
 static bool AdaptiveAvgPoolOpInferShape(const nnvm::NodeAttrs& attrs,
-                                       mxnet::ShapeVector *in_shape,
-                                       mxnet::ShapeVector *out_shape) {
+                                        mxnet::ShapeVector* in_shape,
+                                        mxnet::ShapeVector* out_shape) {
   using namespace mshadow;
   CHECK_EQ(in_shape->size(), 1U) << "Input:[data]";
   CHECK_EQ(out_shape->size(), 1U) << "Output:[data]";
-  const AdaptiveAvgPoolParam& param = nnvm::get<AdaptiveAvgPoolParam>(attrs.parsed);
+  const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
+
   mxnet::TShape dshape(in_shape->at(0));

Review comment:
       Maybe initialize with `{in_shape->at(0),1,1}`?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel commented on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel commented on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-957946763


   Generally, it looks good! [v1.x] has not been re-formatted yet. Please don't sort headers and don't re-format files here


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel edited a comment on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel edited a comment on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-957943221


   Generally, it looks good! [v1.x] has not been re-formatted yet. Could you please not sort headers and not re-format files here?


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] piotrwolinski-intel commented on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
piotrwolinski-intel commented on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-957746267


   @szha Please help with the merge.


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] piotrwolinski-intel commented on a change in pull request #20699: Piotrwolinski/merge adaptive pooling operator

Posted by GitBox <gi...@apache.org>.
piotrwolinski-intel commented on a change in pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#discussion_r735678209



##########
File path: src/operator/nn/mkldnn/mkldnn_pooling-inl.h
##########
@@ -87,6 +87,26 @@ class MKLDNNPoolingBwd {
   const mkldnn::pooling_backward::primitive_desc& GetPd();
 };
 
+template <typename T = mkldnn::memory::dims>
+void useAdaptivePaddingKernel(T* kernel,

Review comment:
       Done




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] mozga-intel removed a comment on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
mozga-intel removed a comment on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-957943221


   Generally, it looks good! [v1.x] has not been re-formatted yet. Please don't sort headers and don't re-format files here


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] piotrwolinski-intel commented on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
piotrwolinski-intel commented on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-957746267


   @szha Please help with the merge.


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] piotrwolinski-intel commented on pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
piotrwolinski-intel commented on pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#issuecomment-957746267


   @szha Please help with the merge.


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] piotrwolinski-intel commented on a change in pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
piotrwolinski-intel commented on a change in pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#discussion_r740967333



##########
File path: src/operator/contrib/adaptive_avg_pooling-inl.h
##########
@@ -21,125 +21,112 @@
  * \file adaptive_avg_pooling-inl.h
  * \brief adaptive average pooling operator
  * \author Hang Zhang
-*/
+ */
 #ifndef MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 #define MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 
 #include <dmlc/logging.h>
 #include <dmlc/parameter.h>
-#include <mxnet/operator.h>
 #include <mxnet/ndarray.h>
+#include <mxnet/operator.h>
+
 #include <map>
-#include <vector>
 #include <string>
 #include <utility>
+#include <vector>
 /* contrib
 #include "../ndarray/ndarray_function.h"
-#include "./operator_common.h"
-#include "./mxnet_op.h"
 #include "./mshadow_op.h"
+#include "./mxnet_op.h"
+#include "./operator_common.h"
 */
 #include "../../ndarray/ndarray_function.h"
-#include "../operator_common.h"
-#include "../mxnet_op.h"
 #include "../mshadow_op.h"
-#if MXNET_USE_MKLDNN == 1
-#include "../nn/mkldnn/mkldnn_adaptive_pooling-inl.h"
-#endif
+#include "../mxnet_op.h"
+#include "../nn/pooling-inl.h"
+#include "../operator_common.h"
 
 namespace mxnet {
 namespace op {
 
-struct AdaptiveAvgPoolParam : public dmlc::Parameter<AdaptiveAvgPoolParam> {
-  mxnet::Tuple<int> output_size;
-  DMLC_DECLARE_PARAMETER(AdaptiveAvgPoolParam) {
-    DMLC_DECLARE_FIELD(output_size).set_default(mxnet::Tuple<int>())
-    .describe("int (output size) or a tuple of int for output (height, width).");
-  }
-  bool operator==(const AdaptiveAvgPoolParam &other) const {
-    return this->output_size == other.output_size;
-  }
-};
-
 static inline bool IsWriting(const OpReqType ort) {
   return ort == kWriteTo || ort == kWriteInplace;
 }
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 
 #if MXNET_USE_CUDA
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 #endif  // MXNET_USE_CUDA
 
 template <typename xpu>
 inline void AdaptiveAvgPoolOpForward(const nnvm::NodeAttrs& attrs,
-                                     const OpContext &ctx,
-                                     const std::vector<TBlob> &inputs,
-                                     const std::vector<OpReqType> &req,
-                                     const std::vector<TBlob> &outputs) {
+                                     const OpContext& ctx,
+                                     const std::vector<TBlob>& inputs,
+                                     const std::vector<OpReqType>& req,
+                                     const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateOutput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
-
 template <typename xpu>
 inline void AdaptiveAvgPoolOpBackward(const nnvm::NodeAttrs& attrs,
-                                      const OpContext &ctx,
-                                      const std::vector<TBlob> &inputs,
-                                      const std::vector<OpReqType> &req,
-                                      const std::vector<TBlob> &outputs) {
+                                      const OpContext& ctx,
+                                      const std::vector<TBlob>& inputs,
+                                      const std::vector<OpReqType>& req,
+                                      const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   if (IsWriting(req[0])) {
     // zero grad before backwarding
-    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
-      Fill<false>(s, outputs[0], kWriteTo, 0);
-    })
+    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { Fill<false>(s, outputs[0], kWriteTo, 0); })
   }
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateGradInput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
 static bool AdaptiveAvgPoolOpInferShape(const nnvm::NodeAttrs& attrs,
-                                       mxnet::ShapeVector *in_shape,
-                                       mxnet::ShapeVector *out_shape) {
+                                        mxnet::ShapeVector* in_shape,
+                                        mxnet::ShapeVector* out_shape) {
   using namespace mshadow;
   CHECK_EQ(in_shape->size(), 1U) << "Input:[data]";
   CHECK_EQ(out_shape->size(), 1U) << "Output:[data]";
-  const AdaptiveAvgPoolParam& param = nnvm::get<AdaptiveAvgPoolParam>(attrs.parsed);
+  const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
+
   mxnet::TShape dshape(in_shape->at(0));
-  if (mxnet::op::shape_is_none(dshape)) return false;
-  if (param.output_size.ndim() == 0) {
+  if (mxnet::op::shape_is_none(dshape))
+    return false;
+  if (param.output_size.value().ndim() == 0) {
     dshape[2] = 1;
     dshape[3] = 1;

Review comment:
       Done




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [incubator-mxnet] szha merged pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

Posted by GitBox <gi...@apache.org>.
szha merged pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699






-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org