You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2021/10/28 13:14:56 UTC

[GitHub] [incubator-mxnet] bartekkuncer commented on a change in pull request #20699: [v1.x] Merge MKLDNN adaptive pooling with traditional pooling implementation

bartekkuncer commented on a change in pull request #20699:
URL: https://github.com/apache/incubator-mxnet/pull/20699#discussion_r738377700



##########
File path: src/operator/contrib/adaptive_avg_pooling-inl.h
##########
@@ -21,125 +21,112 @@
  * \file adaptive_avg_pooling-inl.h
  * \brief adaptive average pooling operator
  * \author Hang Zhang
-*/
+ */
 #ifndef MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 #define MXNET_OPERATOR_CONTRIB_ADAPTIVE_AVG_POOLING_INL_H_
 
 #include <dmlc/logging.h>
 #include <dmlc/parameter.h>
-#include <mxnet/operator.h>
 #include <mxnet/ndarray.h>
+#include <mxnet/operator.h>
+
 #include <map>
-#include <vector>
 #include <string>
 #include <utility>
+#include <vector>
 /* contrib
 #include "../ndarray/ndarray_function.h"
-#include "./operator_common.h"
-#include "./mxnet_op.h"
 #include "./mshadow_op.h"
+#include "./mxnet_op.h"
+#include "./operator_common.h"
 */
 #include "../../ndarray/ndarray_function.h"
-#include "../operator_common.h"
-#include "../mxnet_op.h"
 #include "../mshadow_op.h"
-#if MXNET_USE_MKLDNN == 1
-#include "../nn/mkldnn/mkldnn_adaptive_pooling-inl.h"
-#endif
+#include "../mxnet_op.h"
+#include "../nn/pooling-inl.h"
+#include "../operator_common.h"
 
 namespace mxnet {
 namespace op {
 
-struct AdaptiveAvgPoolParam : public dmlc::Parameter<AdaptiveAvgPoolParam> {
-  mxnet::Tuple<int> output_size;
-  DMLC_DECLARE_PARAMETER(AdaptiveAvgPoolParam) {
-    DMLC_DECLARE_FIELD(output_size).set_default(mxnet::Tuple<int>())
-    .describe("int (output size) or a tuple of int for output (height, width).");
-  }
-  bool operator==(const AdaptiveAvgPoolParam &other) const {
-    return this->output_size == other.output_size;
-  }
-};
-
 static inline bool IsWriting(const OpReqType ort) {
   return ort == kWriteTo || ort == kWriteInplace;
 }
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<cpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 
 #if MXNET_USE_CUDA
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu> *s,
-                                 const std::vector<TBlob> &input,
-                                 const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu>* s,
+                                 const std::vector<TBlob>& input,
+                                 const std::vector<TBlob>& output);
 
-template<typename xpu, typename DType, typename AccReal>
-void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu> *s,
-                                    const std::vector<TBlob> &input,
-                                    const std::vector<TBlob> &output);
+template <typename xpu, typename DType, typename AccReal>
+void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu>* s,
+                                    const std::vector<TBlob>& input,
+                                    const std::vector<TBlob>& output);
 #endif  // MXNET_USE_CUDA
 
 template <typename xpu>
 inline void AdaptiveAvgPoolOpForward(const nnvm::NodeAttrs& attrs,
-                                     const OpContext &ctx,
-                                     const std::vector<TBlob> &inputs,
-                                     const std::vector<OpReqType> &req,
-                                     const std::vector<TBlob> &outputs) {
+                                     const OpContext& ctx,
+                                     const std::vector<TBlob>& inputs,
+                                     const std::vector<OpReqType>& req,
+                                     const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateOutput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
-
 template <typename xpu>
 inline void AdaptiveAvgPoolOpBackward(const nnvm::NodeAttrs& attrs,
-                                      const OpContext &ctx,
-                                      const std::vector<TBlob> &inputs,
-                                      const std::vector<OpReqType> &req,
-                                      const std::vector<TBlob> &outputs) {
+                                      const OpContext& ctx,
+                                      const std::vector<TBlob>& inputs,
+                                      const std::vector<OpReqType>& req,
+                                      const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   if (IsWriting(req[0])) {
     // zero grad before backwarding
-    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
-      Fill<false>(s, outputs[0], kWriteTo, 0);
-    })
+    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { Fill<false>(s, outputs[0], kWriteTo, 0); })
   }
   MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
     AdaptiveAvgPoolUpdateGradInput<xpu, DType, AccReal>(s, inputs, outputs);
   });
 }
 
 static bool AdaptiveAvgPoolOpInferShape(const nnvm::NodeAttrs& attrs,
-                                       mxnet::ShapeVector *in_shape,
-                                       mxnet::ShapeVector *out_shape) {
+                                        mxnet::ShapeVector* in_shape,
+                                        mxnet::ShapeVector* out_shape) {
   using namespace mshadow;
   CHECK_EQ(in_shape->size(), 1U) << "Input:[data]";
   CHECK_EQ(out_shape->size(), 1U) << "Output:[data]";
-  const AdaptiveAvgPoolParam& param = nnvm::get<AdaptiveAvgPoolParam>(attrs.parsed);
+  const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
+
   mxnet::TShape dshape(in_shape->at(0));

Review comment:
       Maybe initialize with `{in_shape->at(0), 1, 1}` instead?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org