Posted to commits@mxnet.apache.org by ha...@apache.org on 2018/08/10 20:56:39 UTC

[incubator-mxnet] branch master updated: rm wrong infertype for AdaptiveAvgPool and BilinearReisze2D (#12098)

This is an automated email from the ASF dual-hosted git repository.

haibin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new f7211b2  rm wrong infertype for AdaptiveAvgPool and BilinearReisze2D (#12098)
f7211b2 is described below

commit f7211b227c912abed58435bcd288ced7d28d9ef0
Author: Hang Zhang <80...@users.noreply.github.com>
AuthorDate: Fri Aug 10 13:56:29 2018 -0700

    rm wrong infertype for AdaptiveAvgPool and BilinearReisze2D (#12098)
---
 src/operator/contrib/adaptive_avg_pooling-inl.h | 35 ------------------------
 src/operator/contrib/adaptive_avg_pooling.cc    |  3 ---
 src/operator/contrib/bilinear_resize-inl.h      | 36 -------------------------
 src/operator/contrib/bilinear_resize.cc         |  3 ---
 4 files changed, 77 deletions(-)
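
For context on why the deleted helpers were "wrong": the InferType functions were copied from a batch-norm-style pattern and forced the output dtype to the accumulation type, so a float16 input produced a float32 output. With no FInferType registered, type inference falls back to the framework's same-type default, which keeps the output dtype equal to the input dtype. A minimal sketch of that same-dtype behavior (an illustration under those assumptions, not code from this commit; it assumes the usual MXNet/nnvm headers such as nnvm/node.h and dmlc/logging.h):

// Sketch only, not part of this commit: same-dtype inference, roughly what
// the framework default does once the explicit FInferType registration is gone.
static bool SameDtypeInferTypeSketch(const nnvm::NodeAttrs& attrs,
                                     std::vector<int>* in_type,
                                     std::vector<int>* out_type) {
  CHECK_EQ(in_type->size(), 1U);
  const int dtype = (*in_type)[0];
  CHECK_NE(dtype, -1) << "First input must have specified type";
  out_type->clear();
  out_type->push_back(dtype);  // output keeps the input dtype, e.g. float16 stays float16
  return true;
}

Dropping the registration rather than patching the helper keeps both operators on the standard default path, which is the approach this commit takes.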

diff --git a/src/operator/contrib/adaptive_avg_pooling-inl.h b/src/operator/contrib/adaptive_avg_pooling-inl.h
index 7331c7b..12284d9 100644
--- a/src/operator/contrib/adaptive_avg_pooling-inl.h
+++ b/src/operator/contrib/adaptive_avg_pooling-inl.h
@@ -144,41 +144,6 @@ static bool AdaptiveAvgPoolOpInferShape(const nnvm::NodeAttrs& attrs,
   return true;
 }
 
-static bool AdaptiveAvgPoolOpInferType(const nnvm::NodeAttrs& attrs,
-                                       std::vector<int> *in_type,
-                                       std::vector<int> *out_type) {
-  using namespace mshadow;
-  CHECK_EQ(in_type->size(), 1U);
-  int dtype = (*in_type)[0];
-  CHECK_NE(dtype, -1) << "First input must have specified type";
-  // For float16 input type beta, gamma, mean, and average are stored in float32.
-  // For other input types, these parameters have the same type as input
-  // NOTE: This requirement is from cuDNN (v. 4 and 5)
-  int dtype_param = 0;
-  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DTypeX, AccRealX, {
-      dtype_param = mshadow::DataType<AccRealX>::kFlag; });
-  out_type->clear();
-  out_type->push_back(dtype_param);
-  return true;
-}
-
-static inline bool AdaptiveAvgPoolOpStorageType(const nnvm::NodeAttrs &attrs,
-                                                const int dev_mask,
-                                                DispatchMode *dispatch_mode,
-                                                std::vector<int> *in_attrs,
-                                                std::vector<int> *out_attrs) {
-  CHECK_EQ(in_attrs->size(), 1);
-  CHECK_EQ(out_attrs->size(), 1);
-  *dispatch_mode = DispatchMode::kFCompute;
-  for (int& v : *in_attrs) {
-    if (v == - 1) v = kDefaultStorage;
-  }
-  for (size_t i = 0; i < out_attrs->size(); i++) {
-    (*out_attrs)[i] = kDefaultStorage;
-  }
-  return true;
-}
-
 using namespace mshadow;
 template<typename xpu, int Dim, typename DType>
 MSHADOW_XINLINE int get_stride(Tensor<xpu, Dim, DType> tensor, int idx) {
diff --git a/src/operator/contrib/adaptive_avg_pooling.cc b/src/operator/contrib/adaptive_avg_pooling.cc
index 0795711..00ab366 100644
--- a/src/operator/contrib/adaptive_avg_pooling.cc
+++ b/src/operator/contrib/adaptive_avg_pooling.cc
@@ -216,8 +216,6 @@ The pooling kernel and stride sizes are automatically chosen for desired output
 .set_num_inputs(1)
 .set_num_outputs(1)
 .set_attr<nnvm::FInferShape>("FInferShape", AdaptiveAvgPoolOpInferShape)
-.set_attr<nnvm::FInferType>("FInferType", AdaptiveAvgPoolOpInferType)
-.set_attr<FInferStorageType>("FInferStorageType", AdaptiveAvgPoolOpStorageType)
 .set_attr<FCompute>("FCompute<cpu>", AdaptiveAvgPoolOpForward<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
   ElemwiseGradUseNone{"_backward_contrib_AdaptiveAvgPooling2D"})
@@ -229,7 +227,6 @@ NNVM_REGISTER_OP(_backward_contrib_AdaptiveAvgPooling2D)
 .set_num_inputs(1)
 .set_num_outputs(1)
 .set_attr<nnvm::TIsBackward>("TIsBackward", true)
-.set_attr<FInferStorageType>("FInferStorageType", AdaptiveAvgPoolOpStorageType)
 .set_attr<FCompute>("FCompute<cpu>", AdaptiveAvgPoolOpBackward<cpu>);
 
 
diff --git a/src/operator/contrib/bilinear_resize-inl.h b/src/operator/contrib/bilinear_resize-inl.h
index c096f01..ff3f794 100644
--- a/src/operator/contrib/bilinear_resize-inl.h
+++ b/src/operator/contrib/bilinear_resize-inl.h
@@ -136,42 +136,6 @@ static bool BilinearSampleOpInferShape(const nnvm::NodeAttrs& attrs,
   return true;
 }
 
-static bool BilinearSampleOpInferType(const nnvm::NodeAttrs& attrs,
-                                      std::vector<int> *in_type,
-                                      std::vector<int> *out_type) {
-  using namespace mshadow;
-  CHECK_EQ(in_type->size(), 1U);
-  int dtype = (*in_type)[0];
-  CHECK_NE(dtype, -1) << "First input must have specified type";
-  // For float16 input type beta, gamma, mean, and average are stored in float32.
-  // For other input types, these parameters have the same type as input
-  // NOTE: This requirement is from cuDNN (v. 4 and 5)
-  int dtype_param = 0;
-  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DTypeX, AccRealX, {
-      dtype_param = mshadow::DataType<AccRealX>::kFlag; });
-  out_type->clear();
-  out_type->push_back(dtype_param);
-  return true;
-}
-
-static inline bool BilinearSampleOpStorageType(const nnvm::NodeAttrs &attrs,
-                                               const int dev_mask,
-                                               DispatchMode *dispatch_mode,
-                                               std::vector<int> *in_attrs,
-                                               std::vector<int> *out_attrs) {
-  CHECK_EQ(in_attrs->size(), 1);
-  CHECK_EQ(out_attrs->size(), 1);
-  *dispatch_mode = DispatchMode::kFCompute;
-  for (int& v : *in_attrs) {
-    if (v == - 1) v = kDefaultStorage;
-  }
-  for (size_t i = 0; i < out_attrs->size(); i++) {
-    (*out_attrs)[i] = kDefaultStorage;
-  }
-  return true;
-}
-
-
 }  // namespace op
 }  // namespace mxnet
 
diff --git a/src/operator/contrib/bilinear_resize.cc b/src/operator/contrib/bilinear_resize.cc
index e1248ce..074f74a 100644
--- a/src/operator/contrib/bilinear_resize.cc
+++ b/src/operator/contrib/bilinear_resize.cc
@@ -177,8 +177,6 @@ for more details.
 .set_num_inputs(1)
 .set_num_outputs(1)
 .set_attr<nnvm::FInferShape>("FInferShape", BilinearSampleOpInferShape)
-.set_attr<nnvm::FInferType>("FInferType", BilinearSampleOpInferType)
-.set_attr<FInferStorageType>("FInferStorageType", BilinearSampleOpStorageType)
 .set_attr<FCompute>("FCompute<cpu>", BilinearSampleOpForward<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
   ElemwiseGradUseNone{"_backward_contrib_BilinearResize2D"})
@@ -190,7 +188,6 @@ NNVM_REGISTER_OP(_backward_contrib_BilinearResize2D)
 .set_num_inputs(1)
 .set_num_outputs(1)
 .set_attr<nnvm::TIsBackward>("TIsBackward", true)
-.set_attr<FInferStorageType>("FInferStorageType", BilinearSampleOpStorageType)
 .set_attr<FCompute>("FCompute<cpu>", BilinearSampleOpBackward<cpu>);
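
The FInferStorageType removals look safe for a similar reason: these operators only handle dense tensors, and with no storage-type function registered MXNet's default storage inference assigns default (dense) storage throughout and dispatches to FCompute, which is what the deleted helpers did explicitly. A rough sketch of that behavior (an illustration of the equivalence, not the framework's actual fallback code):

// Sketch only: mirrors what the deleted *OpStorageType helpers did, and roughly
// what the default dense storage inference provides for these operators.
static bool DenseOnlyStorageTypeSketch(const nnvm::NodeAttrs& attrs,
                                       const int dev_mask,
                                       DispatchMode* dispatch_mode,
                                       std::vector<int>* in_attrs,
                                       std::vector<int>* out_attrs) {
  *dispatch_mode = DispatchMode::kFCompute;  // plain dense compute path
  for (int& v : *in_attrs) {
    if (v == -1) v = kDefaultStorage;        // unknown inputs become dense
  }
  for (int& v : *out_attrs) {
    v = kDefaultStorage;                     // outputs are always dense
  }
  return true;
}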