Posted to commits@mxnet.apache.org by an...@apache.org on 2018/06/14 20:13:22 UTC

[incubator-mxnet] branch v1.2.0 updated: Enable CUDNN for conv1D (#11194) (#11270)

This is an automated email from the ASF dual-hosted git repository.

anirudh2290 pushed a commit to branch v1.2.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.2.0 by this push:
     new 73ebc76  Enable CUDNN for conv1D (#11194) (#11270)
73ebc76 is described below

commit 73ebc762c3dbaa122897d377f128634ec291d3a6
Author: Haibin Lin <li...@gmail.com>
AuthorDate: Thu Jun 14 13:11:25 2018 -0700

    Enable CUDNN for conv1D (#11194) (#11270)
    
    * enable cudnn for conv1d
    
    * add checks for backward
    
    * fix build
    
    * fix build
    
    * fix lint
    
    * Update convolution.cc
---
 src/operator/nn/convolution.cu | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)
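
Concretely, this backport lets 1-D convolutions in the default NCW layout take
the cuDNN path on GPU instead of unconditionally falling back to MXNet's own
kernel. As a minimal sketch of exercising the operator from the Python API
(assuming a CUDA-enabled MXNet build and an available GPU; the shapes here are
illustrative, not taken from the commit):

    import mxnet as mx

    ctx = mx.gpu(0)
    x = mx.nd.random.uniform(shape=(1, 8, 32), ctx=ctx)  # batch 1, 8 channels, width 32 (NCW)
    w = mx.nd.random.uniform(shape=(16, 8, 3), ctx=ctx)  # 16 filters, kernel width 3
    b = mx.nd.zeros((16,), ctx=ctx)
    y = mx.nd.Convolution(data=x, weight=w, bias=b, kernel=(3,),
                          num_filter=16, layout='NCW')
    print(y.shape)  # (1, 16, 30)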

diff --git a/src/operator/nn/convolution.cu b/src/operator/nn/convolution.cu
index 65a320d..9f61212 100644
--- a/src/operator/nn/convolution.cu
+++ b/src/operator/nn/convolution.cu
@@ -89,8 +89,11 @@ void ConvolutionCompute<gpu>(const nnvm::NodeAttrs& attrs,
   const ConvolutionParam& param = nnvm::get<ConvolutionParam>(attrs.parsed);
   int dtype = inputs[conv::kData].type_flag_;
 
-  // If 1D convolution, use MXNet implementation
-  if (param.kernel.ndim() == 1) {
+#if CUDNN_MAJOR < 5
+  if (param.layout.value() != mshadow::kNCW &&
+      param.layout.value() != mshadow::kNCHW &&
+      param.layout.value() != mshadow::kNCDHW) {
+    // cuDNN versions before 5.0 do not support this layout; use the MXNet implementation
     MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
       ConvolutionOp<gpu, DType> op;
       op.Init(param);
@@ -98,6 +101,8 @@ void ConvolutionCompute<gpu>(const nnvm::NodeAttrs& attrs,
     })
     return;
   }
+#endif
+
 #if MXNET_USE_CUDNN == 0 || CUDNN_MAJOR < 7
   if (param.num_filter == param.num_group &&
       param.layout.value() == mshadow::kNCHW &&
@@ -162,8 +167,11 @@ void ConvolutionGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
   const std::vector<TBlob> &in_grad = outputs;
   int dtype = out_grad.type_flag_;
 
-  // If 1D convolution, use MXNet implementation
-  if (param.kernel.ndim() == 1) {
+#if CUDNN_MAJOR < 5
+  if (param.layout.value() != mshadow::kNCW &&
+      param.layout.value() != mshadow::kNCHW &&
+      param.layout.value() != mshadow::kNCDHW) {
+    // cuDNN versions before 5.0 do not support this layout; use the MXNet implementation
     MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
       ConvolutionOp<gpu, DType> op;
       op.Init(param);
@@ -171,6 +179,7 @@ void ConvolutionGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
     })
     return;
   }
+#endif
 #if MXNET_USE_CUDNN == 0 || CUDNN_MAJOR < 7
   if (param.num_filter == param.num_group &&
       param.layout.value() == mshadow::kNCHW &&
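
For checking the two paths against each other, the operator's existing
cudnn_off flag still forces the pure-MXNet implementation; a rough sketch,
reusing x, w, b, and y from the snippet above (tolerances are illustrative):

    import numpy as np

    y_ref = mx.nd.Convolution(data=x, weight=w, bias=b, kernel=(3,),
                              num_filter=16, layout='NCW', cudnn_off=True)
    np.testing.assert_allclose(y.asnumpy(), y_ref.asnumpy(),
                               rtol=1e-3, atol=1e-4)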

-- 
To stop receiving notification emails like this one, please contact
anirudh2290@apache.org.