You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@singa.apache.org by wa...@apache.org on 2018/07/05 03:09:57 UTC
[02/18] incubator-singa git commit: SINGA-371 Implement functional
operations in c++ for autograd
SINGA-371 Implement functional operations in c++ for autograd
- separate .cc and .h files
- write interface files for these functions (not completed)
Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/af95cc1a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/af95cc1a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/af95cc1a
Branch: refs/heads/master
Commit: af95cc1a67f163bdef265f6bdc93aeaef05f848f
Parents: fc181cd
Author: xuewanqi <xu...@u.nus.edu>
Authored: Thu Jun 14 02:56:41 2018 +0000
Committer: xuewanqi <xu...@u.nus.edu>
Committed: Wed Jun 20 14:47:05 2018 +0000
----------------------------------------------------------------------
src/api/model_operation.i | 59 +++++++++++++++++++++++++++++++++++
src/api/singa.i | 1 +
src/model/convolution_forward.cc | 57 ++++++---------------------------
src/model/convolution_forward.h | 59 +++++++++++++++++++++++++++++++++++
4 files changed, 129 insertions(+), 47 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af95cc1a/src/api/model_operation.i
----------------------------------------------------------------------
diff --git a/src/api/model_operation.i b/src/api/model_operation.i
new file mode 100644
index 0000000..64ecca1
--- /dev/null
+++ b/src/api/model_operation.i
@@ -0,0 +1,59 @@
+/* interface file for swig */
+
+%module model_operation
+%include "std_string.i"
+
+%{
+#include "../src/model/convolution_functions.h"
+%}
+
+namespace singa{
+extern struct ConvHandle{
+ size_t kernel_w_;
+ size_t pad_w_;
+ size_t stride_w_;
+ size_t kernel_h_;
+ size_t pad_h_;
+ size_t stride_h_;
+
+ size_t channels_;
+ size_t num_filters_;
+
+ bool bias_term_;
+
+ size_t workspace_byte_limit_;
+ std::string prefer_;
+};
+
+struct CudnnConvHandle{
+ cudnnTensorDescriptor_t x_desc_ ;
+ cudnnTensorDescriptor_t y_desc_ ;
+ cudnnTensorDescriptor_t bias_desc_ ;
+ cudnnFilterDescriptor_t filter_desc_ ;
+ cudnnConvolutionDescriptor_t conv_desc_ ;
+ cudnnConvolutionFwdAlgo_t fp_alg_;
+ cudnnConvolutionBwdFilterAlgo_t bp_filter_alg_;
+ cudnnConvolutionBwdDataAlgo_t bp_data_alg_;
+
+ size_t workspace_count_;
+ Tensor workspace_;
+
+ size_t height_;
+ size_t width_;
+ size_t conv_height_;
+ size_t conv_width_;
+ size_t batchsize;
+};
+
+extern ConvHandle SetupConv(const size_t in_channels, const LayerConf &conf);
+
+CudnnConvHandle InitCudnn(const Tensor &input, const ConvHandle ch);
+
+Tensor CudnnConvForward(const Tensor x, const Tensor W, const Tensor b,
+ const ConvHandle ch, const CudnnConvHandle cch);
+
+Tensor CudnnConvBackwardW(const Tensor dy, const Tensor x, const Tensor W, const CudnnConvHandle cch);
+
+Tensor CudnnConvBackwardb(const Tensor dy, const Tensor b, const CudnnConvHandle cch);
+
+Tensor CudnnConvBackwardx(const Tensor dy, const Tensor W, const Tensor x, const CudnnConvHandle cch);
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af95cc1a/src/api/singa.i
----------------------------------------------------------------------
diff --git a/src/api/singa.i b/src/api/singa.i
index 3fc3b47..b5abc6b 100644
--- a/src/api/singa.i
+++ b/src/api/singa.i
@@ -29,4 +29,5 @@
%include "model_optimizer.i"
%include "model_loss.i"
%include "model_metric.i"
+%include "model_operation.i"
%include "io_snapshot.i"
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af95cc1a/src/model/convolution_forward.cc
----------------------------------------------------------------------
diff --git a/src/model/convolution_forward.cc b/src/model/convolution_forward.cc
index 8457e95..52acf05 100644
--- a/src/model/convolution_forward.cc
+++ b/src/model/convolution_forward.cc
@@ -1,48 +1,11 @@
-#include <string>
-#include <cudnn.h>
-#include "./layer/cudnn_convolution.h"
-#include "./layer/cudnn_utils.h"
-#include "singa/utils/logging.h"
+//#include <string>
+//#include <cudnn.h>
+//#include "./layer/cudnn_convolution.h"
+//#include "./layer/cudnn_utils.h"
+//#include "singa/utils/logging.h"
+#include "./convolution_forward.h"
namespace singa{
-struct ConvHandle{
- size_t kernel_w_;
- size_t pad_w_;
- size_t stride_w_;
- size_t kernel_h_;
- size_t pad_h_;
- size_t stride_h_;
-
- size_t channels_;
- size_t num_filters_;
-
- bool bias_term_;
-
- size_t workspace_byte_limit_;
- std::string prefer_;
-};
-
-
-struct CudnnConvHandle{
- cudnnTensorDescriptor_t x_desc_ ;
- cudnnTensorDescriptor_t y_desc_ ;
- cudnnTensorDescriptor_t bias_desc_ ;
- cudnnFilterDescriptor_t filter_desc_ ;
- cudnnConvolutionDescriptor_t conv_desc_ ;
- cudnnConvolutionFwdAlgo_t fp_alg_;
- cudnnConvolutionBwdFilterAlgo_t bp_filter_alg_;
- cudnnConvolutionBwdDataAlgo_t bp_data_alg_;
-
- size_t workspace_count_;
- Tensor workspace_;
-
- size_t height_;
- size_t width_;
- size_t conv_height_;
- size_t conv_width_;
- size_t batchsize;
-};
-
// Done in conv2d.__init__()
ConvHandle SetupConv(const size_t in_channels, const LayerConf &conf){
@@ -297,7 +260,7 @@ CudnnConvHandle InitCudnn(const Tensor &input, const ConvHandle ch){
};
};
-Tensor CudnnConvForward(const Tensor x, const Tensor W, const Tensor b,
+Tensor CudnnConvForward(const Tensor &x, const Tensor &W, const Tensor &b,
const ConvHandle ch, const CudnnConvHandle cch){
CHECK_EQ(x.device()->lang(), kCuda);
CHECK_EQ(x.nDim(), 4u);
@@ -337,7 +300,7 @@ Tensor CudnnConvForward(const Tensor x, const Tensor W, const Tensor b,
};
// input Tensor W for Reset dW purpose, can avoid this later.
-Tensor CudnnConvBackwardW(const Tensor dy, const Tensor x, const Tensor W, const CudnnConvHandle cch){
+Tensor CudnnConvBackwardW(const Tensor &dy, const Tensor &x, const Tensor &W, const CudnnConvHandle cch){
CHECK_EQ(dy.device()->lang(), kCuda);
CHECK_EQ(dy.nDim(), 4u);
@@ -360,7 +323,7 @@ Tensor CudnnConvBackwardW(const Tensor dy, const Tensor x, const Tensor W, const
};
// input Tensor b for Reset db purpose, can avoid this later.
-Tensor CudnnConvBackwardb(const Tensor dy, const Tensor b, const CudnnConvHandle cch){
+Tensor CudnnConvBackwardb(const Tensor &dy, const Tensor &b, const CudnnConvHandle cch){
CHECK_EQ(dy.device()->lang(), kCuda);
CHECK_EQ(dy.nDim(), 4u);
@@ -377,7 +340,7 @@ Tensor CudnnConvBackwardb(const Tensor dy, const Tensor b, const CudnnConvHandle
return db;
};
-Tensor CudnnConvBackwardx(const Tensor dy, const Tensor W, const Tensor x, const CudnnConvHandle cch){
+Tensor CudnnConvBackwardx(const Tensor &dy, const Tensor &W, const Tensor &x, const CudnnConvHandle cch){
CHECK_EQ(dy.device()->lang(), kCuda);
CHECK_EQ(dy.nDim(), 4u);
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/af95cc1a/src/model/convolution_forward.h
----------------------------------------------------------------------
diff --git a/src/model/convolution_forward.h b/src/model/convolution_forward.h
new file mode 100644
index 0000000..eba0e50
--- /dev/null
+++ b/src/model/convolution_forward.h
@@ -0,0 +1,59 @@
+#include <string>
+#include <cudnn.h>
+#include "./layer/cudnn_convolution.h"
+#include "./layer/cudnn_utils.h"
+#include "singa/utils/logging.h"
+
+namespace singa{
+
+struct ConvHandle{
+ size_t kernel_w_;
+ size_t pad_w_;
+ size_t stride_w_;
+ size_t kernel_h_;
+ size_t pad_h_;
+ size_t stride_h_;
+
+ size_t channels_;
+ size_t num_filters_;
+
+ bool bias_term_;
+
+ size_t workspace_byte_limit_;
+ string prefer_;
+};
+
+struct CudnnConvHandle{
+ cudnnTensorDescriptor_t x_desc_ ;
+ cudnnTensorDescriptor_t y_desc_ ;
+ cudnnTensorDescriptor_t bias_desc_ ;
+ cudnnFilterDescriptor_t filter_desc_ ;
+ cudnnConvolutionDescriptor_t conv_desc_ ;
+ cudnnConvolutionFwdAlgo_t fp_alg_;
+ cudnnConvolutionBwdFilterAlgo_t bp_filter_alg_;
+ cudnnConvolutionBwdDataAlgo_t bp_data_alg_;
+
+ size_t workspace_count_;
+ Tensor workspace_;
+
+ size_t height_;
+ size_t width_;
+ size_t conv_height_;
+ size_t conv_width_;
+ size_t batchsize;
+};
+
+ConvHandle SetupConv(const size_t in_channels, const LayerConf &conf);
+
+CudnnConvHandle InitCudnn(const Tensor &input, const ConvHandle ch);
+
+Tensor CudnnConvForward(const Tensor &x, const Tensor &W, const Tensor &b,
+ const ConvHandle ch, const CudnnConvHandle cch);
+
+Tensor CudnnConvBackwardW(const Tensor &dy, const Tensor &x, const Tensor &W, const CudnnConvHandle cch);
+
+Tensor CudnnConvBackwardb(const Tensor &dy, const Tensor &b, const CudnnConvHandle cch);
+
+Tensor CudnnConvBackwardx(const Tensor &dy, const Tensor &W, const Tensor &x, const CudnnConvHandle cch);
+
+}