Posted to commits@singa.apache.org by wa...@apache.org on 2018/07/05 03:10:11 UTC

[16/18] incubator-singa git commit: SINGA-371 Implement functional operations in c++ for autograd

SINGA-371 Implement functional operations in c++ for autograd

- Fixed several bugs in the new convolution operations:
  - _Conv2D stores (x, W, b) only when the handle has a bias term, and (x, W) otherwise; the placeholder bias becomes an empty CTensor.
  - ConvHandle/CudnnConvHandle constructors take std::vector and std::string arguments by const reference.
  - Im2col/Col2im move out of the Convolution class and become free functions.
  - The cuDNN lambdas capture tensors and handles by reference and drop unused captures; the descriptor members default to nullptr.
  - The no-bias tests check y_without_bias instead of y.


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/4a45ee6f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/4a45ee6f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/4a45ee6f

Branch: refs/heads/master
Commit: 4a45ee6f080bb9eacdeb1294047a78a3dbd4635a
Parents: 5340b65
Author: xuewanqi <xu...@outlook.com>
Authored: Wed Jul 4 06:46:09 2018 +0000
Committer: xuewanqi <xu...@outlook.com>
Committed: Wed Jul 4 07:31:58 2018 +0000

----------------------------------------------------------------------
 python/singa/autograd.py           | 28 ++++++++++++++++------------
 src/api/model_operation.i          | 18 +++++++++---------
 src/model/layer/convolution.cc     |  4 ++--
 src/model/layer/convolution.h      | 21 +++++++++++----------
 src/model/operation/convolution.cc | 28 ++++++++++++++--------------
 src/model/operation/convolution.h  | 18 +++++++++---------
 test/python/test_operation.py      |  4 ++--
 7 files changed, 63 insertions(+), 58 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4a45ee6f/python/singa/autograd.py
----------------------------------------------------------------------
diff --git a/python/singa/autograd.py b/python/singa/autograd.py
index 2a10608..b05f701 100755
--- a/python/singa/autograd.py
+++ b/python/singa/autograd.py
@@ -463,7 +463,10 @@ class _Conv2D(Operation):
         #assert 0 == 0, 'invalid padding'
 
         if training:
-            self.inputs = (x, W, b)
+            if self.handle.bias_term_:
+                self.inputs = (x, W, b)
+            else:
+                self.inputs = (x, W)
 
         if self.handle.device_id == -1:
             return singa.CpuConvForward(x, W, b, self.handle)
@@ -717,32 +720,33 @@ class Conv2D(NewLayer):
         else:
             # to keep consistency when to do forward.
             self.b = Tensor(data=CTensor(
-                [1]), requires_grad=False, stores_grad=False)
-            self.b.set_value(0.0)
+                []), requires_grad=False, stores_grad=False)
 
     def __call__(self, x):
-        assert x.shape[1] == self.in_channels,'in_channels dismatched'
-        assert (x.shape[2]+2*self.padding[0]-self.kernel_size[0])%self.stride[0] == 0, 'invalid padding or strides.'
-        assert (x.shape[3]+2*self.padding[1]-self.kernel_size[1])%self.stride[1] == 0, 'invalid padding or stride.'
+        assert x.shape[1] == self.in_channels, 'in_channels dismatched'
+        assert (x.shape[2] + 2 * self.padding[0] - self.kernel_size[0]
+                ) % self.stride[0] == 0, 'invalid padding or strides.'
+        assert (x.shape[3] + 2 * self.padding[1] - self.kernel_size[1]
+                ) % self.stride[1] == 0, 'invalid padding or stride.'
 
         self.device_check(x, self.W, self.b)
 
         if x.device.id() == -1:
             if not hasattr(self, 'handle'):
                 self.handle = singa.ConvHandle(x.data, self.kernel_size, self.stride,
-                                                 self.padding, self.in_channels, self.out_channels, self.bias)
+                                               self.padding, self.in_channels, self.out_channels, self.bias)
             elif x.shape[0] != self.handle.batchsize:
                 self.handle = singa.ConvHandle(x.data, self.kernel_size, self.stride,
-                                                 self.padding, self.in_channels, self.out_channels, self.bias)
+                                               self.padding, self.in_channels, self.out_channels, self.bias)
         else:
             if not hasattr(self, 'handle'):
                 self.handle = singa.CudnnConvHandle(x.data, self.kernel_size, self.stride,
-                                                      self.padding, self.in_channels, self.out_channels, self.bias,
-                                                      self.inner_params['workspace_MB_limit'] * 1024 * 1024, self.inner_params['cudnn_prefer'])
+                                                    self.padding, self.in_channels, self.out_channels, self.bias,
+                                                    self.inner_params['workspace_MB_limit'] * 1024 * 1024, self.inner_params['cudnn_prefer'])
             elif x.shape[0] != self.handle.batchsize:
                 self.handle = singa.CudnnConvHandle(x.data, self.kernel_size, self.stride,
-                                                      self.padding, self.in_channels, self.out_channels, self.bias,
-                                                      self.inner_params['workspace_MB_limit'] * 1024 * 1024, self.inner_params['cudnn_prefer'])
+                                                    self.padding, self.in_channels, self.out_channels, self.bias,
+                                                    self.inner_params['workspace_MB_limit'] * 1024 * 1024, self.inner_params['cudnn_prefer'])
         self.handle.device_id = x.device.id()
 
         y = conv2d(x, self.W, self.b, self.handle)

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4a45ee6f/src/api/model_operation.i
----------------------------------------------------------------------
diff --git a/src/api/model_operation.i b/src/api/model_operation.i
index 58e5270..26f5c69 100755
--- a/src/api/model_operation.i
+++ b/src/api/model_operation.i
@@ -10,10 +10,10 @@ struct ConvHandle{
 		size_t batchsize;
         const bool bias_term_;
 
-		ConvHandle(const Tensor &input, const std::vector<size_t> kernel_size, 
-                    const std::vector<size_t> stride, const std::vector<size_t> padding,
-                    const size_t in_channels, const size_t out_channels,
-                    const bool bias_term_);
+		ConvHandle(const Tensor &input, const std::vector<size_t>& kernel_size,
+             const std::vector<size_t>& stride, const std::vector<size_t>& padding,
+             const size_t in_channels, const size_t out_channels,
+             const bool bias);
               	};
 
 struct CudnnConvHandle{
@@ -21,11 +21,11 @@ struct CudnnConvHandle{
 		size_t batchsize;
         const bool bias_term_;
 		
-		CudnnConvHandle(const Tensor &input, const std::vector<size_t> kernel_size, 
-                    const std::vector<size_t> stride, const std::vector<size_t> padding,
-                    const size_t in_channels, const size_t out_channels,
-                    const bool bias_term_, const size_t workspace_byte_limit_=1024*1024*1024,
-                    const std::string prefer_="fastest");
+		CudnnConvHandle(const Tensor &input, const std::vector<size_t>& kernel_size,
+                  const std::vector<size_t>& stride, const std::vector<size_t>& padding,
+                  const size_t in_channels, const size_t out_channels,
+                  const bool bias, const size_t workspace_byte_limit_ = 1024 * 1024 * 1024,
+                  const std::string& prefer_ = "fastest");
                 };
 
 Tensor GpuConvForward(const Tensor &x, const Tensor &W, const Tensor &b, const CudnnConvHandle &cch);
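
The signature change above replaces by-value std::vector and std::string parameters with const references, so the kernel_size, stride and padding vectors are no longer copied each time a handle is built. A minimal standalone sketch of the difference, using illustrative names rather than SINGA's API:

#include <cstdio>
#include <vector>

// Copies the whole vector on every call.
size_t by_value(const std::vector<size_t> v) { return v.size(); }

// Binds directly to the caller's vector: no copy, still read-only.
size_t by_const_ref(const std::vector<size_t>& v) { return v.size(); }

int main() {
  std::vector<size_t> kernel_size = {3, 3};
  std::printf("%zu %zu\n", by_value(kernel_size), by_const_ref(kernel_size));
  return 0;
}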

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4a45ee6f/src/model/layer/convolution.cc
----------------------------------------------------------------------
diff --git a/src/model/layer/convolution.cc b/src/model/layer/convolution.cc
old mode 100644
new mode 100755
index 3fc7afb..cc77433
--- a/src/model/layer/convolution.cc
+++ b/src/model/layer/convolution.cc
@@ -194,7 +194,7 @@ void Convolution::ToDevice(std::shared_ptr<Device> device) {
   bias_.ToDevice(device);
 }
 
-void Convolution::Im2col(const float *data_im, const int channels,
+void Im2col(const float *data_im, const int channels,
                          const int height, const int width,
                          const int kernel_h, const int kernel_w,
                          const int pad_h, const int pad_w,
@@ -221,7 +221,7 @@ void Convolution::Im2col(const float *data_im, const int channels,
   }
 }
 
-void Convolution::Col2im(const float *data_col, const int channels,
+void Col2im(const float *data_col, const int channels,
                          const int height, const int width,
                          const int kernel_h, const int kernel_w,
                          const int pad_h, const int pad_w,

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4a45ee6f/src/model/layer/convolution.h
----------------------------------------------------------------------
diff --git a/src/model/layer/convolution.h b/src/model/layer/convolution.h
old mode 100644
new mode 100755
index 89b5319..d11cdeb
--- a/src/model/layer/convolution.h
+++ b/src/model/layer/convolution.h
@@ -46,16 +46,6 @@ class Convolution : public Layer {
 
   void ToDevice(std::shared_ptr<Device> device) override;
 
-  void Im2col(const float* data_im, const int channels, const int height,
-              const int width, const int kernel_h, const int kernel_w,
-              const int pad_h, const int pad_w, const int stride_h,
-              const int stride_w, float* data_col);
-
-  void Col2im(const float* data_col, const int channels, const int height,
-              const int width, const int kernel_h, const int kernel_w,
-              const int pad_h, const int pad_w, const int stride_h,
-              const int stride_w, float* data_im);
-
   const std::vector<Tensor> param_values() override {
     if (bias_term_)
       return std::vector<Tensor>{weight_, bias_};
@@ -97,5 +87,16 @@ class Convolution : public Layer {
   bool bias_term_;
   vector<size_t> out_sample_shape_;
 };
+
+void Im2col(const float* data_im, const int channels, const int height,
+            const int width, const int kernel_h, const int kernel_w,
+            const int pad_h, const int pad_w, const int stride_h,
+            const int stride_w, float* data_col);
+
+void Col2im(const float* data_col, const int channels, const int height,
+            const int width, const int kernel_h, const int kernel_w,
+            const int pad_h, const int pad_w, const int stride_h,
+            const int stride_w, float* data_im);
+            
 }  // namespace singa
 #endif  // SRC_MODEL_LAYER_CONVOLUTION_H_
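
With Im2col and Col2im moved out of the Convolution class, CpuConvForward and the CPU backward functions can call them as plain free functions in the singa namespace, without constructing a Convolution layer. A self-contained sketch of the same Caffe-style im2col signature on a toy 1x3x3 input (illustrative; not guaranteed to be byte-identical to the body in convolution.cc):

#include <cstdio>

// Unrolls (channels, height, width) image patches into a column buffer so
// the convolution can be computed as a single matrix multiplication.
void Im2col(const float* data_im, const int channels, const int height,
            const int width, const int kernel_h, const int kernel_w,
            const int pad_h, const int pad_w, const int stride_h,
            const int stride_w, float* data_col) {
  const int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
  const int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
  const int channels_col = channels * kernel_h * kernel_w;
  for (int c = 0; c < channels_col; ++c) {
    const int w_offset = c % kernel_w;
    const int h_offset = (c / kernel_w) % kernel_h;
    const int c_im = c / kernel_w / kernel_h;
    for (int h = 0; h < height_col; ++h) {
      for (int w = 0; w < width_col; ++w) {
        const int h_pad = h * stride_h - pad_h + h_offset;
        const int w_pad = w * stride_w - pad_w + w_offset;
        data_col[(c * height_col + h) * width_col + w] =
            (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width)
                ? data_im[(c_im * height + h_pad) * width + w_pad]
                : 0.f;
      }
    }
  }
}

int main() {
  const int channels = 1, height = 3, width = 3;
  const int kernel = 2, pad = 0, stride = 1;
  const int conv_h = (height + 2 * pad - kernel) / stride + 1;  // 2
  const int conv_w = (width + 2 * pad - kernel) / stride + 1;   // 2
  float image[channels * height * width];
  for (int i = 0; i < channels * height * width; ++i)
    image[i] = static_cast<float>(i);
  float col[channels * kernel * kernel * conv_h * conv_w];
  // Called directly, with no Convolution object in sight.
  Im2col(image, channels, height, width, kernel, kernel, pad, pad,
         stride, stride, col);
  for (int r = 0; r < channels * kernel * kernel; ++r) {      // one row per
    for (int c = 0; c < conv_h * conv_w; ++c)                 // kernel offset
      std::printf("%4.0f", col[r * conv_h * conv_w + c]);
    std::printf("\n");                                        // 0 1 3 4 / ...
  }
  return 0;
}

Laid out this way, the column buffer of shape (channels * kernel_h * kernel_w, conv_h * conv_w) is what the CPU forward pass multiplies against the flattened filter matrix.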

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4a45ee6f/src/model/operation/convolution.cc
----------------------------------------------------------------------
diff --git a/src/model/operation/convolution.cc b/src/model/operation/convolution.cc
index d64fbc1..9a702fa 100755
--- a/src/model/operation/convolution.cc
+++ b/src/model/operation/convolution.cc
@@ -1,10 +1,10 @@
 #include "./convolution.h"
-// #include "../layer/convolution.h"
-#include<iostream>
+#include "../layer/convolution.h"
+
 
 namespace singa {
 
-ConvHandle::ConvHandle(const Tensor &input, const std::vector<size_t> kernel_size,
+ConvHandle::ConvHandle(const Tensor &input, const std::vector<size_t>& kernel_size,
                        const std::vector<size_t>& stride, const std::vector<size_t>& padding,
                        const size_t in_channels, const size_t out_channels,
                        const bool bias) {
@@ -37,7 +37,7 @@ ConvHandle::ConvHandle(const Tensor &input, const std::vector<size_t> kernel_siz
   imagesize = input.Size() / batchsize;
 }
 
-// Convolution C;
+
 
 Tensor CpuConvForward(const Tensor &x, Tensor &W,  Tensor &b, const ConvHandle &ch) {
   CHECK_EQ(x.device()->lang(), kCpp);
@@ -67,7 +67,7 @@ Tensor CpuConvForward(const Tensor &x, Tensor &W,  Tensor &b, const ConvHandle &
   float *data_col = new float[ch.col_height_ * ch.col_width_];
   auto in_data = x.data<float>();
   for (size_t num = 0; num < ch.batchsize; num++) {
-    C.Im2col(in_data + num * ch.imagesize, ch.channels_, ch.height_, ch.width_, ch.kernel_h_,
+    Im2col(in_data + num * ch.imagesize, ch.channels_, ch.height_, ch.width_, ch.kernel_h_,
              ch.kernel_w_, ch.pad_h_, ch.pad_w_, ch.stride_h_, ch.stride_w_, data_col);
 
     col_data.CopyDataFromHostPtr(data_col, ch.col_height_ * ch.col_width_);
@@ -105,7 +105,7 @@ Tensor CpuConvBackwardx(const Tensor &dy, Tensor &W, const Tensor &x, const Conv
     CopyDataToFrom(&grad_b, dy, grad_b.Size(), 0, num * grad_b.Size());
     Tensor dcol_b = Mult(W.T(), grad_b);
     auto dcol_data = dcol_b.data<float>();
-    C.Col2im(dcol_data, ch.channels_, ch.height_, ch.width_, ch.kernel_h_, ch.kernel_w_, ch.pad_h_,
+    Col2im(dcol_data, ch.channels_, ch.height_, ch.width_, ch.kernel_h_, ch.kernel_w_, ch.pad_h_,
              ch.pad_w_, ch.stride_h_, ch.stride_w_, dx_b);
     dx.CopyDataFromHostPtr(dx_b, ch.imagesize, num * ch.imagesize);
   }
@@ -134,7 +134,7 @@ Tensor CpuConvBackwardW(const Tensor &dy, const Tensor &x, const Tensor &W, cons
   float *data_col = new float[ch.col_height_ * ch.col_width_];
   auto in_data = dy.data<float>();
   for (size_t num = 0; num < ch.batchsize; num++) {
-    C.Im2col(in_data + num * ch.imagesize, ch.channels_, ch.height_, ch.width_, ch.kernel_h_,
+    Im2col(in_data + num * ch.imagesize, ch.channels_, ch.height_, ch.width_, ch.kernel_h_,
              ch.kernel_w_, ch.pad_h_, ch.pad_w_, ch.stride_h_, ch.stride_w_, data_col);
     col_data.CopyDataFromHostPtr(data_col, ch.col_height_ * ch.col_width_);
     Tensor grad_b(Shape{ch.num_filters_, ch.conv_height_ * ch.conv_width_});
@@ -171,9 +171,9 @@ Tensor CpuConvBackwardb(const Tensor &dy, const Tensor &b, const ConvHandle &ch)
 #ifdef USE_CUDNN
 CudnnConvHandle::CudnnConvHandle(const Tensor &input, const std::vector<size_t>& kernel_size,
                                  const std::vector<size_t>& stride, const std::vector<size_t>& padding,
-                                 const size_t in_channels, const size_t out_channels, const bool bias_term_,
+                                 const size_t in_channels, const size_t out_channels, const bool bias,
                                  const size_t workspace_byte_limit_, const std::string& prefer_)
-  : ConvHandle(input, kernel_size, stride, padding, in_channels, out_channels, bias_term_) {
+  : ConvHandle(input, kernel_size, stride, padding, in_channels, out_channels, bias) {
 
   DataType dtype = input.data_type();
   auto dev = input.device();
@@ -295,7 +295,7 @@ Tensor GpuConvForward(const Tensor &x, const Tensor &W, const Tensor &b, const C
   Shape shape{cch.batchsize, cch.num_filters_, cch.conv_height_, cch.conv_width_};
   Tensor output(shape, dev, dtype);
 
-  output.device()->Exec([output, x, W, cch](Context * ctx) {
+  output.device()->Exec([&output, &x, &W, &cch](Context * ctx) {
     Block *inblock = x.block(), *outblock = output.block(),
            *wblock = W.block();
     float alpha = 1.f, beta = 0.f;
@@ -308,7 +308,7 @@ Tensor GpuConvForward(const Tensor &x, const Tensor &W, const Tensor &b, const C
   }, {x.block(), W.block()}, {output.block()}, cch.workspace_.block());
 
   if (cch.bias_term_) {
-    output.device()->Exec([output, b, cch](Context * ctx) {
+    output.device()->Exec([&output, &b, &cch](Context * ctx) {
       float beta = 1.f, alpha = 1.0f;
       Block *outblock = output.block(), *bblock = b.block();
       cudnnAddTensor(ctx->cudnn_handle, &alpha, cch.bias_desc_,
@@ -326,7 +326,7 @@ Tensor GpuConvBackwardx(const Tensor &dy, const Tensor &W, const Tensor &x, cons
   Tensor dx;
   dx.ResetLike(x);
 
-  dy.device()->Exec([dx, dy, W, cch](Context * ctx) {
+  dy.device()->Exec([&dx, &dy, &W, &cch](Context * ctx) {
     Block *wblock = W.block(), *dyblock = dy.block(),
            *dxblock = dx.block();
     float alpha = 1.f, beta = 0.f;
@@ -347,7 +347,7 @@ Tensor GpuConvBackwardW(const Tensor &dy, const Tensor &x, const Tensor &W, cons
   Tensor dW;
   dW.ResetLike(W);
 
-  dy.device()->Exec([dW, dy, x, W, cch](Context * ctx) {
+  dy.device()->Exec([&dW, &dy, &x, &cch](Context * ctx) {
     Block *inblock = x.block(), *dyblock = dy.block(),
            *dwblock = dW.block();
     float alpha = 1.f, beta = 0.f;
@@ -369,7 +369,7 @@ Tensor GpuConvBackwardb(const Tensor &dy, const Tensor &b, const CudnnConvHandle
   Tensor db;
   db.ResetLike(b);
 
-  dy.device()->Exec([db, dy, b, cch](Context * ctx) {
+  dy.device()->Exec([&db, &dy, &cch](Context * ctx) {
     Block *dyblock = dy.block(), *dbblock = db.block();
     float alpha = 1.f, beta = 0.f;
     cudnnConvolutionBackwardBias(ctx->cudnn_handle, &alpha, cch.y_desc_,
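
The capture lists above change from by-value to by-reference (and drop captures such as W and b that the closure bodies never touch), so the tensors and the handle are not copied every time a closure is enqueued on the device. A minimal sketch of the difference, independent of SINGA; the trade-off is that by-reference captures rely on the captured objects outliving the enqueued closure:

#include <cstdio>

// Counts how many times an object has been copied, standing in for a Tensor
// or a CudnnConvHandle with non-trivial state.
struct Payload {
  int copies = 0;
  Payload() = default;
  Payload(const Payload& other) : copies(other.copies + 1) {}
};

int main() {
  Payload p;
  auto captured_by_value = [p]() { return p.copies; };   // copy made right here
  auto captured_by_ref   = [&p]() { return p.copies; };  // no copy at all
  std::printf("by value saw %d copy, by reference saw %d\n",
              captured_by_value(), captured_by_ref());   // prints 1 and 0
  return 0;
}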

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4a45ee6f/src/model/operation/convolution.h
----------------------------------------------------------------------
diff --git a/src/model/operation/convolution.h b/src/model/operation/convolution.h
index a114b47..93f7775 100755
--- a/src/model/operation/convolution.h
+++ b/src/model/operation/convolution.h
@@ -3,12 +3,12 @@
 
 #include <string>
 #include <vector>
+#include "singa/core/tensor.h"
 #include "singa/utils/logging.h"
 
 #ifdef USE_CUDNN
 #include <cudnn.h>
-// #include "../layer/cudnn_convolution.h"
-// #include "../layer/cudnn_utils.h"
+#include "../layer/cudnn_utils.h"
 #endif // USE_CUDNN
 
 
@@ -21,7 +21,7 @@ class ConvHandle {
              const std::vector<size_t>& stride, const std::vector<size_t>& padding,
              const size_t in_channels, const size_t out_channels,
              const bool bias);
- protected:
+ 
   size_t kernel_w_;
   size_t pad_w_;
   size_t stride_w_;
@@ -66,12 +66,12 @@ class CudnnConvHandle: public ConvHandle {
                   const std::string& prefer_ = "fastest");
   ~CudnnConvHandle();
   // TODO(wangwei) add the destructor
- protected:
-  cudnnTensorDescriptor_t x_desc_ ;
-  cudnnTensorDescriptor_t y_desc_ ;
-  cudnnTensorDescriptor_t bias_desc_ ;
-  cudnnFilterDescriptor_t filter_desc_ ;
-  cudnnConvolutionDescriptor_t conv_desc_ ;
+ 
+  cudnnTensorDescriptor_t x_desc_ = nullptr;
+  cudnnTensorDescriptor_t y_desc_ = nullptr;
+  cudnnTensorDescriptor_t bias_desc_ = nullptr;
+  cudnnFilterDescriptor_t filter_desc_ = nullptr;
+  cudnnConvolutionDescriptor_t conv_desc_ = nullptr;
   cudnnConvolutionFwdAlgo_t fp_alg_;
   cudnnConvolutionBwdFilterAlgo_t bp_filter_alg_;
   cudnnConvolutionBwdDataAlgo_t bp_data_alg_;
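
Defaulting the descriptor members above to nullptr gives the still-to-be-written destructor (see the TODO) an unambiguous "not created" state, so it can release only the descriptors that were actually set up. A small sketch of that pattern with a stand-in handle type (illustrative only, not cuDNN):

#include <cstdio>

struct FakeDesc { int id; };  // stand-in for a cudnnTensorDescriptor_t

FakeDesc* CreateDesc(int id) { return new FakeDesc{id}; }
void DestroyDesc(FakeDesc* d) { std::printf("destroyed %d\n", d->id); delete d; }

struct Handle {
  FakeDesc* x_desc_ = nullptr;  // "never created" until someone sets it
  FakeDesc* y_desc_ = nullptr;

  ~Handle() {                   // releases only what actually exists
    if (x_desc_ != nullptr) DestroyDesc(x_desc_);
    if (y_desc_ != nullptr) DestroyDesc(y_desc_);
  }
};

int main() {
  Handle h;
  h.x_desc_ = CreateDesc(1);    // y_desc_ is never created
  return 0;                     // the destructor frees x_desc_ and skips y_desc_
}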

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4a45ee6f/test/python/test_operation.py
----------------------------------------------------------------------
diff --git a/test/python/test_operation.py b/test/python/test_operation.py
old mode 100644
new mode 100755
index 1bbc70c..315a992
--- a/test/python/test_operation.py
+++ b/test/python/test_operation.py
@@ -48,7 +48,7 @@ class TestPythonOperation(unittest.TestCase):
 
         # forward without bias
         y_without_bias = conv_without_bias_0(gpu_input_tensor)
-        self.check_shape(y.shape, (2, 1, 2, 2))
+        self.check_shape(y_without_bias.shape, (2, 1, 2, 2))
 
     def test_conv2d_cpu(self):
         # (in_channels, out_channels, kernel_size)
@@ -68,7 +68,7 @@ class TestPythonOperation(unittest.TestCase):
 
         # forward without bias
         y_without_bias = conv_without_bias_1(cpu_input_tensor)
-        self.check_shape(y.shape, (2, 1, 2, 2))
+        self.check_shape(y_without_bias.shape, (2, 1, 2, 2))
 
 if __name__ == '__main__':
     unittest.main()