Posted to commits@singa.apache.org by ka...@apache.org on 2018/07/13 07:47:06 UTC

[1/2] incubator-singa git commit: SINGA-382 Implement concat operation for autograd

Repository: incubator-singa
Updated Branches:
  refs/heads/master a36291824 -> 76779be72


SINGA-382 Implement concat operation for autograd

- Develop the concat operation and test its backward function.

- Its forward function has a bug in singa.ConcatOn(): the function cannot handle a vector of CTensor correctly.
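
For context, a minimal sketch of the symptom, assuming a build of these
commits with the SWIG bindings loaded (shapes are illustrative; the fix
lands in the follow-up commit below):

    from singa import tensor
    from singa import singa_wrap as singa

    t1 = tensor.Tensor((2, 3))
    t2 = tensor.Tensor((2, 3))
    t1.set_value(1.0)
    t2.set_value(2.0)

    # Before the follow-up fix, a plain Python tuple of CTensors does not
    # match the std::vector<Tensor> parameter, so SWIG rejects the call.
    out = singa.ConcatOn((t1.data, t2.data), 0)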


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/054f303a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/054f303a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/054f303a

Branch: refs/heads/master
Commit: 054f303af7b881b0dea98e39a77f52c3d41357ba
Parents: a362918
Author: xuewanqi <xu...@outlook.com>
Authored: Thu Jul 12 08:08:49 2018 +0000
Committer: Wang Wei <wa...@gmail.com>
Committed: Fri Jul 13 14:15:06 2018 +0800

----------------------------------------------------------------------
 include/singa/core/tensor.h |  2 +-
 python/singa/autograd.py    | 33 ++++++++++++++++++++++++++++++++-
 src/api/core_tensor.i       |  3 +++
 src/core/tensor/tensor.cc   |  3 ++-
 4 files changed, 38 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/054f303a/include/singa/core/tensor.h
----------------------------------------------------------------------
diff --git a/include/singa/core/tensor.h b/include/singa/core/tensor.h
old mode 100644
new mode 100755
index a73821c..905da27
--- a/include/singa/core/tensor.h
+++ b/include/singa/core/tensor.h
@@ -563,7 +563,7 @@ Tensor SliceColumns(const Tensor &in, const size_t start, const size_t end);
 /// tensor in 'in' is a 2D tensor. Values are copied, no memory sharing.
 Tensor ConcatenateRows(const vector<Tensor> &in);
 /// Return a tensor concatenated from the input tensors along the given axis.
-Tensor ConcatOn(const vector<Tensor> &in, int axis);
+Tensor ConcatOn(const std::vector<Tensor> &in, int axis);
 /// Alias name for function ConcatenateRows
 Tensor ConcatRows(const vector<Tensor> &in);
 /// Return a tensor which is horizontally stacked from tensors in 'in'. Each

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/054f303a/python/singa/autograd.py
----------------------------------------------------------------------
diff --git a/python/singa/autograd.py b/python/singa/autograd.py
index 16d7f82..c3986f3 100755
--- a/python/singa/autograd.py
+++ b/python/singa/autograd.py
@@ -485,7 +485,7 @@ def ctensor2numpy(x):
 
 class Flatten(Operation):
 
-    def __init(self, start_axis=1):
+    def __init__(self, start_axis=1):
         # flatten all axis after (inclusive) start_axis
         self.start_axis = start_axis
         assert start_axis == 1, 'must flatten into 2d array'
@@ -545,6 +545,37 @@ class Linear(Layer):
         return y
 
 
+class Concat(Operation):
+
+    def __init__(self, axis=0):
+        self.axis = axis
+
+    def forward(self, *xs):
+        if training:
+            offset = 0
+            self.slice_point = []
+            for t in xs:
+                offset += t.shape()[self.axis]
+                self.slice_point.append(offset)
+        return singa.ConcatOn(xs, self.axis)
+
+    def backward(self, dy):
+        assert hasattr(
+            self, 'slice_point'), 'Please set training to True before doing BP.'
+        assert self.slice_point[-1] == dy.shape()[self.axis], 'Shape mismatched.'
+        dxs = []
+        last_offset = 0
+        for p in self.slice_point:
+            dxs.append(singa.SliceOn(dy, last_offset, p, self.axis))
+            last_offset = p
+        return tuple(dxs)
+
+
+def concat(*xs):
+    # TODO: changeable axis
+    return Concat()(*xs)
+
+
 class _Conv2D(Operation):
 
     def __init__(self, handle):

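The bookkeeping above is easiest to see in isolation. Below is a
plain-numpy sketch of the same forward/backward logic, with numpy
standing in for singa.ConcatOn and singa.SliceOn (shapes are
illustrative):

    import numpy as np

    xs = [np.ones((2, 3)), np.ones((2, 5))]
    axis = 1

    # Forward: record the cumulative offset of each input along the
    # concat axis, then concatenate.
    slice_point, offset = [], 0
    for t in xs:
        offset += t.shape[axis]
        slice_point.append(offset)      # -> [3, 8]
    y = np.concatenate(xs, axis=axis)   # shape (2, 8)

    # Backward: cut dy back apart at the recorded offsets, which is what
    # singa.SliceOn(dy, last_offset, p, axis) does per input.
    dy = np.ones_like(y)
    dxs, last_offset = [], 0
    for p in slice_point:
        dxs.append(dy.take(range(last_offset, p), axis=axis))
        last_offset = p
    assert [d.shape for d in dxs] == [x.shape for x in xs]
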
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/054f303a/src/api/core_tensor.i
----------------------------------------------------------------------
diff --git a/src/api/core_tensor.i b/src/api/core_tensor.i
old mode 100644
new mode 100755
index 9427b11..e98590c
--- a/src/api/core_tensor.i
+++ b/src/api/core_tensor.i
@@ -227,6 +227,9 @@ namespace singa{
   template <typename DType> Tensor operator>=(const Tensor &t, const DType x);
   %template(opge) operator>= <float>;
 
+  Tensor ConcatOn(const std::vector<Tensor> &in, int axis);
+  Tensor SliceOn(const Tensor &in, const size_t start, const size_t end, int axis);
+
 
   /* ========== Arithmetic operations ========== */
   %rename(__add__) operator+(const Tensor &lhs, const Tensor &rhs);

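With these two declarations exposed through the interface file, both
functions become callable on the wrapped module. A minimal sketch of
SliceOn from Python, assuming a build of this commit (shapes are
illustrative):

    from singa import tensor
    from singa import singa_wrap as singa

    a = tensor.Tensor((4, 3))
    a.set_value(1.0)

    # Take rows [1, 3) along axis 0, per the signature above:
    # SliceOn(in, start, end, axis).
    part = singa.SliceOn(a.data, 1, 3, 0)   # CTensor of shape (2, 3)
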
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/054f303a/src/core/tensor/tensor.cc
----------------------------------------------------------------------
diff --git a/src/core/tensor/tensor.cc b/src/core/tensor/tensor.cc
index 1ac1b42..720ef90 100755
--- a/src/core/tensor/tensor.cc
+++ b/src/core/tensor/tensor.cc
@@ -23,6 +23,7 @@
 #include <utility>
 #include <algorithm>
 
+
 #define Noaxis 9999
 
 namespace singa {
@@ -871,7 +872,7 @@ void DivColumn(const Tensor &v, Tensor *M) {
   MultColumn(inv, M);
 }
 
-Tensor ConcatOn(const vector<Tensor> &in, int axis) {
+Tensor ConcatOn(const std::vector<Tensor> &in, int axis) {
   vector<Tensor> tmp;
   Shape out_shape = in[0].shape();
   size_t dim = in[0].shape().size();


[2/2] incubator-singa git commit: SINGA-382 Implement concat operation for autograd

Posted by ka...@apache.org.
SINGA-382 Implement concat operation for autograd

Fix the bug in calling the C++ ConcatOn function by converting the input args into the VecTensor type.
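
A minimal sketch of the workaround, assuming a build of this commit
(shapes are illustrative): wrapping the CTensors in the exported
VecTensor, the SWIG template for std::vector<Tensor>, gives the
ConcatOn binding the exact type it expects.

    from singa import tensor
    from singa import singa_wrap as singa

    t1 = tensor.Tensor((2, 3))
    t2 = tensor.Tensor((2, 3))
    t1.set_value(1.0)
    t2.set_value(2.0)

    # VecTensor is the SWIG-wrapped std::vector<Tensor>, so the call
    # now resolves cleanly.
    xs = singa.VecTensor([t1.data, t2.data])
    out = singa.ConcatOn(xs, 0)   # CTensor of shape (4, 3)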


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/76779be7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/76779be7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/76779be7

Branch: refs/heads/master
Commit: 76779be72ef67de5aba6bdbc669f3252ab8e4104
Parents: 054f303
Author: wang wei <wa...@comp.nus.edu.sg>
Authored: Thu Jul 12 22:02:07 2018 +0800
Committer: Wang Wei <wa...@gmail.com>
Committed: Fri Jul 13 15:08:46 2018 +0800

----------------------------------------------------------------------
 examples/autograd/mnist_cnn.py |  9 ++++++---
 python/singa/autograd.py       | 12 +++++++-----
 2 files changed, 13 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/76779be7/examples/autograd/mnist_cnn.py
----------------------------------------------------------------------
diff --git a/examples/autograd/mnist_cnn.py b/examples/autograd/mnist_cnn.py
index 92fc43a..3ddd532 100755
--- a/examples/autograd/mnist_cnn.py
+++ b/examples/autograd/mnist_cnn.py
@@ -107,7 +107,8 @@ if __name__ == '__main__':
     # operations initialization
     conv1 = autograd.Conv2D(1, 32, 3, padding=1, bias=False)
     bn1 = autograd.BatchNorm(32)
-    conv2 = autograd.Conv2D(32, 32, 3, padding=1)
+    conv21 = autograd.Conv2D(32, 16, 3, padding=1)
+    conv22 = autograd.Conv2D(32, 16, 3, padding=1)
     bn2 = autograd.BatchNorm(32)
     linear = autograd.Linear(32 * 28 * 28, 10)
     pooling1 = autograd.MaxPooling2D(3, 1, padding=1)
@@ -118,8 +119,10 @@ if __name__ == '__main__':
         y = autograd.relu(y)
         y = bn1(y)
         y = pooling1(y)
-
-        y = conv2(y)
+        y1 = conv21(y)
+        y2 = conv22(y)
+        y = autograd.concat((y1, y2), 1)
+        y = bn2(y)
         y = autograd.relu(y)
         y = bn2(y)
         y = pooling2(y)

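The example change splits conv2 into two 16-channel convolutions and
concatenates their outputs along axis 1 (channels), so the BatchNorm(32)
that follows still sees 32 channels. A quick numpy shape check of that
design (batch and spatial sizes are illustrative):

    import numpy as np

    n, h, w = 8, 28, 28
    y1 = np.zeros((n, 16, h, w))   # output of conv21
    y2 = np.zeros((n, 16, h, w))   # output of conv22
    y = np.concatenate((y1, y2), axis=1)
    assert y.shape == (n, 32, h, w)   # matches bn2 = BatchNorm(32)
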
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/76779be7/python/singa/autograd.py
----------------------------------------------------------------------
diff --git a/python/singa/autograd.py b/python/singa/autograd.py
index c3986f3..faa9685 100755
--- a/python/singa/autograd.py
+++ b/python/singa/autograd.py
@@ -557,7 +557,8 @@ class Concat(Operation):
             for t in xs:
                 offset += t.shape()[self.axis]
                 self.slice_point.append(offset)
-        return singa.ConcatOn(xs, self.axis)
+        x = singa.VecTensor(list(xs))
+        return singa.ConcatOn(x, self.axis)
 
     def backward(self, dy):
         assert hasattr(
@@ -571,9 +572,9 @@ class Concat(Operation):
         return tuple(dxs)
 
 
-def concat(*xs):
-    # TODO: changeable axis
-    return Concat()(*xs)
+def concat(xs, axis=0):
+    # xs is a tuple of multiple Tensors
+    return Concat(axis)(*xs)[0]
 
 
 class _Conv2D(Operation):
@@ -741,7 +742,8 @@ class BatchNorm(Layer):
             shape=param_shape, requires_grad=False, stores_grad=False)
 
     def __call__(self, x):
-        assert x.shape[1] == self.channels, 'number of channels dismatched.'
+        assert x.shape[1] == self.channels, 'number of channels mismatched. %d vs %d' % (
+            x.shape[1], self.channels)
 
         self.device_check(x, self.scale, self.bias,
                           self.running_mean, self.running_var)
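
After this pair of commits, the Python-facing call takes a tuple of
tensors plus an explicit axis and returns a single Tensor. A minimal
usage sketch, assuming a build of these commits (shapes are
illustrative):

    from singa import autograd, tensor

    autograd.training = True        # so forward records slice points
    x1 = tensor.Tensor((2, 16, 4, 4))
    x2 = tensor.Tensor((2, 16, 4, 4))
    x1.set_value(0.5)
    x2.set_value(1.5)

    # concat(xs, axis) as defined above; axis 1 joins along channels.
    y = autograd.concat((x1, x2), 1)   # Tensor of shape (2, 32, 4, 4)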