You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@singa.apache.org by wa...@apache.org on 2018/07/01 13:10:32 UTC
[3/7] incubator-singa git commit: SINGA-362 Add functions to support
the einsum function. 1. Add the transpose function in Python and use it
in existing functions. 2. One problem still needs to be fixed: the tensor
cannot be reshaped after transpose.
SINGA-362 Add functions to support the einsum function
1. Add the transpose function in Python and use it in existing functions.
2. One problem still needs to be fixed: the tensor cannot be reshaped after transpose.
Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/5e8f6a4f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/5e8f6a4f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/5e8f6a4f
Branch: refs/heads/master
Commit: 5e8f6a4f5a5903ff76a1a176a3c135514c0299ca
Parents: 7d25ed9
Author: sheyujian <sh...@me.com>
Authored: Thu May 24 14:10:41 2018 +0800
Committer: sheyujian <sh...@me.com>
Committed: Fri May 25 10:12:39 2018 +0800
----------------------------------------------------------------------
include/singa/core/tensor.h | 2 +-
python/singa/tensor.py | 212 ++++++++++++++++++++++++---------------
src/api/core_tensor.i | 4 +-
src/core/tensor/tensor.cc | 2 +-
test/python/test_tensor.py | 19 +++-
5 files changed, 154 insertions(+), 85 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5e8f6a4f/include/singa/core/tensor.h
----------------------------------------------------------------------
diff --git a/include/singa/core/tensor.h b/include/singa/core/tensor.h
index 3cfafc5..7947d93 100644
--- a/include/singa/core/tensor.h
+++ b/include/singa/core/tensor.h
@@ -189,7 +189,7 @@ class Tensor {
Tensor Transpose() const;
/// Change the axes
- Tensor Transpose(const vector<size_t>& axes) const;
+ Tensor Transpose(const vector<size_t> &axes) const;
/// Copy the meta info with data block shared.
Tensor &operator=(const Tensor &in);
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5e8f6a4f/python/singa/tensor.py
----------------------------------------------------------------------
diff --git a/python/singa/tensor.py b/python/singa/tensor.py
index d559ecb..5f38ef2 100644
--- a/python/singa/tensor.py
+++ b/python/singa/tensor.py
@@ -136,9 +136,20 @@ class Tensor(object):
'''
return self.data.transpose()
- # def transpose(self):
-
- # return self.data
+ def transpose(self,axes = None):
+ '''
+ To transpose the tensor
+ '''
+ if axes == None:
+ tshape = [self.shape[x] for x in range(len(self.shape))]
+ self.shape = tuple(tshape)
+ self.data = self.data.Transpose()
+ else:
+ if(len(axes) != len(self.shape)):
+ raise ValueError('dimensions do not match')
+ tshape = [self.shape[x] for x in axes]
+ self.shape = tuple(tshape)
+ self.data = self.data.Transpose(list(axes))
def size(self): # TODO(wangwei) compute size
'''
@@ -259,24 +270,61 @@ class Tensor(object):
'''
return _call_singa_func(self.data.Clone)
- def repeat_(self, repeats, axis):
- ret = CTensor()
- if isinstance(repeats, int):
- if axis == 9999:
+ def repeat(self, repeats, axis):
+ # ret = CTensor()
+ # if isinstance(repeats, int):
+ # if axis == 9999:
+ # Repeats = [repeats,]
+ # ret = self.data.Repeat(Repeats, axis)
+ # else:
+ # Repeats = [repeats,]
+ # ret = self.data.Repeat(Repeats, axis)
+
+
+ # elif isinstance(repeats, tuple) or isinstance(repeats, list):
+ # if axis == 9999:
+ # ret = self.data.Repeat(list(repeats), axis)
+
+ # elif axis >= 0:
+ # ret = self.data.Repeat(list(repeats), axis)
+ # return ret
+
+ t_ndim = self.ndim()
+ if isinstance(repeats, int) or isinstance(repeats, long):
+ if repeats < 0:
+ raise ValueError("'repeats' should not be negative: {}".format(repeats))
+ if axis != None and axis < 0:
+ axis += t_ndim
+ # broadcast = True
+ if axis == None:
+ axis = 9999
+ self.shape = (product(self.shape)*repeats,)
Repeats = [repeats,]
- ret = self.data.Repeat(Repeats, axis)
- else:
+ self.data = self.data.Repeat(Repeats, axis)
+ elif axis >= 0:
+ t_shape = list(self.shape)
+ t_shape[axis] = self.shape[axis]*repeats
+ self.shape = tuple(t_shape)
Repeats = [repeats,]
- ret = self.data.Repeat(Repeats, axis)
- return ret
+ self.data = self.data.Repeat(Repeats, axis)
elif isinstance(repeats, tuple) or isinstance(repeats, list):
- if axis == 9999:
- ret = self.data.Repeat(list(repeats), axis)
-
+ for rep in repeats:
+ if rep < 0:
+ raise ValueError("'repeats' should be int or sequence: {}".format(repeats))
+
+ if axis != None and axis < 0:
+ axis += t_ndim
+ if axis == None:
+ axis = 9999
+ raise ValueError("when axis us None, 'repeats' should be int: {}".format(repeats))
elif axis >= 0:
- ret = self.data.Repeat(list(repeats), axis)
- return ret
+ t_shape = list(self.shape)
+ t_shape[axis] = sum(repeats)
+ self.shape = tuple(t_shape)
+ self.data = self.data.Repeat(list(repeats), axis)
+ else:
+ raise ValueError('repeats should be int or sequence')
@@ -580,6 +628,20 @@ def reshape(t, s):
'''
return _call_singa_func(singa.Reshape, t.data, s)
+def Reshape(t,s):
+ ret = t.deepcopy()
+ ret.reshape(s)
+ return ret
+
+def transpose(t,axes = None):
+ '''
+ Returns:
+ the transposed tensor
+ '''
+ ret = t.deepcopy()
+ ret.transpose(axes)
+ return ret
+
def copy_data_to_from(dst, src, size, dst_offset=0, src_offset=0):
'''Copy the data between two Tensor instances which could be on different
@@ -1082,26 +1144,50 @@ def einsum(ops, *args):
reshape_A = list(A.shape) + broadcast_a
reshape_B = list(B.shape) + broadcast_b
- A_ = to_numpy(A)
- B_ = to_numpy(B)
+ # A_ = to_numpy(A)
+ # B_ = to_numpy(B)
- mult_A = np.repeat(A_, np.product(broadcast_a)).reshape(
- reshape_A).transpose(transpose_A)
- mult_B = np.repeat(B_, np.product(broadcast_b)).reshape(
- reshape_B).transpose(transpose_B)
+ # mult_A = np.repeat(A_, np.product(broadcast_a)).reshape(
+ # reshape_A).transpose(transpose_A)
+ # mult_B = np.repeat(B_, np.product(broadcast_b)).reshape(
+ # reshape_B).transpose(transpose_B)
- if mult_A.shape != mult_B.shape:
- raise ValueError("Error: matrix dimension mismatch")
- res_ = np.multiply(mult_A, mult_B)
+ # if mult_A.shape != mult_B.shape:
+ # raise ValueError("Error: matrix dimension mismatch")
+ # res_ = np.multiply(mult_A, mult_B)
# reduce the axis and find the final transpose for the output
+ # sum_R = sorted(sums, reverse=True)
+ # for i in sum_R:
+ # res_ = res_.sum(axis=i)
+ # transpose_res = [sorted(list(outputops)).index(x) for x in list(outputops)]
+ # res_ = res_.transpose(transpose_res)
+ # res = from_numpy(res_)
+ # return res
+ if len(broadcast_a) == 0:
+ broadcast_a = [1]
+ if len(broadcast_b) == 0:
+ broadcast_b = [1]
+ mult_A = repeat(A, product(broadcast_a))
+ mult_A.reshape(reshape_A)
+ mult_A = transpose(mult_A,transpose_A)
+ mult_B = repeat(B, product(broadcast_b))
+ mult_B.reshape(reshape_B)
+ mult_B = transpose(mult_B, transpose_B)
+
+ if mult_A.shape != mult_B.shape:
+ raise ValueError("Error: matrix dimension mismatch")
+ res = eltwise_mult(mult_A, mult_B)
sum_R = sorted(sums, reverse=True)
for i in sum_R:
- res_ = res_.sum(axis=i)
+ res = sum2(res, axis=i)
transpose_res = [sorted(list(outputops)).index(x) for x in list(outputops)]
- res_ = res_.transpose(transpose_res)
- res = from_numpy(res_)
+ res = transpose(res, transpose_res)
+
return res
+
+
+
def sum2(t, axis=None, out=None):
'''Sum of tensor elements over given axis
@@ -1162,59 +1248,12 @@ def sum2(t, axis=None, out=None):
return ret
def repeat (t, repeats, axis = None):
- t_ndim = t.ndim()
- if isinstance(repeats, int):
- if repeats < 0:
- raise ValueError("'repeats' should not be negative: {}".format(repeats))
- if axis != None and axis < 0:
- axis += t_ndim
- # broadcast = True
- if axis == None:
- axis = 9999
- ret = Tensor()
- ret.shape = (product(t.shape)*repeats,)
- # Repeats = [repeats,]
- ret.data = t.repeat_(repeats, axis)
- # ret.data = t.data.Repeat(Repeats, axis)
- elif axis >= 0:
- ret = Tensor()
- t_shape = list(t.shape)
- t_shape[axis] = t.shape[axis]*repeats
- print(t_shape)
- ret.shape = tuple(t_shape)
- print(ret.shape)
- # Repeats = [repeats,]
- ret.data = t.repeat_(repeats, axis)
- # ret.data = t.data.Repeat(Repeats, axis)
- print(ret.shape)
-
- elif isinstance(repeats, tuple) or isinstance(repeats, list):
- for rep in repeats:
- if rep < 0:
- raise ValueError("'repeats' should be int or sequence: {}".format(repeats))
-
- if axis != None and axis < 0:
- axis += t_ndim
- if axis == None:
- axis = 9999
- ret = Tensor()
- ret.shape = (sum(repeats), )
- t_shape = list(t.shape)
- ret.data = t.repeat_(repeats, axis)
- #ret = t.data.Repeat(list(repeats), axis)
-
- elif axis >= 0:
- ret = Tensor()
- t_shape = list(t.shape)
- t_shape[axis] = sum(repeats)
- ret.shape = tuple(t_shape)
- ret.data = t.repeat_(repeats, axis)
- #ret = t.data.Repeat(list(repeats), axis)
- else:
- raise ValueError('repeats should be int or sequence')
+
+ ret = t.deepcopy()
+ ret.repeat(repeats,axis)
return ret
-
+
def tensordot (A,B,axes=2):
"""Returns the tensor multiplication of two tensors along specified axes.
@@ -1317,8 +1356,21 @@ def tensordot (A,B,axes=2):
B_ = to_numpy(B)
at_ = np.transpose(A_,newaxes_a).reshape(newshape_a)
bt_ = np.transpose(B_,newaxes_b).reshape(newshape_b)
+ # print(at_)
+ # print(bt_)
at = from_numpy(at_)
bt = from_numpy(bt_)
+
+ # A = transpose(A, newaxes_a)
+ # B = transpose(B, newaxes_b)
+ # A =
+ # at = Reshape(A, newshape_a)
+ # bt = Reshape(B, newshape_b)
+ # _at = to_numpy(at)
+ # _bt = to_numpy(bt)
+ # print(_at)
+ # print(_bt)
+
res = mult(at,bt)
if len(olda + oldb) == 0:
olda = [1]
@@ -1326,7 +1378,7 @@ def tensordot (A,B,axes=2):
res.reshape(tuple(olda + oldb))
else:
res.reshape(tuple(olda + oldb))
- print(res.shape)
+ # print(res.shape)
# res_ = np.dot(at_, bt_)
# res = from_numpy(res_.reshape(olda + oldb))
#reshape the result
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5e8f6a4f/src/api/core_tensor.i
----------------------------------------------------------------------
diff --git a/src/api/core_tensor.i b/src/api/core_tensor.i
index 756fe60..587dddd 100644
--- a/src/api/core_tensor.i
+++ b/src/api/core_tensor.i
@@ -100,8 +100,10 @@ namespace singa{
const DataType data_type() const;
const std::vector<size_t> &shape() const;
const size_t shape(size_t idx) const;
- size_t nDim() const;
bool transpose() const;
+ size_t nDim() const;
+ Tensor Transpose() const;
+ Tensor Transpose(const std::vector<size_t> &axes) const;
size_t Size() const;
size_t MemSize() const;
void Reshape(const std::vector<size_t> &shape);
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5e8f6a4f/src/core/tensor/tensor.cc
----------------------------------------------------------------------
diff --git a/src/core/tensor/tensor.cc b/src/core/tensor/tensor.cc
index b75ac40..de2ea8a 100644
--- a/src/core/tensor/tensor.cc
+++ b/src/core/tensor/tensor.cc
@@ -424,7 +424,7 @@ Tensor Tensor::Transpose() const {
//transpose with axes
// TODO(wangwei) the shape and axes should match
-Tensor Tensor::Transpose(const vector<size_t>& axes) const {
+Tensor Tensor::Transpose(const vector<size_t> &axes) const {
// if(axes.size() != shape_.size()){
// std::cout << "Warning: Size of input axes doesn't match size of shape" << std::endl;
// return void();
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5e8f6a4f/test/python/test_tensor.py
----------------------------------------------------------------------
diff --git a/test/python/test_tensor.py b/test/python/test_tensor.py
index a47bbff..7d83677 100644
--- a/test/python/test_tensor.py
+++ b/test/python/test_tensor.py
@@ -165,6 +165,21 @@ class TestTensorMethods(unittest.TestCase):
b = tensor.to_numpy(t)
self.assertEqual(np.sum(a-b), 0.)
+ def test_transpose(self):
+ a = np.array([1.1,1.1,1.1,1.1,1.4,1.3,1.1,1.6,1.1,1.1,1.1,1.2])
+ a = np.reshape(a,(2,3,2))
+ ta = tensor.from_numpy(a)
+
+ A1 = np.transpose(a)
+ tA1 = tensor.transpose(ta)
+ TA1 = tensor.to_numpy(tA1)
+ A2 = np.transpose(a,[0,2,1])
+ tA2 = tensor.transpose(ta,[0,2,1])
+ TA2 = tensor.to_numpy(tA2)
+
+ self.assertAlmostEqual(np.sum(TA1 - A1), 0.,places=3)
+ self.assertAlmostEqual(np.sum(TA2 - A2), 0.,places=3)
+
def test_einsum(self):
a = np.array([1.1,1.1,1.1,1.1,1.4,1.3,1.1,1.6,1.1,1.1,1.1,1.2])
@@ -193,8 +208,8 @@ class TestTensorMethods(unittest.TestCase):
ta_repeat2 = tensor.repeat(ta, 4, axis = 1)
a_repeat2 = np.repeat(a, 4, axis = 1)
Ta_repeat2 = tensor.to_numpy(ta_repeat2)
- print(Ta_repeat2)
- print(a_repeat2)
+ # print(Ta_repeat2)
+ # print(a_repeat2)
self.assertAlmostEqual(np.sum(Ta_repeat1 - a_repeat1), 0., places=3)
self.assertAlmostEqual(np.sum(Ta_repeat2 - a_repeat2), 0., places=3)