Posted to commits@singa.apache.org by wa...@apache.org on 2018/07/01 13:10:35 UTC
[6/7] incubator-singa git commit: SINGA-362 Add functions to support
einsum function; delete the repetitive reshape and transform,
which are the same as yisen's
SINGA-362 Add functions to support einsum function
Delete the repetitive Reshape and Transform code, which duplicates yisen's version.
Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/10f3aa1d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/10f3aa1d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/10f3aa1d
Branch: refs/heads/master
Commit: 10f3aa1d7e41c9f89ee3a7ef90644b492fbff543
Parents: 4940fef
Author: sheyujian <sh...@me.com>
Authored: Sun Jul 1 12:11:15 2018 +0800
Committer: sheyujian <sh...@me.com>
Committed: Sun Jul 1 12:49:00 2018 +0800
----------------------------------------------------------------------
include/singa/core/tensor.h | 10 +--
src/core/tensor/tensor.cc | 131 ++++++++++++++-------------------
src/core/tensor/tensor_math.h | 1 +
src/core/tensor/tensor_math_cpp.h | 14 ----
4 files changed, 57 insertions(+), 99 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/10f3aa1d/include/singa/core/tensor.h
----------------------------------------------------------------------
diff --git a/include/singa/core/tensor.h b/include/singa/core/tensor.h
index d9bb069..dca19b0 100644
--- a/include/singa/core/tensor.h
+++ b/include/singa/core/tensor.h
@@ -133,8 +133,8 @@ class Tensor {
size_t MemSize() const { return block_->size(); }
/// Reset the tensor shape; it may reallocate the block if MemSize() changes.
- // void Reshape(const Shape &shape);
- // void Reshape(Shape &&shape);
+ Tensor Reshape(const Shape &shape);
+ Tensor Reshape(Shape &&shape);
/// Reset the shape, device, and data type as given tensor.
/// If block size changes, then reallocate a new block.
@@ -191,10 +191,6 @@ class Tensor {
/// Change the axes
Tensor Transpose(const vector<size_t> &axes) const;
- Tensor Reshape(const Shape &shape);
-
- Tensor Reshape(Shape &&shape);
-
/// Copy the meta info with data block shared.
Tensor &operator=(const Tensor &in);
@@ -309,7 +305,6 @@ Tensor Sign(const Tensor &in);
Tensor Sqrt(const Tensor &in);
Tensor Square(const Tensor &in);
Tensor Tanh(const Tensor &in);
-Tensor Transform(const Tensor &in);
void Abs(const Tensor &in, Tensor *out);
void Exp(const Tensor &in, Tensor *out);
@@ -320,7 +315,6 @@ void Sign(const Tensor &in, Tensor *out);
void Sqrt(const Tensor &in, Tensor *out);
void Square(const Tensor &in, Tensor *out);
void Tanh(const Tensor &in, Tensor *out);
-void Transform(const Tensor &in, Tensor *out);
/// Element-wise operation, out[i] = in[i]^x
template <typename SType>
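The header change above un-comments Reshape and gives it a Tensor return value: reshaping now yields a tensor instead of mutating in place, and Transform disappears from the public API. A minimal usage sketch of the new signatures, assuming the usual Tensor(Shape) constructor; the shapes here are illustrative, not from the commit:

    // New Reshape semantics: returns a Tensor. It can share the data
    // block when the source is contiguous; a transposed source is
    // copied into a fresh contiguous block (see tensor.cc below).
    #include "singa/core/tensor.h"
    using singa::Tensor;
    using singa::Shape;

    void reshape_example() {
      Tensor a(Shape{2, 3});              // contiguous 2x3 tensor
      Tensor b = a.Reshape(Shape{3, 2});  // same 6 elements, new view
      Tensor t = a.Transpose({1, 0});     // non-default strides
      Tensor c = t.Reshape(Shape{6});     // materializes a contiguous copy
    }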
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/10f3aa1d/src/core/tensor/tensor.cc
----------------------------------------------------------------------
diff --git a/src/core/tensor/tensor.cc b/src/core/tensor/tensor.cc
index 3bf0a77..39ab12d 100644
--- a/src/core/tensor/tensor.cc
+++ b/src/core/tensor/tensor.cc
@@ -124,61 +124,41 @@ void Tensor::ResetLike(const Tensor &in) {
strides_ = in.strides_;
}
-Tensor Tensor::Reshape(const Shape &shape) {
- if (strides_.size() == 0)
- strides_.push_back(1);
-
- if (Product(shape_) != Product(shape)) {
- if (block_ != nullptr && block_->DecRefCount() == 0)
- device_->FreeBlock(block_);
- block_ = device_->NewBlock((int)(Product(shape) * SizeOf(data_type_)));
- shape_ = shape;
- generate_strides();
- return *this;
-
- } else if (transpose()) {
- Tensor t(shape_, device_, data_type_);
- t.block_ = t.device()->NewBlock((int)(Product(shape) * SizeOf(data_type_)));
- singa::Transform(*this, &t);
- t.shape_ = shape;
- return t;
- }
-
- shape_ = shape;
- generate_strides();
- Tensor t(shape, device_, data_type_);
- t.block_ = block_;
- t.block_->IncRefCount();
- return t;
-}
-
-Tensor Tensor::Reshape(Shape &&shape) {
- if (strides_.size() == 0)
- strides_.push_back(1);
-
- if (Product(shape_) != Product(shape)) {
- if (block_ != nullptr && block_->DecRefCount() == 0)
- device_->FreeBlock(block_);
- block_ = device_->NewBlock((int)(Product(shape) * SizeOf(data_type_)));
- shape_ = std::move(shape);
- generate_strides();
- return *this;
-
- } else if (transpose()) {
- Tensor t(shape_, device_, data_type_);
- t.block_ = t.device()->NewBlock((int)(Product(shape) * SizeOf(data_type_)));
- singa::Transform(*this, &t);
- t.shape_ = shape;
- return t;
- }
-
- shape_ = shape;
- generate_strides();
- Tensor t(shape, device_, data_type_);
- t.block_ = block_;
- t.block_->IncRefCount();
- return t;
-}
+// If the tensor is not transposed yet (i.e., it still has the default
+// contiguous strides), we simply change the shape and generate new
+// default strides. If the tensor is already transposed (non-default
+// strides), it should be copied to a new tensor with newly generated
+// default strides.
+// TODO(wangwei) raise an error if the shapes do not match
+
+// void Tensor::Reshape(const Shape &shape) {
+// if (strides_.size() == 0)
+// strides_.push_back(1);
+
+// if (Product(shape_) != Product(shape)) {
+// if (block_ != nullptr && block_->DecRefCount() == 0)
+// device_->FreeBlock(block_);
+// block_ = device_->NewBlock((int)(Product(shape) * SizeOf(data_type_)));
+// } else if (transpose()) {
+// LOG(FATAL) << "Reshape Error: Reshape called on transposed tensor. Not implemented yet.";
+// }
+// shape_ = shape;
+// generate_strides();
+// }
+
+// void Tensor::Reshape(Shape &&shape) {
+// if (strides_.size() == 0)
+// strides_.push_back(1);
+
+// if (Product(shape_) != Product(shape)) {
+// if (block_ != nullptr && block_->DecRefCount() == 0)
+// device_->FreeBlock(block_);
+// block_ = device_->NewBlock((int)(Product(shape) * SizeOf(data_type_)));
+// } else if (transpose()) {
+// LOG(FATAL) << "Reshape Error: Reshape called on transposed tensor. Not implemented yet.";
+// }
+// shape_ = std::move(shape);
+// generate_strides();
+// }
void Tensor::AsType(const DataType type) {
if (data_type_ != type) {
@@ -356,15 +336,6 @@ void Tensor::ToProto(singa::TensorProto *proto) const {
}
}
-Tensor Tensor::Clone(std::shared_ptr<Device> device) const {
- if (device == nullptr) device = device_;
- Tensor t(shape_, device_, data_type_);
- //t.transpose_ = transpose_;
- t.strides_ = strides_;
- t.CopyData(*this);
- return t;
-}
-
Tensor Tensor::Repeat(vector<size_t> repeats, int axis, std::shared_ptr<Device> device) {
if (device == nullptr) device = device_;
vector<size_t> tshape;
@@ -407,7 +378,15 @@ Tensor Tensor::Repeat(vector<size_t> repeats, int axis, std::shared_ptr<Device>
return t;
}
-//yisen todo
+Tensor Tensor::Clone(std::shared_ptr<Device> device) const {
+ if (device == nullptr) device = device_;
+ Tensor t(shape_, device_, data_type_);
+ //t.transpose_ = transpose_;
+ t.strides_ = strides_;
+ t.CopyData(*this);
+ return t;
+}
+
Tensor Tensor::T() const {
// this function only works for 2d tensors
CHECK_EQ(shape_.size(), 2u);
@@ -494,18 +473,17 @@ Tensor &Tensor::operator=(Tensor &&in) {
return *this;
}
-//yisen todo
-Tensor Reshape(const Tensor &in, const Shape &s) {
- Tensor out(in);
- out = out.Reshape(s);
- return out;
-}
+// Tensor Reshape(const Tensor &in, const Shape &s) {
+// // Tensor out(in);
+// // out.Reshape(s);
+// return out;
+// }
-Tensor Reshape(const Tensor &in, Shape &&s) {
- Tensor out(in);
- out = out.Reshape(std::move(s));
- return out;
-}
+// Tensor Reshape(const Tensor &in, Shape &&s) {
+// // Tensor out(in);
+// // out.Reshape(std::move(s));
+// return out;
+// }
#define GenUnaryTensorArgMemberFn(op, fn) \
Tensor &Tensor::op(const Tensor &in) { \
@@ -753,7 +731,6 @@ GenUnaryTensorFn(Sign);
GenUnaryTensorFn(Sqrt);
GenUnaryTensorFn(Square);
GenUnaryTensorFn(Tanh);
-GenUnaryTensorFn(Transform);
#define EltwiseBinaryTensorFn(fn, lhs, rhs, ret) \
do { \
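The comment block near the top of this hunk states the rule the new Reshape relies on: for a contiguous tensor, reshaping is only a metadata change (new shape plus regenerated default strides), while a transposed tensor must first be copied via Transform. A standalone sketch of default row-major stride generation, mirroring what generate_strides() is described as doing (the row-major layout is inferred from this file; the function name here is hypothetical):

    #include <cstddef>
    #include <vector>

    // stride[i] = product of all dimensions to the right of i
    std::vector<size_t> default_strides(const std::vector<size_t>& shape) {
      std::vector<size_t> strides(shape.size(), 1);
      for (int i = static_cast<int>(shape.size()) - 2; i >= 0; --i)
        strides[i] = strides[i + 1] * shape[i + 1];
      return strides;
    }
    // e.g. shape {2, 3, 4} -> strides {12, 4, 1}. A transposed view such
    // as shape {3, 2} with strides {1, 3} matches no default strides,
    // which is why Reshape copies it instead of sharing the block.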
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/10f3aa1d/src/core/tensor/tensor_math.h
----------------------------------------------------------------------
diff --git a/src/core/tensor/tensor_math.h b/src/core/tensor/tensor_math.h
index 388c010..f438fc6 100644
--- a/src/core/tensor/tensor_math.h
+++ b/src/core/tensor/tensor_math.h
@@ -258,6 +258,7 @@ template <typename DType, typename Lang>
void Transform(const Tensor &in, Tensor *out, Context *ctx) {
LOG(FATAL) << "Transform Not Implemented";
}
+
// **************************************
// Random functions
// **************************************
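The generic Transform above is the fallback in SINGA's tensor_math dispatch scheme: the unspecialized template aborts with LOG(FATAL), and each backend supplies a specialization (the CPU one is removed in the next file). A toy sketch of the same pattern with hypothetical names (Scale, the lang tags, and both bodies are illustrative, not SINGA code):

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    namespace lang { struct Cpp {}; struct Cuda {}; }

    // Generic template: fails loudly, standing in for LOG(FATAL).
    template <typename DType, typename Lang>
    void Scale(const DType* in, DType* out, size_t n, DType factor) {
      std::fprintf(stderr, "Scale not implemented for this backend\n");
      std::abort();
    }

    // Backend specialization: the path that actually runs on CPU.
    template <>
    void Scale<float, lang::Cpp>(const float* in, float* out, size_t n,
                                 float factor) {
      for (size_t i = 0; i < n; ++i) out[i] = in[i] * factor;
    }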
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/10f3aa1d/src/core/tensor/tensor_math_cpp.h
----------------------------------------------------------------------
diff --git a/src/core/tensor/tensor_math_cpp.h b/src/core/tensor/tensor_math_cpp.h
index e302b04..bfdd026 100644
--- a/src/core/tensor/tensor_math_cpp.h
+++ b/src/core/tensor/tensor_math_cpp.h
@@ -427,20 +427,6 @@ void Tanh<float, lang::Cpp>(const Tensor& in, Tensor* out,
}
template <>
-void Transform<float, lang::Cpp>(const Tensor& in, Tensor* out,
- Context *ctx) {
- float *outPtr = static_cast<float *>(out->block()->mutable_data());
- const float *inPtr = static_cast<const float *>(in.block()->data());
- vector<int> traversal_info = generate_traversal_info(in);
- vector<int> shape_multipliers = generate_shape_multipliers(in);
-
- for (size_t i = 0; i < in.Size(); i++) {
- outPtr[i] = inPtr[traversal_info[in.shape().size()]];
- traverse_next(in, shape_multipliers, traversal_info, i + 1);
- }
-}
-
-template <>
void Bernoulli<float, lang::Cpp>(const float p, Tensor* out,
Context *ctx) {
std::bernoulli_distribution distribution(p);
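The deleted CPU Transform walked a possibly transposed input in logical order (via traversal_info and shape_multipliers) and wrote the elements out contiguously. A self-contained sketch of that strided-to-contiguous copy using explicit stride arithmetic in place of SINGA's traversal helpers (this reformulation is an assumption about Transform's effect, not the removed code itself):

    #include <cstddef>
    #include <vector>

    // Copy a strided (e.g. transposed) tensor into a contiguous buffer.
    void transform_copy(const float* in, float* out,
                        const std::vector<size_t>& shape,
                        const std::vector<size_t>& strides) {
      size_t n = 1;
      for (size_t d : shape) n *= d;
      std::vector<size_t> idx(shape.size(), 0);   // logical multi-index
      for (size_t i = 0; i < n; ++i) {
        size_t offset = 0;                        // physical position
        for (size_t d = 0; d < shape.size(); ++d)
          offset += idx[d] * strides[d];
        out[i] = in[offset];
        for (size_t d = shape.size(); d-- > 0;) { // odometer increment
          if (++idx[d] < shape[d]) break;
          idx[d] = 0;
        }
      }
    }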