Posted to commits@singa.apache.org by wa...@apache.org on 2016/06/03 07:48:34 UTC

[29/60] incubator-singa git commit: SINGA-167 - Add Tensor Math function APIs

SINGA-167 - Add Tensor Math function APIs

Add APIs in math.h, including linear algebra and random functions.
TODO: 1. add APIs for nn functions; 2. implement Tensor math functions
using the APIs from math.h.
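
The functions in math.h follow a tag-dispatch pattern: each op is a generic
template that logs FATAL, and each backend provides specializations selected
by the Lib type parameter (e.g. lib::Cpp in cpp_math.cc). A minimal sketch of
the idea, using a hypothetical Square op over raw float arrays in place of
Blob and Context:

  #include <cstdio>
  #include <cstdlib>

  namespace lib { class Cpp {}; class Cuda {}; }  // backend tag types

  // Generic fallback: aborts for any (DType, Lib) pair that has no
  // specialization, mirroring the LOG(FATAL) stubs in math.h.
  template <typename DType, typename Lib>
  void Square(int count, const DType* in, DType* out) {
    std::fprintf(stderr, "Not Implemented\n");
    std::abort();
  }

  // CPU specialization, analogous to Add<float, lib::Cpp> in cpp_math.cc.
  template <>
  void Square<float, lib::Cpp>(int count, const float* in, float* out) {
    for (int i = 0; i < count; i++) out[i] = in[i] * in[i];
  }

  int main() {
    float in[3] = {1, 2, 3}, out[3];
    Square<float, lib::Cpp>(3, in, out);  // specialization chosen at compile time
    std::printf("%g %g %g\n", out[0], out[1], out[2]);  // prints: 1 4 9
    return 0;
  }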


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/4a2ffb7e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/4a2ffb7e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/4a2ffb7e

Branch: refs/heads/dev
Commit: 4a2ffb7e85eedc0c84822962110bb8adf9ea3756
Parents: 9bccfb6
Author: Wei Wang <wa...@comp.nus.edu.sg>
Authored: Tue May 10 20:19:04 2016 +0800
Committer: Wei Wang <wa...@comp.nus.edu.sg>
Committed: Tue May 10 20:19:04 2016 +0800

----------------------------------------------------------------------
 include/singa/core/math.h   | 220 ++++++++++++++++++++++++++++++++++++++-
 include/singa/core/tensor.h |  10 +-
 src/core/math/cpp_math.cc   |  54 ++++++++++
 src/core/math/cpu_math.cc   |  54 ----------
 src/core/tensor/tensor.cc   |   2 +-
 test/singa/test_cpp_math.cc |  27 +++++
 6 files changed, 302 insertions(+), 65 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4a2ffb7e/include/singa/core/math.h
----------------------------------------------------------------------
diff --git a/include/singa/core/math.h b/include/singa/core/math.h
index 883abc8..511d9ee 100644
--- a/include/singa/core/math.h
+++ b/include/singa/core/math.h
@@ -43,19 +43,231 @@ class OpConf {
   }
 };
 
+// ================Linear algebra functions====================================
+template <typename DType, typename Lib>
+void Sum(int count, const Blob* input, DType* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+template <typename DType, typename Lib>
+void Abs(int count, const Blob* input, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+template <typename DType, typename Lib>
+void Sign(int count, const Blob* input, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// Exponential function; the base is e (Euler's number).
+template <typename DType, typename Lib>
+void Exp(int count, const Blob* input, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// Natural logarithm; the base is e (Euler's number).
+template <typename DType, typename Lib>
+void Log(int count, const Blob* input, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+template <typename DType, typename Lib>
+void Sqrt(int count, const Blob* input, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+template <typename DType, typename Lib>
+void Tanh(int count, const Blob* input, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+template <typename DType, typename Lib>
+void Sigmoid(int count, const Blob* input, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// Compute v^x for every element v of the input tensor
+template <typename DType, typename Lib>
+void Pow(int count, DType x, const Blob* input, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// Compute v^x element-wise, with v from lhs and x from rhs
+template <typename DType, typename Lib>
+void Pow(int count, const Blob* lhs, const Blob* rhs, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// Clamp every element into [low, high]
+template <typename DType, typename Lib>
+void Clamp(int count, DType low, DType high, const Blob* input, Blob* ret,
+           Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// ret = x + input
+template <typename DType, typename Lib>
+void Add(int count, DType x, const Blob* input, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// ret = x * input
+/// Division by x can be achieved by calling Mult with 1/x
+template <typename DType, typename Lib>
+void Mult(int count, DType x, const Blob* input, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// ret = lhs + rhs
 template <typename DType, typename Lib>
 void Add(int count, const Blob* lhs, const Blob* rhs, Blob* ret, Context* ctx) {
   LOG(FATAL) << "Not Implemented";
 }
 
-// ================Neural Net operations======================================
+/// ret = lhs - rhs
+template <typename DType, typename Lib>
+void Sub(int count, const Blob* lhs, const Blob* rhs, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// ret = lhs * rhs
+template <typename DType, typename Lib>
+void Mult(int count, const Blob* lhs, const Blob* rhs, Blob* ret,
+          Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// ret = lhs / rhs
+template <typename DType, typename Lib>
+void Div(int count, const Blob* lhs, const Blob* rhs, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// Outer product.
+/// lhs and rhs are vectors of length m and n; ret is a matrix of shape m * n
+template <typename DType, typename Lib>
+void Outer(int m, int n, const Blob* lhs, const Blob* rhs, Blob* ret,
+           Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+// TODO(wangwei) unify SumRow and SumCol.
+/// Sum the rows of the input matrix into a vector
+template <typename DType, typename Lib>
+void SumRow(int nrow, int ncol, const Blob* input, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+/// Sum the columns of the input matrix into a vector
+template <typename DType, typename Lib>
+void SumCol(int nrow, int ncol, const Blob* input, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+// TODO(wangwei) unify AddRow and AddCol.
+/// Add the vector v to every row of A, writing the result to the
+/// corresponding row of ret
+template <typename DType, typename Lib>
+void AddRow(int nrow, int ncol, const Blob* A, const Blob* v, Blob* ret,
+            Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// Add the vector v to every column of A, writing the result to the
+/// corresponding column of ret
+template <typename DType, typename Lib>
+void AddCol(int nrow, int ncol, const Blob* A, const Blob* v, Blob* ret,
+            Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+// ===== BLAS functions, ref to http://docs.nvidia.com/cuda/cublas
+// ===== Level 1
+/// return the index of the element with the largest absolute value.
+template <typename DType, typename Lib>
+void Amax(int count, const Blob* input, int* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// return the index of the element with the smallest absolute value.
+template <typename DType, typename Lib>
+void Amin(int count, const Blob* input, int* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+/// ret = sum of |x| over all x in input
+template <typename DType, typename Lib>
+void Asum(int count, const Blob* input, DType* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// ret = alpha * input + ret
+template <typename DType, typename Lib>
+void Axpy(int count, DType alpha, const Blob* input, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// ret *= x
+template <typename DType, typename Lib>
+void Scale(int count, DType x, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+template <typename DType, typename Lib>
+void Dot(int count, const Blob* lhs, const Blob* rhs, DType* ret,
+         Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+// ===== Level 2
+/// ret = alpha * op(A) * v + beta * ret.
+/// op(A) = A if trans = false; A^T otherwise; rows(A) = m, cols(A) = n.
+template <typename DType, typename Lib>
+void GEMV(bool trans, int m, int n, DType alpha, const Blob* A, const Blob* v,
+          DType beta, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+// ===== Level 3
+/// ret = alpha * op(A) * op(B) + beta * ret.
+/// op(X) = X if the corresponding trans flag is false, and X^T otherwise;
+/// op(A) is of shape m * k, op(B) of shape k * n, and ret of shape m * n.
+template <typename DType, typename Lib>
+void GEMM(bool transA, bool transB, int m, int n, int k, DType alpha,
+          const Blob* A, const Blob* B, DType beta, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+// ================Random functions===========================================
+// The random generator should be extracted from ctx.
+template <typename DType, typename Lib>
+void Uniform(int count, DType low, DType high, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+template <typename DType, typename Lib>
+void Gaussian(int count, DType mean, DType std, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// Each element of ret is 1 with probability p and 0 with probability 1 - p;
+/// 0 <= p <= 1
+template <typename DType, typename Lib>
+void Bernoulli(int count, DType p, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
+
+/// ret[i] is 1 with probability p[i] and 0 with probability 1 - p[i];
+/// 0 <= p[i] <= 1
+template <typename DType, typename Lib>
+void Bernoulli(int count, const Blob* p, Blob* ret, Context* ctx) {
+  LOG(FATAL) << "Not Implemented";
+}
 
-class ConvConf : public OpConf {};
+// ================Neural net functions=======================================
+/// 2D convolution.
+/// c is the number of input channels; w and h are the input width and height.
+/// nb_kernel is the number of output channels; kw and kh are the kernel
+/// width and height.
+/*
 template <typename DType, typename Lib>
-void Conv(const OpConf* conf, const Blob* input, const Blob* W, const Blob* b,
-          Blob* ret, Context* ctx) {
+void Conv2D(int c, int w, int h, int nb_kernel, int kw, int kh,
+           const Blob* input, const Blob* kernel, Blob* ret, Context* ctx) {
   LOG(FATAL) << "Not Implemented";
 }
+*/
 }  // namespace singa
 
 #endif  // SINGA_CORE_MATH_H_
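
As a reference for the Level-3 routine above, the GEMM semantics
ret = alpha * op(A) * op(B) + beta * ret reduce, in the no-transpose
row-major case, to the following naive loop (a sketch over plain float
arrays, not the tuned BLAS path):

  // Naive reference: ret = alpha * A * B + beta * ret, with A of shape
  // m x k, B of shape k x n, and ret of shape m x n, all row-major.
  void NaiveGemm(int m, int n, int k, float alpha, const float* A,
                 const float* B, float beta, float* ret) {
    for (int i = 0; i < m; i++) {
      for (int j = 0; j < n; j++) {
        float acc = 0.0f;
        for (int p = 0; p < k; p++) acc += A[i * k + p] * B[p * n + j];
        ret[i * n + j] = alpha * acc + beta * ret[i * n + j];
      }
    }
  }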

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4a2ffb7e/include/singa/core/tensor.h
----------------------------------------------------------------------
diff --git a/include/singa/core/tensor.h b/include/singa/core/tensor.h
index c8982ce..725f657 100644
--- a/include/singa/core/tensor.h
+++ b/include/singa/core/tensor.h
@@ -142,6 +142,7 @@ class Tensor {
   void operator=(Tensor&& t);
 
   void operator+=(const Tensor& t);
+  /*
   void operator+=(Tensor&& t);
   void operator-=(const Tensor& t);
   void operator-=(Tensor&& t);
@@ -156,7 +157,6 @@ class Tensor {
   template <typename T>
   void operator+=(const T x);
 
-  /*
   /// T is a scalar type
   template <typename T>
   void operator-=(const T x);
@@ -180,9 +180,9 @@ class Tensor {
   void Gaussian(float mean, float std);
 
   /// save Tensor into a proto msg
-  void ToProto(TensorProto* t);
+  // void ToProto(TensorProto* t);
   /// load Tensor from proto msg
-  void FromProto(const TensorProto& t);
+  // void FromProto(const TensorProto& t);
   */
  protected:
   bool transpose_ = false;
@@ -198,7 +198,6 @@ class Tensor {
 // class SparseTensor : public Tensor {};
 
 // ==================Simple Linear Algebra Operations=========================
-
 /*
 Tensor Tanh(const Tensor& t);
 Tensor Log(const Tensor& t);
@@ -216,9 +215,9 @@ void CopyData(Tensor* dst,
 
 Tensor operator+(const Tensor& lhs, const Tensor& rhs);
 void Add(const Tensor& lhs, const Tensor& rhs, Tensor* ret);
+/*
 Tensor operator-(const Tensor& lhs, const Tensor& rhs);
 void Sub(const Tensor& lhs, const Tensor& rhs, Tensor* ret);
-/*
 Tensor operator*(const Tensor& lhs, const Tensor& rhs);
 void operator*(const Tensor& lhs, const Tensor& rhs, Tensor* ret);
 Tensor operator/(const Tensor& lhs, const Tensor& rhs);
@@ -259,7 +258,6 @@ void Conv(const OpConf* conf,
           const Tensor &b,
           Tensor* ret);
 
-
 //================Random operations==========================================
 Tensor Uniform(float low, float high, const Shape& shape, Device* dev);
 

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4a2ffb7e/src/core/math/cpp_math.cc
----------------------------------------------------------------------
diff --git a/src/core/math/cpp_math.cc b/src/core/math/cpp_math.cc
new file mode 100644
index 0000000..638d693
--- /dev/null
+++ b/src/core/math/cpp_math.cc
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "singa/core/math.h"
+#include "singa/core/common.h"
+
+#ifdef USE_CBLAS
+#include <cblas.h>
+#endif
+
+namespace singa {
+template<>
+void Add<float, lib::Cpp>(int count,
+                     const Blob* lhs,
+                     const Blob* rhs,
+                     Blob* ret,
+                     Context* ctx) {
+  // CHECK_EQ(ctx->stream, nullptr);
+  float *dptr = static_cast<float*>(ret->mutable_data());
+  const float *lptr = static_cast<const float*>(lhs->data());
+  const float *rptr = static_cast<const float*>(rhs->data());
+  for (int i = 0; i < count; i++) {
+    dptr[i] = lptr[i] + rptr[i];
+  }
+}
+
+#ifdef USE_CBLAS
+template<>
+void Dot<float, lib::Cpp>(int count,
+                     const Blob* lhs,
+                     const Blob* rhs,
+                     float* ret,
+                     Context* ctx) {
+  const float *lptr = static_cast<const float*>(lhs->data());
+  const float *rptr = static_cast<const float*>(rhs->data());
+  *ret = cblas_sdot(count, lptr, 1, rptr, 1);
+}
+
+#endif
+}  // namespace singa
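
The Dot specialization above delegates to cblas_sdot, the single-precision
dot product from CBLAS. A standalone sketch of that call (compile and link
against a CBLAS implementation, e.g. -lcblas; illustrative only):

  #include <cblas.h>
  #include <cstdio>

  int main() {
    const float x[3] = {1.0f, 2.0f, 3.0f};
    const float y[3] = {4.0f, 5.0f, 6.0f};
    // count = 3 and unit stride on both vectors, as in Dot<float, lib::Cpp>.
    float dot = cblas_sdot(3, x, 1, y, 1);
    std::printf("%g\n", dot);  // prints: 32 = 1*4 + 2*5 + 3*6
    return 0;
  }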

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4a2ffb7e/src/core/math/cpu_math.cc
----------------------------------------------------------------------
diff --git a/src/core/math/cpu_math.cc b/src/core/math/cpu_math.cc
deleted file mode 100644
index 638d693..0000000
--- a/src/core/math/cpu_math.cc
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "singa/core/math.h"
-#include "singa/core/common.h"
-
-#ifdef USE_CBLAS
-#include <cblas.h>
-#endif
-
-namespace singa {
-template<>
-void Add<float, lib::Cpp>(int count,
-                     const Blob* lhs,
-                     const Blob* rhs,
-                     Blob* ret,
-                     Context* ctx) {
-  // CHECK_EQ(ctx->stream, nullptr);
-  float *dptr = static_cast<float*>(ret->mutable_data());
-  const float *lptr = static_cast<const float*>(lhs->data());
-  const float *rptr = static_cast<const float*>(rhs->data());
-  for (int i = 0; i < count; i++) {
-    dptr[i] = lptr[i] + rptr[i];
-  }
-}
-
-#ifdef USE_CBLAS
-template<>
-void Dot<float, lib::Cpp>(int count,
-                     const Blob* lhs,
-                     const Blob* rhs,
-                     float* ret,
-                     Context* ctx) {
-  float dptr = ret->mutable_data(), lptr = lhs->data(), rptr = rhs->data();
-  *ret = cblas_sdot(count, lptr, 1, rptr, 1);
-}
-
-#endif
-}

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4a2ffb7e/src/core/tensor/tensor.cc
----------------------------------------------------------------------
diff --git a/src/core/tensor/tensor.cc b/src/core/tensor/tensor.cc
index e03fd22..8fdc2ed 100644
--- a/src/core/tensor/tensor.cc
+++ b/src/core/tensor/tensor.cc
@@ -201,7 +201,6 @@ void Sub(const Tensor& lhs, const Tensor& rhs, Tensor *ret) {
         , {lhs.blob(), rhs.blob()}, {ret->blob()});
       });
 }
-*/
 
 // ================Blas operations============================================
 
@@ -218,5 +217,6 @@ void Conv(const OpConf* conf, const Tensor& input, const Tensor& W,
         {input.blob(), W.blob(), b.blob()}, {ret->blob()});
   });
 }
+*/
 
 }  // namespace singa

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4a2ffb7e/test/singa/test_cpp_math.cc
----------------------------------------------------------------------
diff --git a/test/singa/test_cpp_math.cc b/test/singa/test_cpp_math.cc
new file mode 100644
index 0000000..268785d
--- /dev/null
+++ b/test/singa/test_cpp_math.cc
@@ -0,0 +1,27 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#include "gtest/gtest.h"
+#include "singa/core/math.h"
+
+TEST(CppMath, Add) {
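+  // TODO: construct input Blobs and verify Add<float, lib::Cpp> element-wise.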
+
+}
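
The test body above is still a stub. A sketch of the kind of check it could
grow into, written against raw float arrays because this commit does not yet
show how a Blob is constructed (hypothetical, not the project's actual test):

  #include "gtest/gtest.h"

  // Hypothetical sketch: exercises the element-wise logic that
  // Add<float, lib::Cpp> implements, on raw arrays rather than Blobs.
  TEST(CppMathSketch, AddRawArrays) {
    const float lhs[3] = {1.0f, 2.0f, 3.0f};
    const float rhs[3] = {4.0f, 5.0f, 6.0f};
    float ret[3];
    for (int i = 0; i < 3; i++) ret[i] = lhs[i] + rhs[i];
    EXPECT_FLOAT_EQ(5.0f, ret[0]);
    EXPECT_FLOAT_EQ(7.0f, ret[1]);
    EXPECT_FLOAT_EQ(9.0f, ret[2]);
  }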