Posted to commits@singa.apache.org by ka...@apache.org on 2015/11/16 07:09:02 UTC

[16/19] incubator-singa git commit: SINGA-80 New Blob Level and Address Level Math Operation Interface

SINGA-80 New Blob Level and Address Level Math Operation Interface

Move the asum computation out of the Blob class (asum_data) to the free function Asum(const Blob<Dtype>&), which uses the new address-level cpu_asum.
Asum and Scale are implemented with cblas_sasum and cblas_sscal, i.e., they currently support only the float type.
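
A rough usage sketch of the change described above (not from the patch itself;
the variable name is made up): call sites move from the old Blob member
function to the new free function in math_blob.h.

    // before this commit: member function on the Blob
    float norm1 = some_blob.asum_data();
    // after this commit: Blob-level free function (cpu path only for now)
    float norm1 = singa::Asum(singa::cpu, some_blob);

Both forms return the L1 norm divided by the number of elements, and only
float Blobs are covered because cblas_sasum is called underneath.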


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/32e09219
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/32e09219
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/32e09219

Branch: refs/heads/master
Commit: 32e09219129a1dba359ed02760754e1c63e1480f
Parents: 21cfc21
Author: Wei Wang <wa...@comp.nus.edu.sg>
Authored: Tue Nov 10 22:20:52 2015 +0800
Committer: Wei Wang <wa...@comp.nus.edu.sg>
Committed: Tue Nov 10 22:23:04 2015 +0800

----------------------------------------------------------------------
 include/singa/utils/blob.h      | 53 ++++++++++++++++++++++++++++++------
 include/singa/utils/math_addr.h |  4 +++
 include/singa/utils/math_blob.h | 23 ++++++++++++----
 src/neuralnet/layer.cc          |  9 +++---
 src/utils/blob.cc               | 25 ++---------------
 5 files changed, 74 insertions(+), 40 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/32e09219/include/singa/utils/blob.h
----------------------------------------------------------------------
diff --git a/include/singa/utils/blob.h b/include/singa/utils/blob.h
index d20f318..87a97d5 100644
--- a/include/singa/utils/blob.h
+++ b/include/singa/utils/blob.h
@@ -120,18 +120,45 @@ template <typename Dtype>
 class Blob {
  public:
   Blob() {}
+  /**
+   * Blob constructor with given shape.
+   * @param shape specifies the size of each dimension, shape[0] is the highest
+   * dimension, i.e., stride[0] = shape[1] * shape[2] * ...
+   */
   explicit Blob(const std::vector<int>& shape) { Reshape(shape); }
+  /**
+   * Blob constructor with given shape.
+   * @param[in] dim0 total number of elements.
+   */
   explicit Blob(int dim0) { Reshape(dim0); }
+  /**
+   * Blob constructor with given shape.
+   * @param[in] dim0 size of the highest dimension
+   * @param[in] dim1 size of the second highest dimension
+   */
   explicit Blob(int dim0, int dim1) { Reshape(dim0, dim1); }
+  /**
+   * Blob constructor with given shape.
+   * @param[in] dim0 size of the highest dimension
+   * @param[in] dim1 size of the second highest dimension
+   * @param[in] dim2 size of the third highest dimension
+   */
   explicit Blob(int dim0, int dim1, int dim2) { Reshape(dim0, dim1, dim2); }
+  /**
+   * Blob constructor with given shape.
+   * @param[in] dim0 size of the highest dimension
+   * @param[in] dim1 size of the second highest dimension
+   * @param[in] dim2 size of the third highest dimension
+   * @param[in] dim3 size of the fourth highest dimension
+   */
   explicit Blob(int dim0, int dim1, int dim2, int dim3) {
     Reshape(dim0, dim1, dim2, dim3);
   }
   /**
-   * Change the shape of the blob, re-allocat memory if Blob size() changes.
+   * Change the shape of the blob, re-allocate memory if Blob size() changes.
    *
-   * @param[in] shape specifies the size of each dimension, shape[0] is the highest
-   * dimension, i.e., stride[0] = shape[1] * shape[2] * ...
+   * @param[in] shape specifies the size of each dimension, shape[0] is the
+   * highest dimension, i.e., stride[0] = shape[1] * shape[2] * ...
    */
   void Reshape(const std::vector<int>& shape);
   /**
@@ -185,22 +212,29 @@ class Blob {
    *
    * @param source the Blob to copy from
    * @param reshape if false, require this Blob to be pre-shaped to the shape
-   *        of other (and die otherwise); if true, Reshape this Blob to other's
-   *        shape if necessary
+   * of other (and die otherwise); if true, Reshape this Blob to other's
+   * shape if necessary
    */
-  void CopyFrom(const Blob<Dtype>& source);
   void CopyFrom(const Blob<Dtype>& source, bool reshape);
+  /**
+   * call CopyFrom(const Blob<Dtype>& source, bool reshape) with reshape = false
+   */
+  void CopyFrom(const Blob<Dtype>& source);
+
   void FromProto(const singa::BlobProto& proto);
   void ToProto(singa::BlobProto* proto) const;
+  /**
+   * Set each element to be v
+   */
   void SetValue(Dtype v);
   /**
    * Compute the sum of absolute values (L1 norm) of the data.
+  Dtype AsumData() const;
    */
-  Dtype asum_data() const;
   /**
    * Sum all elements
+  Dtype SumData() const;
    */
-  Dtype sum_data() const;
   /**
    * Share data with the other Blob.
    * Set the data_ shared_ptr to point to the SyncedMemory holding the data_
@@ -210,7 +244,10 @@ class Blob {
    * shared_ptr calls its destructor when reset with the "=" operator.
    */
   void ShareData(const Blob& other);
+
+  /*
   void Swap(Blob& other);
+  */
   /**
    * @return the shape vector.
    */

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/32e09219/include/singa/utils/math_addr.h
----------------------------------------------------------------------
diff --git a/include/singa/utils/math_addr.h b/include/singa/utils/math_addr.h
index ad78df7..4a610a9 100644
--- a/include/singa/utils/math_addr.h
+++ b/include/singa/utils/math_addr.h
@@ -34,6 +34,10 @@ extern "C" {
 
 
 namespace singa {
+template<typename Dtype>
+Dtype cpu_asum(int n, const Dtype* A, int inc) {
+  return cblas_sasum(n, A, inc);
+}
 
 template<typename Dtype>
 void cpu_gemm(const Dtype * A, const Dtype * B,
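
For reference, a minimal standalone use of the new address-level helper could
look like this (illustrative only; the example function and values are not
part of the patch):

    #include <vector>
    #include "singa/utils/math_addr.h"

    float example_asum() {
      std::vector<float> v = {1.0f, -2.0f, 3.0f};
      // |1| + |-2| + |3| = 6; inc = 1 walks the array contiguously
      return singa::cpu_asum(static_cast<int>(v.size()), v.data(), 1);
    }

Note that the template body always calls cblas_sasum, so, as the commit
message says, only Dtype = float is actually supported here.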

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/32e09219/include/singa/utils/math_blob.h
----------------------------------------------------------------------
diff --git a/include/singa/utils/math_blob.h b/include/singa/utils/math_blob.h
index bfdb5f3..4b71715 100644
--- a/include/singa/utils/math_blob.h
+++ b/include/singa/utils/math_blob.h
@@ -39,7 +39,7 @@ enum XPU {cpu, gpu, any};
  * Use blas scale internally.
  */
 template<typename Dtype>
-void Scale(xpu xpu, Dtype alpha, const Blob<Dtype> & A, Blob<Dtype> * B) {
+void Scale(XPU xpu, Dtype alpha, const Blob<Dtype> & A, Blob<Dtype> * B) {
   CHECK_EQ(A.count(), B->count());
   if (xpu == cpu)
     cpu_scale(A.count(), alpha, A.cpu_data(), B->mutable_cpu_data());
@@ -80,7 +80,7 @@ void AXPY(XPU xpu, Dtype alpha, const Blob<Dtype> & A, Blob<Dtype> * B) {
  * @param[in, out] C, vector
  */
 template<typename Dtype>
-void GEMV(XPU, xpu, Dtype alpha, Dtype beta, const Blob<Dtype>& A,
+void GEMV(XPU xpu, Dtype alpha, Dtype beta, const Blob<Dtype>& A,
     const Blob<Dtype>& B, Blob<Dtype>* C) {
   CHECK_EQ(A.shape().size(), 2) << "A must be a matrix";
   int a1, a2, m, n;
@@ -150,9 +150,9 @@ void GEMM(XPU xpu, Dtype alpha, Dtype beta, const Blob<Dtype>& A,
   b2 = B.transpose() ? B.shape(0) : B.shape(1);
   m = C->shape(0);
   n = C->shape(1);
-  CHECK__EQ(a2, b1);
-  CHECK__EQ(a1, m);
-  CHECK__EQ(b2, n);
+  CHECK_EQ(a2, b1);
+  CHECK_EQ(a1, m);
+  CHECK_EQ(b2, n);
 
   int k = A.transpose() ? A.shape(0) : A.shape(1);
   bool TranA = A.transpose();
@@ -561,6 +561,19 @@ void Expand2D(XPU xpu, const Blob<Dtype> & A, Blob<Dtype> * B) {
 #endif  // SINGA_GPU
 }
 
+/**
+ * Average the absolute values.
+ */
+template <typename Dtype>
+Dtype Asum(XPU xpu, const Blob<Dtype>& A) {
+  if (A.count() == 0) return Dtype(0);
+  if (xpu == cpu)
+    return cpu_asum(A.count(), A.cpu_data(), 1) / A.count();
+  return Dtype(0); // avoid compile warning
+#ifdef USE_GPU
+#endif
+}
+
 }  // end of namespace singa
 
 #endif  // SINGA_UTILS_MATH_BLOB_H_
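
Despite the BLAS-style name, the Blob-level Asum above returns the average of
the absolute values (the L1 norm divided by A.count()), matching the old
Blob::asum_data(). A small sketch with made-up values:

    singa::Blob<float> b(3);
    float* p = b.mutable_cpu_data();
    p[0] = 1.0f; p[1] = -2.0f; p[2] = 3.0f;
    float avg = singa::Asum(singa::cpu, b);  // (1 + 2 + 3) / 3 = 2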

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/32e09219/src/neuralnet/layer.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/layer.cc b/src/neuralnet/layer.cc
index a7a53b8..be38ac5 100644
--- a/src/neuralnet/layer.cc
+++ b/src/neuralnet/layer.cc
@@ -27,6 +27,7 @@
 #include <cfloat>
 #include "singa/utils/factory.h"
 #include "singa/utils/singleton.h"
+#include "singa/utils/math_blob.h"
 
 namespace singa {
 
@@ -47,17 +48,17 @@ const std::string Layer::ToString(bool debug, int flag) {
     return "";
   string ret = StringPrintf("Layer %10s ", name().c_str());
   if ((flag & kForward) == kForward && data_.count() !=0) {
-    ret += StringPrintf("data norm1 %13.9f", data_.asum_data());
+    ret += StringPrintf("data norm1 %13.9f", Asum(cpu, data_));
   } else if ((flag & kBackward) == kBackward) {
     if (grad_.count() != 0)
-      ret += StringPrintf("grad norm1 %13.9f\n", grad_.asum_data());
+      ret += StringPrintf("grad norm1 %13.9f\n", Asum(cpu, grad_));
   }
   if ((flag & kTrain) == kTrain) {
     for (Param* p : GetParams()) {
       ret += StringPrintf(
           "param id %2d, name %10s, value norm1 %13.9f, grad norm1 %13.9f\n",
-          p->id(), p->name().c_str(), p->data().asum_data(),
-          p->grad().asum_data());
+          p->id(), p->name().c_str(), Asum(cpu, p->data()),
+          Asum(cpu, p->grad()));
     }
   }
   return ret;

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/32e09219/src/utils/blob.cc
----------------------------------------------------------------------
diff --git a/src/utils/blob.cc b/src/utils/blob.cc
index cd164e7..94b1fcb 100644
--- a/src/utils/blob.cc
+++ b/src/utils/blob.cc
@@ -288,6 +288,7 @@ void Blob<Dtype>::ShareData(const Blob& other) {
   data_ = other.data_;
 }
 
+/*
 template <typename Dtype>
 void Blob<Dtype>::Swap(Blob& other) {
   CHECK_EQ(other.count(), count());
@@ -295,29 +296,7 @@ void Blob<Dtype>::Swap(Blob& other) {
   std::swap(data_, other.data_);
   std::swap(capacity_, other.capacity_);
 }
-
-template <> float Blob<float>::asum_data() const {
-  if (count() == 0) return 0.f;
-  return cblas_sasum(count(), cpu_data(), 1) / count();
-}
-template <> float Blob<float>::sum_data() const {
-  if (count() == 0) return 0.f;
-  float sum = 0.f;
-  const float* dptr = cpu_data();
-  for (int i = 0; i < count(); ++i)
-    sum += dptr[i];
-  return sum / count();
-}
-
-template <> unsigned int Blob<unsigned int>::asum_data() const {
-  NOT_IMPLEMENTED;
-  return 0;
-}
-
-template <> int Blob<int>::asum_data() const {
-  NOT_IMPLEMENTED;
-  return 0;
-}
+*/
 
 INSTANTIATE_CLASS(Blob);
 template class Blob<int>;