Posted to commits@singa.apache.org by wa...@apache.org on 2016/06/03 07:48:40 UTC
[35/60] incubator-singa git commit: SINGA-170 Add Dropout layer and CudnnDropout layer
SINGA-170 Add Dropout layer and CudnnDropout layer
Checked code format via cpplint.py.
Tested compilation and linking against cuDNN.
Note: if multiple CUDA versions are installed, please set CUDA_BIN_PATH
to the desired CUDA installation (e.g., /usr/local/cuda-7.5) before running `cmake ..`.
You also need to set CMAKE_INCLUDE_PATH and CMAKE_LIBRARY_PATH so that cmake can locate cuDNN.
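For example, with CUDA 7.5 and a separately installed cuDNN (the /usr/local/cudnn prefix below is an assumption; adjust to your machine):

    export CUDA_BIN_PATH=/usr/local/cuda-7.5   # CUDA toolkit to build against
    cmake -DCMAKE_INCLUDE_PATH=/usr/local/cudnn/include \
          -DCMAKE_LIBRARY_PATH=/usr/local/cudnn/lib64 ..
    make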
Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/b4918753
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/b4918753
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/b4918753
Branch: refs/heads/dev
Commit: b4918753cfee52a5ef537e453c953b4c384044d2
Parents: 3a87201
Author: Wei Wang <wa...@comp.nus.edu.sg>
Authored: Wed May 18 12:03:36 2016 +0800
Committer: Wei Wang <wa...@comp.nus.edu.sg>
Committed: Wed May 18 12:03:36 2016 +0800
----------------------------------------------------------------------
include/singa/core/common.h | 4 ++--
include/singa/core/device.h | 1 -
include/singa/core/tensor.h | 10 +++++-----
src/core/tensor/tensor.cc | 32 ++++++++++++++++----------------
src/model/layer/cudnn_dropout.h | 10 ++++++----
src/model/layer/cudnn_utils.h | 6 +++---
src/model/layer/dropout.h | 10 +++++++---
src/model/layer/rnn.h | 13 ++++++++++---
8 files changed, 49 insertions(+), 37 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/b4918753/include/singa/core/common.h
----------------------------------------------------------------------
diff --git a/include/singa/core/common.h b/include/singa/core/common.h
index 4d783fb..2f5b167 100644
--- a/include/singa/core/common.h
+++ b/include/singa/core/common.h
@@ -24,7 +24,7 @@
#ifdef USE_CUDA
#include <cuda_runtime.h>
-#include "cublas_v2.h"
+#include <cublas_v2.h>
#ifdef USE_CUDNN
#include <cudnn.h>
#endif
@@ -40,7 +40,7 @@ typedef struct _Cuda { } Cuda;
typedef struct _Cudnn { } Cudnn;
/// To implement function using opencl libraries
typedef struct _Opencl { } Opencl;
-} // namespace lib;
+} // namespace lib
typedef unsigned char Byte;
/// Blob represents a chunk of memory (on device or host) managed by VirtualMemory.
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/b4918753/include/singa/core/device.h
----------------------------------------------------------------------
diff --git a/include/singa/core/device.h b/include/singa/core/device.h
index b96efca..9022041 100644
--- a/include/singa/core/device.h
+++ b/include/singa/core/device.h
@@ -130,7 +130,6 @@ class CppDevice : public Device {
/// Free cpu memory.
void Free(void* ptr) override;
-
};
/// a singleton CppDevice as the host for all devices.
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/b4918753/include/singa/core/tensor.h
----------------------------------------------------------------------
diff --git a/include/singa/core/tensor.h b/include/singa/core/tensor.h
index 6c20c4f..88a895b 100644
--- a/include/singa/core/tensor.h
+++ b/include/singa/core/tensor.h
@@ -65,8 +65,8 @@ class Tensor {
public:
~Tensor();
Tensor();
- Tensor(Shape&& shape, DataType dtype = kFloat32);
- Tensor(const Shape& shape, DataType dtype = kFloat32);
+ explicit Tensor(Shape&& shape, DataType dtype = kFloat32);
+ explicit Tensor(const Shape& shape, DataType dtype = kFloat32);
Tensor(Shape&& shape, Device* dev, DataType dtype = kFloat32);
Tensor(const Shape& shape, Device* dev, DataType dtype = kFloat32);
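Marking the single-argument constructors explicit rules out accidental implicit conversions from a Shape to a Tensor. A hypothetical sketch (not code from this commit) of what the change forbids:

    void Accept(const Tensor& t);
    Shape s{2, 3};
    // Accept(s);       // before this commit: compiled via an implicit Tensor(s)
    Accept(Tensor(s));  // after: the conversion must be spelled out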
@@ -278,7 +278,7 @@ Tensor operator/(const Tensor& t, DType x);
template <typename DType>
void Div(const Tensor& t, DType x, Tensor* ret);
-//================Blas operations============================================
+// ================Blas operations============================================
// ===== Level 1
// TODO(wangwei) make amax/amin/asum a member function of tensor
// void Amax(Tensor, Context* ctx); Get the index of the max value in a vector
@@ -308,7 +308,7 @@ void Mult(DType alpha, const Tensor& lhs, DType beta, const Tensor& rhs,
// template<typename DType> T Dot(const Tensor& lhs, const Tensor& rhs);
-//================Random operations==========================================
+// ================Random operations==========================================
/// For each element x set x = 1 if random() < p; otherwise x = 0.
void Bernoulli(float p, Tensor* t);
/// Fill in Tensor 't' following uniform distribution.
@@ -316,7 +316,7 @@ void Uniform(float low, float high, Tensor* t);
/// Fill in Tensor 't' following Gaussian distribution.
void Gaussian(float mean, float std, Tensor* t);
-//================Neural Net operations======================================
+// ================Neural Net operations======================================
/* following API of cudnn, e.g., conv, pool, lrn, batchnorm, softmax
void ConvFwd(const ConvConf& conf, const Tensor& x, const Tensor& w, Tensor* y);
void ConvBwdBias(const ConvConf& conf, const Tensor& dy, Tensor* db);
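The random operations above fill an existing Tensor in place. A minimal usage sketch against the declarations in this header (not code from the commit):

    Tensor t(Shape{2, 3});
    Bernoulli(0.5f, &t);      // each element set to 1 with probability 0.5, else 0
    Uniform(0.0f, 1.0f, &t);  // i.i.d. samples from U[0, 1)
    Gaussian(0.0f, 1.0f, &t); // i.i.d. samples from N(0, 1)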
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/b4918753/src/core/tensor/tensor.cc
----------------------------------------------------------------------
diff --git a/src/core/tensor/tensor.cc b/src/core/tensor/tensor.cc
index cd62a38..0e5570d 100644
--- a/src/core/tensor/tensor.cc
+++ b/src/core/tensor/tensor.cc
@@ -381,7 +381,7 @@ GenBinaryTensorFunction(Pow, Pow);
#define EltwiseTensorScalarFn(fn, t, x, ret) \
do { \
TYPE_LIB_SWITCH(t.data_type(), DType, t.device()->device_lib(), Lib, { \
- static_assert(std::is_same<SType, DType>::value, \
+ static_assert(std::is_same<SType, DType>::value, \
"The Scalar type must match the Tensor data type"); \
ret->device()->Exec( \
[t, x, ret](Context* ctx) { \
@@ -436,8 +436,8 @@ template Tensor Mult<float>(float alpha, const Tensor& lhs, float beta,
const Tensor& rhs);
template <typename SType>
-void Mult(SType alpha, const Tensor& A, SType beta, const Tensor& B, Tensor* C)
-{
+void Mult(SType alpha, const Tensor& A, SType beta, const Tensor& B,
+ Tensor* C) {
CHECK_EQ(A.shape().size(), 2u);
bool transA = A.transpose();
size_t m = transA ? A.shape()[1] : A.shape()[0], n = 0;
@@ -445,14 +445,14 @@ void Mult(SType alpha, const Tensor& A, SType beta, const Tensor& B, Tensor* C)
n = C->Size();
TYPE_LIB_SWITCH(A.data_type(), DType, A.device()->device_lib(), Lib, {
static_assert(std::is_same<SType, DType>::value,
- "The scalar type must be the same as the tensor data type");
+ "The scalar type must be the same as the tensor data type");
C->device()->Exec(
- [transA, m, n, alpha, A, beta, B, C](Context* ctx) {
- GEMV<DType, Lib>(transA, m, n, alpha, A.blob(),
- B.blob(), beta, C->blob(), ctx);
- },
- {A.blob(), B.blob()}, {C->blob()});
- });
+ [transA, m, n, alpha, A, beta, B, C](Context* ctx) {
+ GEMV<DType, Lib>(transA, m, n, alpha, A.blob(), B.blob(), beta,
+ C->blob(), ctx);
+ },
+ {A.blob(), B.blob()}, {C->blob()});
+ });
} else {
CHECK(!C->transpose());
bool transB = B.transpose();
@@ -462,15 +462,15 @@ void Mult(SType alpha, const Tensor& A, SType beta, const Tensor& B, Tensor* C)
CHECK_EQ(A.Size(), m * k);
CHECK_EQ(B.Size(), n * k);
TYPE_LIB_SWITCH(A.data_type(), DType, A.device()->device_lib(), Lib, {
- static_assert(std::is_same<SType, DType>::value,
- "The scalar type must be the same as the tensor data type");
- C->device()->Exec(
+ static_assert(std::is_same<SType, DType>::value,
+ "The scalar type must be the same as the tensor data type");
+ C->device()->Exec(
[transA, transB, m, n, k, alpha, A, beta, B, C](Context* ctx) {
- GEMM<DType, Lib>(transA, transB, m, n, k, alpha, A.blob(),
- B.blob(), beta, C->blob(), ctx);
+ GEMM<DType, Lib>(transA, transB, m, n, k, alpha, A.blob(), B.blob(),
+ beta, C->blob(), ctx);
},
{A.blob(), B.blob()}, {C->blob()});
- });
+ });
}
}
template void Mult<float>(float alpha, const Tensor& lhs, float beta,
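Judging from the GEMM call above, Mult computes C = alpha * A * B + beta * C, dispatching to GEMV when B is a vector and GEMM otherwise. A usage sketch under that reading (not code from the commit):

    Tensor A(Shape{4, 3}), B(Shape{3, 2}), C(Shape{4, 2});
    Gaussian(0.0f, 1.0f, &A);
    Gaussian(0.0f, 1.0f, &B);
    Mult(1.0f, A, 0.0f, B, &C);  // C = A * B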
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/b4918753/src/model/layer/cudnn_dropout.h
----------------------------------------------------------------------
diff --git a/src/model/layer/cudnn_dropout.h b/src/model/layer/cudnn_dropout.h
index d2b68b9..db0aa15 100644
--- a/src/model/layer/cudnn_dropout.h
+++ b/src/model/layer/cudnn_dropout.h
@@ -16,12 +16,14 @@
* limitations under the License.
*/
-#ifndef SINGA_MODEL_LAYER_CUDNN_DROPOUT_H_
-#define SINGA_MODEL_LAYER_CUDNN_DROPOUT_H_
+#ifndef SRC_MODEL_LAYER_CUDNN_DROPOUT_H_
+#define SRC_MODEL_LAYER_CUDNN_DROPOUT_H_
#ifdef USE_CUDNN
// cudnn dropout is added in cudnn 5
#if CUDNN_MAJOR_VERSION >= 5
-
+#include <utility>
+#include <string>
+#include <vector>
#include "./dropout.h"
#include "singa/core/common.h"
#include "singa/model/layer.h"
@@ -51,4 +53,4 @@ class CudnnDropout : public Dropout {
} // namespace
#endif // CUDNN_MAJOR_VERSION >= 5
#endif // USE_CUDNN
-#endif // SINGA_MODEL_LAYER_CUDNN_DROPOUT_H_
+#endif // SRC_MODEL_LAYER_CUDNN_DROPOUT_H_
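For context, CudnnDropout builds on the dropout API introduced in cuDNN 5. The raw calls look roughly like this (a hedged sketch with error checking omitted; handle, x_desc/y_desc, x/y, and seed are assumed to be created elsewhere):

    cudnnDropoutDescriptor_t drop_desc;
    size_t state_size, reserve_size;
    void *states, *reserve;
    cudnnCreateDropoutDescriptor(&drop_desc);
    cudnnDropoutGetStatesSize(handle, &state_size);          // RNG state buffer
    cudaMalloc(&states, state_size);
    cudnnSetDropoutDescriptor(drop_desc, handle, 0.5f,       // drop ratio 0.5
                              states, state_size, seed);
    cudnnDropoutGetReserveSpaceSize(x_desc, &reserve_size);  // per-call mask space
    cudaMalloc(&reserve, reserve_size);
    cudnnDropoutForward(handle, drop_desc, x_desc, x,
                        y_desc, y, reserve, reserve_size);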
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/b4918753/src/model/layer/cudnn_utils.h
----------------------------------------------------------------------
diff --git a/src/model/layer/cudnn_utils.h b/src/model/layer/cudnn_utils.h
index 92c8df7..298ee5c 100644
--- a/src/model/layer/cudnn_utils.h
+++ b/src/model/layer/cudnn_utils.h
@@ -15,8 +15,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#ifndef SINGA_MODEL_LAYER_CUDNN_BASE_H_
-#define SINGA_MODEL_LAYER_CUDNN_BASE_H_
+#ifndef SRC_MODEL_LAYER_CUDNN_UTILS_H_
+#define SRC_MODEL_LAYER_CUDNN_UTILS_H_
#ifdef USE_CUDNN
@@ -82,4 +82,4 @@ inline const char* cudnnGetErrorString(cudnnStatus_t status) {
} // namespace singa
#endif // USE_CUDNN
-#endif // SINGA_MODEL_LAYER_CUDNN_BASE_H_
+#endif // SRC_MODEL_LAYER_CUDNN_UTILS_H_
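The cudnnGetErrorString helper shown above is typically paired with a status-checking macro; one common pattern (an illustrative sketch, not necessarily the macro this header defines):

    #define CHECK_CUDNN(expr)                          \
      do {                                             \
        cudnnStatus_t status = (expr);                 \
        CHECK_EQ(status, CUDNN_STATUS_SUCCESS)         \
            << cudnnGetErrorString(status);            \
      } while (0)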
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/b4918753/src/model/layer/dropout.h
----------------------------------------------------------------------
diff --git a/src/model/layer/dropout.h b/src/model/layer/dropout.h
index a6e733a..5efaf6a 100644
--- a/src/model/layer/dropout.h
+++ b/src/model/layer/dropout.h
@@ -15,9 +15,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#ifndef SINGA_MODEL_LAYER_DROPOUT_H_
-#define SINGA_MODEL_LAYER_DROPOUT_H_
+#ifndef SRC_MODEL_LAYER_DROPOUT_H_
+#define SRC_MODEL_LAYER_DROPOUT_H_
+#include <utility>
+#include <string>
+#include <vector>
#include "singa/model/layer.h"
+
namespace singa {
class Dropout : public Layer {
public:
@@ -55,4 +59,4 @@ class Dropout : public Layer {
Tensor mask_;
};
} // namespace singa
-#endif // SINGA_MODEL_LAYER_DROPOUT_H_
+#endif // SRC_MODEL_LAYER_DROPOUT_H_
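The mask_ member caches the sampled dropout mask so that Backward() can apply the same mask to the gradient. With the inverted-dropout convention (scaling at training time so inference is the identity), Forward() amounts to roughly the following sketch of the standard technique, written against the Tensor API above (not the literal SINGA code):

    // drop ratio p in [0, 1)
    Bernoulli(1.0f - p, &mask_);    // keep each unit with probability 1 - p
    Div(mask_, 1.0f - p, &mask_);   // scale kept units so E[y] == E[x]
    // y = input * mask_, element-wise; at test time the input passes through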
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/b4918753/src/model/layer/rnn.h
----------------------------------------------------------------------
diff --git a/src/model/layer/rnn.h b/src/model/layer/rnn.h
index a6ba461..35c86bd 100644
--- a/src/model/layer/rnn.h
+++ b/src/model/layer/rnn.h
@@ -15,9 +15,16 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#ifndef SINGA_MODEL_LAYER_DROPOUT_H_
-#define SINGA_MODEL_LAYER_DROPOUT_H_
+#ifndef SRC_MODEL_LAYER_RNN_H_
+#define SRC_MODEL_LAYER_RNN_H_
+
+#include <utility>
+#include <string>
+#include <vector>
+#include <stack>
+
#include "singa/model/layer.h"
+
namespace singa {
/// To enable use the same layer multiple times in one iteration in RNN,
/// the Forward() function pushes the 'input' or 'output' that are
@@ -56,4 +63,4 @@ class RNN : public Layer {
std::stack<Tensor*> states_;
};
} // namespace singa
-#endif // SINGA_MODEL_LAYER_DROPOUT_H_
+#endif // SRC_MODEL_LAYER_RNN_H_
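The states_ stack exists because a single RNN layer instance is invoked once per time step within one iteration: Forward() pushes whatever Backward() will later need, and Backward() pops it in reverse time order. Schematically (a sketch of the intended pattern, not the commit's code):

    // Forward over T time steps
    for (size_t t = 0; t < T; t++) {
      // ... compute output[t] from input[t] and the hidden state ...
      states_.push(buffer_for_backward);  // e.g. the step's input or output
    }
    // Backward visits the steps in reverse order
    for (size_t t = T; t-- > 0;) {
      Tensor* saved = states_.top();
      states_.pop();
      // ... compute the gradients for step t using *saved ...
    }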