Posted to commits@singa.apache.org by ka...@apache.org on 2018/07/13 05:43:47 UTC

[3/3] incubator-singa git commit: SINGA-378 Implement maxpooling operation and its related functions for autograd

SINGA-378 Implement maxpooling operation and its related functions for autograd

- fix some bugs and test the code (the mnist_cnn.py example runs well)


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/a3629182
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/a3629182
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/a3629182

Branch: refs/heads/master
Commit: a36291824bdfd99b907adc68b5fc206c9053bdc8
Parents: fb5cb9a
Author: xuewanqi <xu...@outlook.com>
Authored: Thu Jul 12 11:10:49 2018 +0000
Committer: xuewanqi <xu...@outlook.com>
Committed: Thu Jul 12 11:10:49 2018 +0000

----------------------------------------------------------------------
 examples/autograd/mnist_cnn.py | 1 -
 python/singa/autograd.py       | 2 +-
 src/model/operation/pooling.cc | 8 ++++----
 3 files changed, 5 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/a3629182/examples/autograd/mnist_cnn.py
----------------------------------------------------------------------
diff --git a/examples/autograd/mnist_cnn.py b/examples/autograd/mnist_cnn.py
index d42dc76..92fc43a 100755
--- a/examples/autograd/mnist_cnn.py
+++ b/examples/autograd/mnist_cnn.py
@@ -117,7 +117,6 @@ if __name__ == '__main__':
         y = conv1(x)
         y = autograd.relu(y)
         y = bn1(y)
-        y = autograd.max_pool_2d(y)
         y = pooling1(y)
 
         y = conv2(y)

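With the duplicated call removed, the feature map is pooled only once: the deleted `autograd.max_pool_2d(y)` ran immediately before `pooling1(y)`, so the same activations were pooled twice and their spatial size no longer matched what the layers after `pooling1` expect. A minimal sketch of that shape effect, with assumed 2x2/stride-2 pooling parameters (the actual kernel and stride used in mnist_cnn.py are not shown in this hunk):

    # Hypothetical illustration: pooling the same feature map twice keeps
    # halving its spatial size. Kernel/stride values are assumed (2x2,
    # stride 2, no padding), not read from mnist_cnn.py.
    def pooled_size(size, kernel=2, stride=2):
        """Output length of one spatial dimension after max pooling (no padding)."""
        return (size - kernel) // stride + 1

    h = 24                        # e.g. feature-map height after a convolution
    once = pooled_size(h)         # 12 -- what pooling1(y) alone produces
    twice = pooled_size(once)     # 6  -- what max_pool_2d(y) followed by pooling1(y) produced
    print(once, twice)
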
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/a3629182/python/singa/autograd.py
----------------------------------------------------------------------
diff --git a/python/singa/autograd.py b/python/singa/autograd.py
index 7b4d18d..16d7f82 100755
--- a/python/singa/autograd.py
+++ b/python/singa/autograd.py
@@ -773,7 +773,7 @@ class _BatchNorm(Operation):
 
 
 def batchnorm(handle, x, scale, bias, running_mean, running_var):
-    return _BatchNorm(handle, running_mean, running_var, handle)(x, scale, bias)[0]
+    return _BatchNorm(handle, running_mean, running_var)(x, scale, bias)[0]
 
 
 class _Pooling2D(Operation):

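The `batchnorm` fix above removes a duplicated argument: `handle` was passed both as the first constructor argument and again at the end, so `_BatchNorm.__init__` received one more positional argument than it accepts. A minimal, hypothetical reproduction of that failure mode (the real `_BatchNorm` class body is not shown in this diff; only the argument count mirrors the corrected call):

    # Hypothetical stand-in for _BatchNorm: the constructor takes the handle
    # plus the two running statistics, mirroring the corrected call above.
    class _BatchNorm:
        def __init__(self, handle, running_mean, running_var):
            self.handle = handle
            self.running_mean = running_mean
            self.running_var = running_var

    handle, mean, var = object(), object(), object()

    _BatchNorm(handle, mean, var)              # fixed call: constructs fine
    try:
        _BatchNorm(handle, mean, var, handle)  # old call: extra trailing handle
    except TypeError as e:
        print(e)  # __init__() takes 4 positional arguments but 5 were given
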
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/a3629182/src/model/operation/pooling.cc
----------------------------------------------------------------------
diff --git a/src/model/operation/pooling.cc b/src/model/operation/pooling.cc
old mode 100644
new mode 100755
index 0072671..03ff804
--- a/src/model/operation/pooling.cc
+++ b/src/model/operation/pooling.cc
@@ -39,7 +39,7 @@ CudnnPoolingHandle::CudnnPoolingHandle(const Tensor &input,
                                        const bool is_max)
   : PoolingHandle(input, kernel_size, stride, padding, is_max) {
 
-#nan_prop = CUDNN_NOT_PROPAGATE_NAN;
+//nan_prop = CUDNN_NOT_PROPAGATE_NAN;
 
   DataType dtype = input.data_type();
 
@@ -73,7 +73,7 @@ CudnnPoolingHandle::~CudnnPoolingHandle() {
 }
 
 
-Tensor GpuPoolingForward(const Tensor &x, const CudnnPoolingHandle &cph) {
+Tensor GpuPoolingForward(const CudnnPoolingHandle &cph, const Tensor &x) {
   CHECK_EQ(x.device()->lang(), kCuda);
   CHECK_EQ(x.nDim(), 4u);
 
@@ -89,8 +89,8 @@ Tensor GpuPoolingForward(const Tensor &x, const CudnnPoolingHandle &cph) {
   return output;
 }
 
-Tensor GpuPoolingBackward(const Tensor &dy, const Tensor& x, const Tensor& y,
-                          const CudnnPoolingHandle &cph) {
+Tensor GpuPoolingBackward(const CudnnPoolingHandle &cph, const Tensor &dy,
+                          const Tensor& x, const Tensor& y) {
   CHECK_EQ(dy.device()->lang(), kCuda);
   CHECK_EQ(dy.nDim(), 4u);
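
After this reordering, both GPU pooling entry points take the `CudnnPoolingHandle` first and the tensors after it: `GpuPoolingForward(cph, x)` and `GpuPoolingBackward(cph, dy, x, y)`. A hedged sketch of how a Python-side wrapper would call them, assuming the two functions are exposed through the SWIG-generated `singa_wrap` module under the same names (that exposure is an assumption; only the C++ signatures come from this diff):

    # Assumption: GpuPoolingForward/GpuPoolingBackward are reachable via the
    # SWIG bindings; only their argument order (handle first) is taken from
    # the C++ changes in this commit.
    from singa import singa_wrap as singa

    def pooling_forward(handle, x):
        # handle first, then the input tensor
        return singa.GpuPoolingForward(handle, x)

    def pooling_backward(handle, dy, x, y):
        # handle first, then the top gradient, the input and the forward output
        return singa.GpuPoolingBackward(handle, dy, x, y)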