You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@singa.apache.org by wa...@apache.org on 2017/06/05 06:05:12 UTC

incubator-singa git commit: Fix the problem when the batchnorm layer connects to a dense layer

Repository: incubator-singa
Updated Branches:
  refs/heads/master 62f5e1853 -> cd485a7ae


Fix the problem when the batchnorm layer connects to a dense layer


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/cd485a7a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/cd485a7a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/cd485a7a

Branch: refs/heads/master
Commit: cd485a7aec44f2a68609d84379700c72fb526092
Parents: 62f5e18
Author: jedshady <mi...@163.com>
Authored: Mon Jun 5 09:49:45 2017 +0800
Committer: jedshady <mi...@163.com>
Committed: Mon Jun 5 09:49:45 2017 +0800

----------------------------------------------------------------------
 src/model/layer/cudnn_batchnorm.cc | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/cd485a7a/src/model/layer/cudnn_batchnorm.cc
----------------------------------------------------------------------
diff --git a/src/model/layer/cudnn_batchnorm.cc b/src/model/layer/cudnn_batchnorm.cc
index 1dbb05b..5c93a6b 100644
--- a/src/model/layer/cudnn_batchnorm.cc
+++ b/src/model/layer/cudnn_batchnorm.cc
@@ -80,13 +80,13 @@ const Tensor CudnnBatchNorm::Forward(int flag, const Tensor& input) {
           &n, &c, &h, &w, &s, &s, &s, &s));
     if (shape[0] != static_cast<size_t>(n))
       InitCudnn(shape, dtype);
-    CHECK(input.shape(1) == static_cast<size_t>(c)
-        && input.shape(2) == static_cast<size_t>(h)
-        && input.shape(3) == static_cast<size_t>(w))
+    CHECK(shape[1] == static_cast<size_t>(c)
+        && shape[2] == static_cast<size_t>(h)
+        && shape[3] == static_cast<size_t>(w))
       << "input sample shape should not change"
       << "previous shape " << c << ", " << h << ", " << w
-      << "current shape " << input.shape(1) << ", " << input.shape(2) << ", "
-      << input.shape(3);
+      << "current shape " << shape[1] << ", " << shape[2] << ", "
+      << shape[3];
   }