Posted to commits@singa.apache.org by wa...@apache.org on 2016/10/25 08:43:39 UTC
[3/6] incubator-singa git commit: SINGA-267 Add spatial mode in batch normalization layer
SINGA-267 Add spatial mode in batch normalization layer
Revise batchnorm test cases for spatial mode.
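
For context: in spatial mode, batch normalization keeps one mean/variance pair per channel, shared across the batch and all height x width positions, instead of one pair per individual feature as in the fully-connected case. Below is a minimal standalone sketch of the per-channel statistics (hypothetical code, not SINGA's implementation; the name SpatialStats is made up):

    // Per-channel mean and inverse std-dev over N, H, W of an NCHW input.
    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    void SpatialStats(const std::vector<float>& x, size_t n, size_t c,
                      size_t h, size_t w, float eps,
                      std::vector<float>* mean, std::vector<float>* inv_std) {
      const size_t spatial = h * w;
      const float count = static_cast<float>(n * spatial);
      mean->assign(c, 0.f);
      inv_std->assign(c, 0.f);
      for (size_t i = 0; i < n; ++i)
        for (size_t j = 0; j < c; ++j)
          for (size_t k = 0; k < spatial; ++k)
            (*mean)[j] += x[(i * c + j) * spatial + k];
      for (size_t j = 0; j < c; ++j) (*mean)[j] /= count;
      for (size_t i = 0; i < n; ++i)
        for (size_t j = 0; j < c; ++j)
          for (size_t k = 0; k < spatial; ++k) {
            const float d = x[(i * c + j) * spatial + k] - (*mean)[j];
            (*inv_std)[j] += d * d;  // accumulate squared deviation
          }
      for (size_t j = 0; j < c; ++j)
        (*inv_std)[j] = 1.f / std::sqrt((*inv_std)[j] / count + eps);
    }

    int main() {
      // Two samples, two channels, 1x1 spatial extent: the test input below.
      std::vector<float> x = {1, 2, 3, 4}, mean, inv_std;
      SpatialStats(x, 2, 2, 1, 1, 1e-5f, &mean, &inv_std);
      std::printf("mean: %.1f %.1f\n", mean[0], mean[1]);  // 2.0 3.0
      return 0;
    }

Each input element is then mapped to gamma * (x - mean) * inv_std + beta, with gamma (bnScale) and beta (bnBias) also held per channel.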
Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/f2c0b843
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/f2c0b843
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/f2c0b843
Branch: refs/heads/master
Commit: f2c0b843b4018622e0f0caa472d7d2dc259ce1f8
Parents: 47e38c8
Author: WANG Ji <ij...@gmail.com>
Authored: Sun Oct 23 20:54:38 2016 +0800
Committer: xiezl <xi...@comp.nus.edu.sg>
Committed: Tue Oct 25 14:52:34 2016 +0800
----------------------------------------------------------------------
src/model/layer/batchnorm.cc | 2 +-
test/singa/test_batchnorm.cc | 44 ++++++++++++++++++---------------------
2 files changed, 21 insertions(+), 25 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/f2c0b843/src/model/layer/batchnorm.cc
----------------------------------------------------------------------
diff --git a/src/model/layer/batchnorm.cc b/src/model/layer/batchnorm.cc
index afe9a36..e07dfd9 100644
--- a/src/model/layer/batchnorm.cc
+++ b/src/model/layer/batchnorm.cc
@@ -117,7 +117,7 @@ const Tensor BatchNorm::Forward(int flag, const Tensor& input) {
bnBias_.Reshape(Shape{channels_, 1});
std::vector<Tensor> mean_stack, var_stack, scale_stack, bias_stack;
- for (int i = 0; i < height_ * width_; ++i) {
+ for (unsigned i = 0; i < height_ * width_; ++i) {
mean_stack.push_back(runningMean_);
var_stack.push_back(runningVariance_);
scale_stack.push_back(bnScale_);
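
Two things are worth noting in this hunk. The surrounding loop replicates the per-channel parameters (each reshaped to {channels_, 1} just above) height_ * width_ times so they can be stacked and broadcast over every spatial position. The one-word change itself is only a type fix: height_ and width_ are presumably unsigned, so a signed index makes i < height_ * width_ a signed/unsigned comparison, which compilers flag under -Wsign-compare. A trivial standalone illustration (hypothetical, not from the repository):

    #include <cstddef>
    int main() {
      const size_t height = 2, width = 2;
      int visits = 0;
      // for (int i = 0; i < height * width; ++i)  // warning: comparison of
      //                                           // different signedness
      for (unsigned i = 0; i < height * width; ++i)  // both sides unsigned
        ++visits;
      return visits == 4 ? 0 : 1;
    }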
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/f2c0b843/test/singa/test_batchnorm.cc
----------------------------------------------------------------------
diff --git a/test/singa/test_batchnorm.cc b/test/singa/test_batchnorm.cc
index c8efbf9..fadba42 100644
--- a/test/singa/test_batchnorm.cc
+++ b/test/singa/test_batchnorm.cc
@@ -43,19 +43,19 @@ TEST(BatchNorm, Setup) {
TEST(BatchNorm, Forward) {
BatchNorm batchnorm;
const float x[] = {1, 2, 3, 4};
- Tensor in(Shape{2, 1, 2, 1});
- in.CopyDataFromHostPtr(x, 2 * 1 * 2 * 1);
+ Tensor in(Shape{2, 2});
+ in.CopyDataFromHostPtr(x, 2 * 2);
const float alpha_[] = {1, 1};
- Tensor alpha(Shape{1, 2});
- alpha.CopyDataFromHostPtr(alpha_, 1 * 2);
+ Tensor alpha(Shape{2});
+ alpha.CopyDataFromHostPtr(alpha_, 2);
const float beta_[] = {2, 2};
- Tensor beta(Shape{1, 2});
- beta.CopyDataFromHostPtr(beta_, 1 * 2);
+ Tensor beta(Shape{2});
+ beta.CopyDataFromHostPtr(beta_, 2);
singa::LayerConf conf;
singa::BatchNormConf *batchnorm_conf = conf.mutable_batchnorm_conf();
batchnorm_conf->set_factor(1);
- batchnorm.Setup(Shape{1, 2, 1}, conf);
+ batchnorm.Setup(Shape{2}, conf);
batchnorm.set_bnScale(alpha);
batchnorm.set_bnBias(beta);
batchnorm.set_runningMean(beta);
@@ -63,11 +63,9 @@ TEST(BatchNorm, Forward) {
Tensor out = batchnorm.Forward(kTrain, in);
const float *outptr = out.data<float>();
const auto &shape = out.shape();
- EXPECT_EQ(4u, shape.size());
+ EXPECT_EQ(2u, shape.size());
EXPECT_EQ(2u, shape[0]);
- EXPECT_EQ(1u, shape[1]);
- EXPECT_EQ(2u, shape[2]);
- EXPECT_EQ(1u, shape[3]);
+ EXPECT_EQ(2u, shape[1]);
EXPECT_NEAR(1.0f, outptr[0], 1e-4f);
EXPECT_NEAR(1.0f, outptr[1], 1e-4f);
EXPECT_NEAR(3.0f, outptr[2], 1e-4f);
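
A quick hand check of these expected values, assuming the usual training-mode rule y = gamma * (x - mean) / sqrt(var + eps) + beta with gamma = 1 and beta = 2 as set above:

    channel 0: x = {1, 3}  ->  mean = 2, var = ((1-2)^2 + (3-2)^2) / 2 = 1
    channel 1: x = {2, 4}  ->  mean = 3, var = 1
    y = (x - mean) / sqrt(1 + eps) + 2  ~=  {1, 1, 3, 3}

which is what the EXPECT_NEAR assertions encode.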
@@ -77,22 +75,22 @@ TEST(BatchNorm, Forward) {
TEST(BatchNorm, Backward) {
BatchNorm batchnorm;
const float x[] = {1, 2, 3, 4};
- Tensor in(Shape{2, 1, 2, 1});
- in.CopyDataFromHostPtr(x, 2 * 1 * 2 * 1);
+ Tensor in(Shape{2, 2});
+ in.CopyDataFromHostPtr(x, 2 * 2);
const float dy[] = {4, 3, 2, 1};
- Tensor dy_in(Shape{2, 1, 2, 1});
- dy_in.CopyDataFromHostPtr(dy, 2 * 1 * 2 * 1);
+ Tensor dy_in(Shape{2, 2});
+ dy_in.CopyDataFromHostPtr(dy, 2 * 2);
const float alpha_[] = {1, 1};
- Tensor alpha(Shape{1, 2});
- alpha.CopyDataFromHostPtr(alpha_, 1 * 2);
+ Tensor alpha(Shape{2});
+ alpha.CopyDataFromHostPtr(alpha_, 2);
const float beta_[] = {0, 0};
- Tensor beta(Shape{1, 2});
- beta.CopyDataFromHostPtr(beta_, 1 * 2);
+ Tensor beta(Shape{2});
+ beta.CopyDataFromHostPtr(beta_, 2);
singa::LayerConf conf;
singa::BatchNormConf *batchnorm_conf = conf.mutable_batchnorm_conf();
batchnorm_conf->set_factor(1);
- batchnorm.Setup(Shape{1, 2, 1}, conf);
+ batchnorm.Setup(Shape{2}, conf);
batchnorm.set_bnScale(alpha);
batchnorm.set_bnBias(beta);
batchnorm.set_runningMean(beta);
@@ -101,11 +99,9 @@ TEST(BatchNorm, Backward) {
auto ret = batchnorm.Backward(kTrain, dy_in);
Tensor dx = ret.first;
const auto & shape = dx.shape();
- EXPECT_EQ(4u, shape.size());
+ EXPECT_EQ(2u, shape.size());
EXPECT_EQ(2u, shape[0]);
- EXPECT_EQ(1u, shape[1]);
- EXPECT_EQ(2u, shape[2]);
- EXPECT_EQ(1u, shape[3]);
+ EXPECT_EQ(2u, shape[1]);
const float *dxptr = ret.first.data<float>();
EXPECT_NEAR(.0f, dxptr[0], 1e-4f);
EXPECT_NEAR(.0f, dxptr[1], 1e-4f);
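
The zero gradients can also be verified by hand, assuming the standard batch-norm backward rule dx = (gamma / sqrt(var + eps)) * (dy - mean(dy) - xhat * mean(dy * xhat)), where xhat is the normalized input:

    xhat = {-1, -1, 1, 1} for x = {1, 2, 3, 4} (per-channel mean {2, 3}, var 1)
    channel 0: dy = {4, 2}: mean(dy) = 3, mean(dy * xhat) = (-4 + 2) / 2 = -1
               dx = {4 - 3 - (-1)(-1), 2 - 3 - (1)(-1)} = {0, 0}
    channel 1: dy = {3, 1}: mean(dy) = 2, mean(dy * xhat) = -1  ->  dx = {0, 0}

so every entry of dx should be near zero, which is what these assertions check.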