Posted to commits@singa.apache.org by wa...@apache.org on 2016/08/17 18:02:55 UTC

[34/51] [abbrv] incubator-singa git commit: SINGA-237 New documentation files for SINGA v1.0

SINGA-237 New documentation files for SINGA v1.0

Update the layer identifier. If the implementation is transparent to devices,
then it has an extra identifier 'singa' besides the specific identifiers,
i.e., 'singacpp', 'singacl' and 'singacuda'.
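
For example, with the v1.0 Python API touched in the layer.py diff below, a
device-transparent layer can be created through any of its identifiers by
setting the engine prefix (a minimal sketch; the exact Dropout constructor
arguments here are assumptions):

    from singa import layer

    # engine is the prefix of the layer identifier and is case insensitive.
    # Dropout uses only Tensor functions, so 'singa', 'singacpp',
    # 'singacuda' and 'singacl' all resolve to the same implementation.
    layer.engine = 'singa'
    drop = layer.Dropout('drop1', p=0.5, input_sample_shape=(3, 32, 32))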


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/c2173b30
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/c2173b30
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/c2173b30

Branch: refs/heads/master
Commit: c2173b3097a6f38ff2a44f48cc250219ad41b8d4
Parents: 5d20d35
Author: Wei Wang <wa...@comp.nus.edu.sg>
Authored: Mon Aug 15 20:46:24 2016 +0800
Committer: Wei Wang <wa...@comp.nus.edu.sg>
Committed: Mon Aug 15 21:04:32 2016 +0800

----------------------------------------------------------------------
 src/model/layer/convolution.cc |  1 -
 src/model/layer/pooling.cc     |  1 -
 src/python/singa/layer.py      | 16 ++++++++++------
 3 files changed, 10 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c2173b30/src/model/layer/convolution.cc
----------------------------------------------------------------------
diff --git a/src/model/layer/convolution.cc b/src/model/layer/convolution.cc
index 0d1751d..52e9d93 100644
--- a/src/model/layer/convolution.cc
+++ b/src/model/layer/convolution.cc
@@ -23,7 +23,6 @@
 namespace singa {
 using std::vector;
 
-RegisterLayerClass(singa_convolution, Convolution);
 RegisterLayerClass(singacpp_convolution, Convolution);
 void Convolution::Setup(const Shape &in_sample, const LayerConf &conf) {
   Layer::Setup(in_sample, conf);

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c2173b30/src/model/layer/pooling.cc
----------------------------------------------------------------------
diff --git a/src/model/layer/pooling.cc b/src/model/layer/pooling.cc
index 23969da..a18f9de 100644
--- a/src/model/layer/pooling.cc
+++ b/src/model/layer/pooling.cc
@@ -20,7 +20,6 @@
 #include "singa/model/layer.h"
 namespace singa {
 
-RegisterLayerClass(singa_pooling, Pooling);
 RegisterLayerClass(singacpp_pooling, Pooling);
 void Pooling::Setup(const Shape& in_sample, const LayerConf& conf) {
   Layer::Setup(in_sample, conf);

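The RegisterLayerClass lines above each bind one string identifier to a layer
class in the layer factory; the Python wrapper then composes such identifiers
from the engine prefix and the layer type. A rough, purely illustrative Python
sketch of that registry pattern (hypothetical names, not SINGA code):

    # Hypothetical registry mirroring RegisterLayerClass/_create_layer.
    _registry = {}

    def register(ident, cls):
        _registry[ident.lower()] = cls

    def create_layer(engine, layer_type):
        # e.g., ('singacpp', 'Convolution') -> 'singacpp_convolution'
        return _registry[(engine + '_' + layer_type).lower()]()
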
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c2173b30/src/python/singa/layer.py
----------------------------------------------------------------------
diff --git a/src/python/singa/layer.py b/src/python/singa/layer.py
index a9f3826..86ba836 100644
--- a/src/python/singa/layer.py
+++ b/src/python/singa/layer.py
@@ -56,7 +56,8 @@ For example, CudnnConvolution layer is identified by 'cudnn_convolution';
 Some layers' implementations use only Tensor functions, therefore they are
 transparent to the underlying devices. For these layers, they would have
 multiple identifiers, e.g., singacpp_dropout, singacuda_dropout and
-singacl_dropout are all for the Dropout layer.
+singacl_dropout are all for the Dropout layer. In addition, such layers have
+an extra identifier 'singa', i.e., 'singa_dropout' also refers to Dropout.
 
 engine is case insensitive. Each Python layer creates the correct specific
 layer using the engine attribute.
@@ -439,7 +440,8 @@ class BatchNormalization(Layer):
         self.param_specs.append(_construct_param_specs_from_dict(beta_specs))
         self.param_specs.append(_construct_param_specs_from_dict(mean_specs))
         self.param_specs.append(_construct_param_specs_from_dict(var_specs))
-        _check_engine(engine, ['cudnn', 'singacpp', 'singacuda', 'singacl'])
+        _check_engine(engine, ['cudnn', 'singa', 'singacpp', 'singacuda',
+                               'singacl'])
         self.layer = _create_layer(engine, 'BatchNorm')
         if input_sample_shape is not None:
             self.setup(input_sample_shape)
@@ -466,7 +468,8 @@ class LRN(Layer):
         # TODO(wangwei) enable mode = 'within_channel'
         assert mode == 'cross_channel', 'only support mode="cross_channel"'
         conf.norm_region = model_pb2.LRNConf.ACROSS_CHANNELS
-        _check_engine(engine, ['cudnn', 'singacpp', 'singacuda', 'singacl'])
+        _check_engine(engine, ['cudnn', 'singa', 'singacpp', 'singacuda',
+                               'singacl'])
         self.layer = _create_layer(engine, 'LRN')
         if input_sample_shape is not None:
             self.setup(input_sample_shape)
@@ -555,7 +558,8 @@ class Dropout(Layer):
         # 'cudnn' works for v>=5.0
         #  if engine.lower() == 'cudnn':
         #      engine = 'cuda'
-        _check_engine(engine, ['cudnn', 'singacpp', 'singacuda', 'singacl'])
+        _check_engine(engine, ['cudnn', 'singa', 'singacpp', 'singacuda',
+                               'singacl'])
         self.layer = _create_layer(engine, 'Dropout')
         if input_sample_shape is not None:
             self.setup(input_sample_shape)
@@ -590,7 +594,8 @@ class Softmax(Layer):
         super(Softmax, self).__init__(name)
         # conf = self.conf.softmax_conf
         # conf.axis = axis
-        _check_engine(engine, ['cudnn', 'singacpp', 'singacl', 'singacuda'])
+        _check_engine(engine, ['cudnn', 'singa', 'singacpp', 'singacl',
+                               'singacuda'])
         self.layer = _create_layer(engine, 'Softmax')
         if input_sample_shape is not None:
             self.setup(input_sample_shape)
@@ -820,7 +825,6 @@ def _construct_param_specs_from_dict(specs):
         a ParamSpec object
     """
     conf = model_pb2.ParamSpec()
-    print 'convert', specs
     if 'name' in specs:
         conf.name = specs['name']
     if 'lr_mult' in specs: