Posted to commits@singa.apache.org by wa...@apache.org on 2016/08/17 18:03:09 UTC

[48/51] [abbrv] incubator-singa git commit: Preparing for V1.0 RC0.

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/ed9587c0/tool/python/examples/mnist_rbm4.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/mnist_rbm4.py b/tool/python/examples/mnist_rbm4.py
deleted file mode 100755
index 8343b4f..0000000
--- a/tool/python/examples/mnist_rbm4.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-#/************************************************************
-#*
-#* Licensed to the Apache Software Foundation (ASF) under one
-#* or more contributor license agreements.  See the NOTICE file
-#* distributed with this work for additional information
-#* regarding copyright ownership.  The ASF licenses this file
-#* to you under the Apache License, Version 2.0 (the
-#* "License"); you may not use this file except in compliance
-#* with the License.  You may obtain a copy of the License at
-#*
-#*   http://www.apache.org/licenses/LICENSE-2.0
-#*
-#* Unless required by applicable law or agreed to in writing,
-#* software distributed under the License is distributed on an
-#* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-#* KIND, either express or implied.  See the License for the
-#* specific language governing permissions and limitations
-#* under the License.
-#*
-#*************************************************************/
-
-
-import sys, os
-sys.path.append(os.path.join(os.path.dirname(__file__),'..'))
-from singa.model import *
-from examples.datasets import mnist
-
-rbmid = 4
-pvalues = {'batchsize' : 100, 'shape' : 784, 'std_value' : 255}
-X_train, X_test, workspace = mnist.load_data(
-            workspace = 'examples/rbm/rbm'+str(rbmid),
-            nb_rbm = rbmid,
-            checkpoint_steps = 6000,
-            **pvalues)
-
-m = Energy('rbm'+str(rbmid), sys.argv)
-
-out_dim = [1000, 500, 250, 30]
-m.add(RBM(out_dim, sampling='gaussian', w_std=0.1, b_wd=0))
-
-sgd = SGD(lr=0.001, decay=0.0002, momentum=0.8)
-topo = Cluster(workspace)
-m.compile(optimizer=sgd, cluster=topo)
-m.fit(X_train, alg='cd', nb_epoch=6000)
-#result = m.evaluate(X_test, test_steps=100, test_freq=500)
-
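
For reference, alg='cd' above selects contrastive divergence for training the
stacked RBM. A minimal numpy sketch of what one CD-1 update does (a generic
illustration of the algorithm, not SINGA's implementation; W, b, c denote the
weights, visible bias, and hidden bias):

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def cd1_step(v0, W, b, c, lr=0.001, rng=np.random):
        # positive phase: hidden activations driven by the data
        ph0 = sigmoid(v0.dot(W) + c)
        h0 = (rng.rand(*ph0.shape) < ph0).astype(float)
        # negative phase: one Gibbs step back to visible, then hidden
        pv1 = sigmoid(h0.dot(W.T) + b)
        ph1 = sigmoid(pv1.dot(W) + c)
        # approximate log-likelihood gradient and update the parameters
        n = v0.shape[0]
        W += lr * (v0.T.dot(ph0) - pv1.T.dot(ph1)) / n
        b += lr * (v0 - pv1).mean(axis=0)
        c += lr * (ph0 - ph1).mean(axis=0)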

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/ed9587c0/tool/python/examples/train_cifar10.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/train_cifar10.py b/tool/python/examples/train_cifar10.py
deleted file mode 100755
index e8ac973..0000000
--- a/tool/python/examples/train_cifar10.py
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/usr/bin/env python
-
-#/************************************************************
-#*
-#* Licensed to the Apache Software Foundation (ASF) under one
-#* or more contributor license agreements.  See the NOTICE file
-#* distributed with this work for additional information
-#* regarding copyright ownership.  The ASF licenses this file
-#* to you under the Apache License, Version 2.0 (the
-#* "License"); you may not use this file except in compliance
-#* with the License.  You may obtain a copy of the License at
-#*
-#*   http://www.apache.org/licenses/LICENSE-2.0
-#*
-#* Unless required by applicable law or agreed to in writing,
-#* software distributed under the License is distributed on an
-#* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-#* KIND, either express or implied.  See the License for the
-#* specific language governing permissions and limitations
-#* under the License.
-#*
-#*************************************************************/
-
-'''
-Example script of CNN model for CIFAR10 dataset
-'''
-import os, sys
-import numpy as np
-
-current_path_ = os.path.dirname(__file__)
-singa_root_ = os.path.abspath(os.path.join(current_path_,'../../..'))
-sys.path.append(os.path.join(singa_root_,'tool','python'))
-
-from singa.driver import Driver
-from singa.layer import *
-from singa.model import *
-
-
-'''
-The CIFAR10 dataset can be downloaded at [https://www.cs.toronto.edu/~kriz/cifar.html]
-- please set dataset_dir_ below accordingly
-'''
-dataset_dir_ = singa_root_ + "/tool/python/examples/datasets/cifar-10-batches-py"
-mean_image = None
-
-def unpickle(file):
-    ''' This method loads a batch of the CIFAR10 dataset provided on its website
-        See [https://www.cs.toronto.edu/~kriz/cifar.html] for more details
-    '''
-    import cPickle
-    with open(file, 'rb') as fo:
-        return cPickle.load(fo)
-
-def compute_mean_image():
-    ''' This is a sample script to compute the average image
-        over all samples in the 5 training batches of cifar10
-    '''
-    images = None
-    for did in range(1, 6):
-        fname_train_data = dataset_dir_ + "/data_batch_{}".format(did)
-        cifar10 = unpickle(fname_train_data)
-        image = cifar10['data'].astype(dtype=np.uint8)
-        # accumulate all five batches before averaging
-        images = image if images is None else np.vstack((images, image))
-    return np.average(images, axis=0)
-
-def load_dataset(did=1):
-    ''' The CIFAR10 training set includes
-        5 binary batches, each containing 10000 images;
-        each row (1 image) holds 1 label & 3072 pixels,
-        i.e. 3 channels of a 32x32 image
-    '''
-    assert mean_image is not None, 'mean_image is required'
-    print '[Load CIFAR10 dataset {}]'.format(did)
-    fname_train_data = dataset_dir_ + "/data_batch_{}".format(did)
-    cifar10 = unpickle(fname_train_data)
-    image = cifar10['data'].astype(dtype=np.uint8)
-    image = image - mean_image
-    print '  image x:', image.shape
-    label = np.asarray(cifar10['labels'], dtype=np.uint8)
-    label = label.reshape(label.size, 1)
-    print '  label y:', label.shape
-    return image, label
-
-#-------------------------------------------------------------------
-mean_image = compute_mean_image()
-# mean_image = np.fromfile('tool/python/examples/datasets/cifar10_mean_image')
-
-print '[Layer registration/declaration]'
-d = Driver()
-d.Init(sys.argv)
-
-input = ImageInput(32, 32, 3) # image width, height, channel
-label = LabelInput()
-
-nn = []
-nn.append(input)
-nn.append(Convolution2D(32, 5, 1, 2, w_std=0.0001, b_lr=2))
-nn.append(MaxPooling2D(pool_size=(3,3), stride=2))
-nn.append(Activation('relu'))
-nn.append(LRN2D(3, alpha=0.00005, beta=0.75))
-nn.append(Convolution2D(32, 5, 1, 2, b_lr=2))
-nn.append(Activation('relu'))
-nn.append(AvgPooling2D(pool_size=(3,3), stride=2))
-nn.append(LRN2D(3, alpha=0.00005, beta=0.75))
-nn.append(Convolution2D(64, 5, 1, 2))
-nn.append(Activation('relu'))
-nn.append(AvgPooling2D(pool_size=(3,3), stride=2))
-nn.append(Dense(10, w_wd=250, b_lr=2, b_wd=0))
-loss = Loss('softmaxloss')
-
-# updater
-sgd = SGD(decay=0.004, momentum=0.9, lr_type='manual', step=(0,60000,65000), step_lr=(0.001,0.0001,0.00001))
-
-#-------------------------------------------------------------------
-batchsize = 100
-disp_freq = 50
-train_step = 1000
-
-print '[Start training]'
-for dataset_id in range(train_step / batchsize):
-
-    x, y = load_dataset(dataset_id%5+1)
-
-    for i in range(x.shape[0] / batchsize):
-        xb, yb = x[i*batchsize:(i+1)*batchsize,:], y[i*batchsize:(i+1)*batchsize,:]
-        nn[0].Feed(xb)
-        label.Feed(yb)
-        for h in range(1, len(nn)):
-            nn[h].ComputeFeature(nn[h-1])
-        loss.ComputeFeature(nn[-1], label)
-        if (i+1)%disp_freq == 0:
-            print '  Step {:>3}: '.format(i+1 + dataset_id*(x.shape[0]/batchsize)),
-            loss.display()
-
-        loss.ComputeGradient()
-        for h in range(len(nn)-1, 0, -1):
-            nn[h].ComputeGradient()
-            sgd.Update(i+1, nn[h])
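
A note on the mean image: accumulating pixel sums in float64 avoids uint8
overflow. A minimal sketch of the same computation, reusing the unpickle()
helper above (dataset_dir is a hypothetical argument):

    import numpy as np

    def mean_image(dataset_dir):
        total, count = np.zeros(3072), 0
        for did in range(1, 6):
            batch = unpickle('{0}/data_batch_{1}'.format(dataset_dir, did))
            data = batch['data'].astype(np.float64)  # shape (10000, 3072)
            total += data.sum(axis=0)
            count += data.shape[0]
        return total / count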

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/ed9587c0/tool/python/examples/train_mnist.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/train_mnist.py b/tool/python/examples/train_mnist.py
deleted file mode 100755
index b8e6217..0000000
--- a/tool/python/examples/train_mnist.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env python
-
-#/************************************************************
-#*
-#* Licensed to the Apache Software Foundation (ASF) under one
-#* or more contributor license agreements.  See the NOTICE file
-#* distributed with this work for additional information
-#* regarding copyright ownership.  The ASF licenses this file
-#* to you under the Apache License, Version 2.0 (the
-#* "License"); you may not use this file except in compliance
-#* with the License.  You may obtain a copy of the License at
-#*
-#*   http://www.apache.org/licenses/LICENSE-2.0
-#*
-#* Unless required by applicable law or agreed to in writing,
-#* software distributed under the License is distributed on an
-#* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-#* KIND, either express or implied.  See the License for the
-#* specific language governing permissions and limitations
-#* under the License.
-#*
-#*************************************************************/
-
-'''
-Example script of MLP model for MNIST dataset
-'''
-import os, sys
-import numpy as np
-
-current_path_ = os.path.dirname(__file__)
-singa_root_=os.path.abspath(os.path.join(current_path_,'../../..'))
-sys.path.append(os.path.join(singa_root_,'tool','python'))
-
-from singa.driver import Driver
-from singa.layer import *
-from singa.model import *
-
-def swap32(x):
-    return (((x << 24) & 0xFF000000) |
-            ((x <<  8) & 0x00FF0000) |
-            ((x >>  8) & 0x0000FF00) |
-            ((x >> 24) & 0x000000FF))
-
-def load_dataset():
-    ''' MNIST dataset (IDX format)
-        train-images: 4 big-endian uint32 headers & uint8 pixels
-        train-labels: 2 big-endian uint32 headers & uint8 labels
-    '''
-    print '[Load MNIST dataset]'
-    fname_train_image = "examples/mnist/train-images-idx3-ubyte"
-    fname_train_label = "examples/mnist/train-labels-idx1-ubyte"
-    nb_header = [4, 2]
-
-    info = swap32(np.fromfile(fname_train_image, dtype=np.uint32, count=nb_header[0]))
-    nb_samples = info[1] 
-    shape = (info[2],info[3])
-    
-    x = np.fromfile(fname_train_image, dtype=np.uint8)
-    x = x[np.dtype(np.int32).itemsize*nb_header[0]:] # skip header
-    x = x.reshape(nb_samples, shape[0]*shape[1]) 
-    print '   data x:', x.shape
-    y = np.fromfile(fname_train_label, dtype=np.uint8)
-    y = y[np.dtype(np.int32).itemsize*nb_header[1]:] # skip header
-    y = y.reshape(nb_samples, 1) 
-    print '  label y:', y.shape
-
-    return x, y
-
-#-------------------------------------------------------------------
-print '[Layer registration/declaration]'
-d = Driver()
-d.Init(sys.argv)
-
-input = ImageInput(28, 28)
-label = LabelInput()
-
-nn = []
-nn.append(input)
-nn.append(Dense(2500, init='uniform'))
-nn.append(Activation('stanh'))
-nn.append(Dense(2000, init='uniform'))
-nn.append(Activation('stanh'))
-nn.append(Dense(1500, init='uniform'))
-nn.append(Activation('stanh'))
-nn.append(Dense(1000, init='uniform'))
-nn.append(Activation('stanh'))
-nn.append(Dense(500, init='uniform'))
-nn.append(Activation('stanh'))
-nn.append(Dense(10, init='uniform'))
-loss = Loss('softmaxloss')
-
-# updater
-sgd = SGD(lr=0.001, lr_type='step')
-
-#-------------------------------------------------------------------
-batchsize = 64 
-disp_freq = 10
-
-x, y = load_dataset()
-
-print '[Start training]'
-for i in range(x.shape[0] / batchsize):
-    xb, yb = x[i*batchsize:(i+1)*batchsize,:], y[i*batchsize:(i+1)*batchsize,:]
-    nn[0].Feed(xb)
-    label.Feed(yb)
-    for h in range(1, len(nn)):
-        nn[h].ComputeFeature(nn[h-1])
-    loss.ComputeFeature(nn[-1], label)
-    if (i+1)%disp_freq == 0:
-        print '  Step {:>3}: '.format(i+1),
-        loss.display()
-
-    loss.ComputeGradient()
-    for h in range(len(nn)-1, 0, -1):
-        nn[h].ComputeGradient()
-        sgd.Update(i+1, nn[h])
-
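
For reference, swap32() above is needed because the IDX files store their
uint32 headers big-endian, while np.fromfile reads them in native (usually
little-endian) byte order. An equivalent standalone sketch that reads the
headers with an explicit big-endian struct format instead:

    import struct
    import numpy as np

    def load_idx_images(path):
        with open(path, 'rb') as f:
            # four big-endian uint32 headers: magic, #images, #rows, #cols
            magic, n, rows, cols = struct.unpack('>IIII', f.read(16))
            assert magic == 2051, 'not an IDX3 image file'
            pixels = np.frombuffer(f.read(), dtype=np.uint8)
        return pixels.reshape(n, rows * cols)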

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/ed9587c0/tool/python/singa.py
----------------------------------------------------------------------
diff --git a/tool/python/singa.py b/tool/python/singa.py
deleted file mode 100755
index e44e94d..0000000
--- a/tool/python/singa.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-
-#/************************************************************
-#*
-#* Licensed to the Apache Software Foundation (ASF) under one
-#* or more contributor license agreements.  See the NOTICE file
-#* distributed with this work for additional information
-#* regarding copyright ownership.  The ASF licenses this file
-#* to you under the Apache License, Version 2.0 (the
-#* "License"); you may not use this file except in compliance
-#* with the License.  You may obtain a copy of the License at
-#*
-#*   http://www.apache.org/licenses/LICENSE-2.0
-#*
-#* Unless required by applicable law or agreed to in writing,
-#* software distributed under the License is distributed on an
-#* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-#* KIND, either express or implied.  See the License for the
-#* specific language governing permissions and limitations
-#* under the License.
-#*
-#*************************************************************/
-
-import os
-import sys
-import string
-import pb2.job_pb2 as job_pb2
-import singa.driver as driver
-from google.protobuf.text_format import Merge
-
-if __name__ == '__main__':
-    """Invoke the training program using this python script.
-    ./bin/singa-run.sh -exec tool/python/singa.py -conf examples/cifar10/job.conf
-    """
- 
-    i = sys.argv.index('-conf')
-    s = open(sys.argv[i+1], 'r').read()
-    s = str(s)
-    j = job_pb2.JobProto()
-    Merge(s, j)
-    b = j.SerializeToString()
-    d = driver.Driver()
-    d.InitLog(sys.argv[0])
-    d.Init(sys.argv)
-    d.Train(False, b)
-    #d.Test(b)
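
The script above boils down to one pattern: parse a human-readable job
configuration (protobuf text format) and hand its binary wire format to the
C++ Driver. A minimal sketch of that round trip using the same modules:

    from google.protobuf.text_format import Merge
    import pb2.job_pb2 as job_pb2

    def text_conf_to_binary(conf_path):
        job = job_pb2.JobProto()
        with open(conf_path, 'r') as f:
            Merge(f.read(), job)        # parse text-format proto
        return job.SerializeToString()  # binary form for Driver.Train()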

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/ed9587c0/tool/python/singa/__init__.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/__init__.py b/tool/python/singa/__init__.py
deleted file mode 100644
index a796a7a..0000000
--- a/tool/python/singa/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#/************************************************************
-#*
-#* Licensed to the Apache Software Foundation (ASF) under one
-#* or more contributor license agreements.  See the NOTICE file
-#* distributed with this work for additional information
-#* regarding copyright ownership.  The ASF licenses this file
-#* to you under the Apache License, Version 2.0 (the
-#* "License"); you may not use this file except in compliance
-#* with the License.  You may obtain a copy of the License at
-#*
-#*   http://www.apache.org/licenses/LICENSE-2.0
-#*
-#* Unless required by applicable law or agreed to in writing,
-#* software distributed under the License is distributed on an
-#* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-#* KIND, either express or implied.  See the License for the
-#* specific language governing permissions and limitations
-#* under the License.
-#*
-#*************************************************************/
-
-

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/ed9587c0/tool/python/singa/driver.i
----------------------------------------------------------------------
diff --git a/tool/python/singa/driver.i b/tool/python/singa/driver.i
deleted file mode 100644
index 63f2287..0000000
--- a/tool/python/singa/driver.i
+++ /dev/null
@@ -1,117 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-/*interface file for swig */
-
-%module driver
-%include "std_vector.i"
-%include "std_string.i"
-%include "argcargv.i"
-%include "carrays.i"
-%array_class(float, floatArray);
-
-%apply (int ARGC, char **ARGV) { (int argc, char **argv)  }
-%{
-#include "singa/driver.h"
-#include "singa/worker.h"
-#include "singa/neuralnet/layer.h"
-#include "singa/neuralnet/neuron_layer.h"
-#include "singa/neuralnet/loss_layer.h"
-#include "singa/utils/blob.h"
-#include "singa/utils/param.h"
-#include "singa/utils/updater.h"
-#include "singa/proto/job.pb.h"
-#include "singa/proto/common.pb.h"
-%}
-
-namespace std {
-  %template(strVector) vector<string>;
-  %template(intVector) vector<int>;
-  %template(floatVector) vector<float>;
-  %template(layerVector) vector<singa::Layer*>;
-  %template(paramVector) vector<singa::Param*>;
-}
-
-namespace singa{
-  class Driver{
-    public:
-    void Train(bool resume, const std::string job_conf);
-    void Init(int argc, char **argv);
-    void InitLog(char* arg);
-    void Test(const std::string job_conf);
-  };
-
-  %nodefault Worker;
-  class Worker{
-    public:
-      static singa::Worker* CreateWorker(const std::string str);
-      void InitNetParams(const std::string& folder, std::vector<singa::Layer*> net);
-      void Checkpoint(int step, const std::string& folder, std::vector<singa::Layer*> net);
-  };
-    
-  class DummyLayer{
-    public:
-      void Setup(const std::string str, const std::vector<singa::Layer*>& srclayers);
-      void Feed(int batchsize, std::vector<float>& data, std::vector<int>& aux_data);
-      singa::Layer* ToLayer();
-  };
-
-  %nodefault Layer;
-  class Layer{
-    public:
-      static singa::Layer* CreateLayer(const std::string str);
-      static void SetupLayer(singa::Layer* layer, const std::string str, const std::vector<singa::Layer*>& srclayers);
-      virtual void ComputeFeature(int flag, const std::vector<singa::Layer*>& srclayers); 
-      virtual void ComputeGradient(int flag, const std::vector<singa::Layer*>& srclayers);
-      virtual const singa::Blob<float>& data(const singa::Layer* from); 
-      virtual const std::vector<singa::Param*> GetParams();
-      virtual const std::string ToString(bool debug, int flag);
-      void SetParams(std::vector<singa::Param*> params);
-  };
-
-  %nodefault Updater;
-  class Updater{
-    public:
-      static singa::Updater* CreateUpdater(const std::string str);
-      virtual void Update(int step, singa::Param* param, float grad_scale);
-  };
-
-  template <typename Dtype>
-  class Blob{
-    public:
-      inline int count();
-      inline const std::vector<int>& shape();
-      inline Dtype* mutable_cpu_data(); 
-      inline const Dtype* cpu_data();
-  };
-
-  class Param{
-    public:
-      inline int size();
-      inline const std::vector<int>& shape();
-      inline float* mutable_cpu_data();
-      void FromProto(const std::string str);
-      /*void ToProto(singa::BlobProto* blob); 
-      */
-  };
-
-  %template(floatBlob) Blob<float>;
-}
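
The %array_class(float, floatArray) declaration above is what lets Python read
the raw float* buffers returned by Blob::mutable_cpu_data(). A hedged usage
sketch, mirroring the pattern used in tool/python/singa/layer.py below:

    from singa.driver import floatArray_frompointer
    import numpy as np

    def blob_to_numpy(blob):
        # blob is a wrapped singa::Blob<float> (floatBlob)
        arr = floatArray_frompointer(blob.mutable_cpu_data())
        data = np.array([arr[i] for i in range(blob.count())])
        return data.reshape(tuple(blob.shape()))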

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/ed9587c0/tool/python/singa/generatepy.sh
----------------------------------------------------------------------
diff --git a/tool/python/singa/generatepy.sh b/tool/python/singa/generatepy.sh
deleted file mode 100755
index 488d96a..0000000
--- a/tool/python/singa/generatepy.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env bash
-#/**
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# The following commands are only for developers adding new Python APIs.
-swig -c++ -python driver.i
-#g++ -fPIC ../../../src/driver.cc driver_wrap.cxx -shared -o _driver.so \
-# 	 -L../../../.libs/ -lsinga -DMSHADOW_USE_CUDA=0 \
-#    -DMSHADOW_USE_CBLAS=1 -DMSHADOW_USE_MKL=0 -std=c++11 \
-#    -I../../../include \
-#    -I/usr/include/python2.7/

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/ed9587c0/tool/python/singa/initializations.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/initializations.py b/tool/python/singa/initializations.py
deleted file mode 100644
index f016f1f..0000000
--- a/tool/python/singa/initializations.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env python
-
-#/************************************************************
-#*
-#* Licensed to the Apache Software Foundation (ASF) under one
-#* or more contributor license agreements.  See the NOTICE file
-#* distributed with this work for additional information
-#* regarding copyright ownership.  The ASF licenses this file
-#* to you under the Apache License, Version 2.0 (the
-#* "License"); you may not use this file except in compliance
-#* with the License.  You may obtain a copy of the License at
-#*
-#*   http://www.apache.org/licenses/LICENSE-2.0
-#*
-#* Unless required by applicable law or agreed to in writing,
-#* software distributed under the License is distributed on an
-#* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-#* KIND, either express or implied.  See the License for the
-#* specific language governing permissions and limitations
-#* under the License.
-#*
-#*************************************************************/
-
-'''
-This module pre-defines initial values for fields
-'''
-
-def get_init_values(identifier, **kwargs):
-    '''
-    This method returns a dict of key-value pairs whose keys are
-    determined by the identifier and whose values fall back to preset
-    initial values unless overridden via kwargs.
-    '''
-
-    field = {}
-
-    if identifier == 'none':
-        return field  # return an empty dict rather than None
-
-    if identifier == 'uniform':
-        scale = kwargs['scale'] if 'scale' in kwargs else 0.05
-        names = ['low', 'high']
-        values = [-scale, scale]
-
-    elif identifier == 'constant':
-        names = ['value']
-        values = [0]
-
-    elif identifier == 'gaussian':
-        names = ['mean', 'std']
-        values = [0, 0.01]
-
-    elif identifier == 'conv2d':
-        names = ['stride', 'pad']
-        values = [1, 0]
-
-    elif identifier == 'lrn2d':
-        names = ['alpha', 'beta', 'knorm']
-        values = [1, 0.75, 1]
-
-    elif identifier == 'dropout':
-        names = ['ratio']
-        values = [0.5]
-
-    for i in range(len(names)):
-        field[names[i]] = kwargs[names[i]] if names[i] in kwargs else values[i]
-
-    return field
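
For example, calling the helper above with the 'uniform' identifier yields
symmetric bounds derived from scale (default 0.05), with any explicitly
passed field taking precedence over the preset value:

    field = get_init_values('uniform', scale=0.1)
    # field == {'low': -0.1, 'high': 0.1}
    field = get_init_values('uniform', low=0.0)
    # field == {'low': 0.0, 'high': 0.05}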

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/ed9587c0/tool/python/singa/layer.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/layer.py b/tool/python/singa/layer.py
deleted file mode 100644
index c9a992d..0000000
--- a/tool/python/singa/layer.py
+++ /dev/null
@@ -1,693 +0,0 @@
-#!/usr/bin/env python
-
-#/************************************************************
-#*
-#* Licensed to the Apache Software Foundation (ASF) under one
-#* or more contributor license agreements.  See the NOTICE file
-#* distributed with this work for additional information
-#* regarding copyright ownership.  The ASF licenses this file
-#* to you under the Apache License, Version 2.0 (the
-#* "License"); you may not use this file except in compliance
-#* with the License.  You may obtain a copy of the License at
-#*
-#*   http://www.apache.org/licenses/LICENSE-2.0
-#*
-#* Unless required by applicable law or agreed to in writing,
-#* software distributed under the License is distributed on an
-#* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-#* KIND, either express or implied.  See the License for the
-#* specific language governing permissions and limitations
-#* under the License.
-#*
-#*************************************************************/
-
-'''
-This script includes the Layer class and its subclasses, with which
-users can configure different types of layers for their model.
-'''
-import numpy as np
-from singa.parameter import Parameter, set_param_field
-from singa.initializations import get_init_values
-from singa.utils.utility import setval, generate_name
-from singa.utils.message import *
-from google.protobuf import text_format
-
-from singa.driver import Layer as SingaLayer, Updater as SingaUpdater,\
-                         intVector, floatVector, layerVector,\
-                         paramVector, floatArray_frompointer, DummyLayer
-
-class Layer(object):
-
-    singaupdater = None
-
-    def __init__(self, **kwargs):
-        '''
-        **kwargs (KEY=VALUE)
-          partition_dim = (int)  // partition dimension for net
-        '''
-
-        self.layer = Message('Layer', **kwargs).proto
-        # required field
-        if not 'name' in kwargs:
-            setval(self.layer, name=generate_name('layer', 1))
-
-        # layer connectivity is set in Model.build()
-        self.is_datalayer = False
-        self.singalayer = None
-        self.srclayers = []
-
-        # set src for Rafiki
-        if 'src' in kwargs:
-            self.src = kwargs['src']
-        else:
-            self.src = None
-
-    def setup(self, srclys):
-        ''' Create singa::Layer and store srclayers
-        '''
-        if self.singalayer == None:
-            self.singalayer = SingaLayer.CreateLayer(
-                                    self.layer.SerializeToString())
-            self.singaSrclayerVector = layerVector(len(srclys))
-            for i in range(len(srclys)):
-                self.srclayers.append(srclys[i])
-                self.singaSrclayerVector[i] = srclys[i].get_singalayer()
-            # set up the layer
-            SingaLayer.SetupLayer(self.singalayer,
-                                  self.layer.SerializeToString(),
-                                  self.singaSrclayerVector)
-
-    def ComputeFeature(self, *srclys):
-        ''' The method creates and sets up singa::Layer,
-            maintains its source layers, and then calls
-            ComputeFeature for data transformation.
-
-            *srclys = (list)  // a list of source layers
-        '''
-        # create singa::Layer and store srclayers
-        if self.singalayer == None:
-            if self.src != None:
-                srclys = self.src
-            self.singalayer = SingaLayer.CreateLayer(
-                                    self.layer.SerializeToString())
-            self.singaSrclayerVector = layerVector(len(srclys))
-            for i in range(len(srclys)):
-                self.srclayers.append(srclys[i])
-                self.singaSrclayerVector[i] = srclys[i].get_singalayer()
-            # set up the layer
-            SingaLayer.SetupLayer(self.singalayer,
-                                  self.layer.SerializeToString(),
-                                  self.singaSrclayerVector)
-
-        self.singalayer.ComputeFeature(1, self.singaSrclayerVector)
-
-    def ComputeGradient(self):
-        ''' The method calls singa::Layer's ComputeGradient for
-            gradient computation; parameters are updated separately
-            via UpdateParams().
-        '''
-        # call ComputeGradient of Singa
-        self.singalayer.ComputeGradient(1, self.singaSrclayerVector)
-
-    def UpdateParams(self, step, upd):
-        ''' The method updates parameter values
-        '''
-        # update parameters
-        singaParams = self.singalayer.GetParams()
-        for par in singaParams:
-            upd.singaupdater.Update(step, par, 1.0)
-
-    def GetParams(self):
-        ''' The method gets parameter values
-            singaParams[0] for weight
-            singaParams[1] for bias
-        '''
-        singaParams = self.singalayer.GetParams()
-        assert len(singaParams) == 2, 'weight and bias'
-        # for weight
-        weight_array = floatArray_frompointer(singaParams[0].mutable_cpu_data())
-        weight = [weight_array[i] for i in range(singaParams[0].size())]
-        weight = np.array(weight).reshape(singaParams[0].shape())
-        # for bias
-        bias_array = floatArray_frompointer(singaParams[1].mutable_cpu_data())
-        bias = [bias_array[i] for i in range(singaParams[1].size())]
-        bias = np.array(bias).reshape(singaParams[1].shape()[0], 1)
-
-        return weight, bias
-
-    def SetParams(self, *params):
-        ''' The method sets parameter values
-            params[0] for weight
-            params[1] for bias
-        '''
-        singaParams = self.singalayer.GetParams()
-        import pb2.common_pb2 as cm
-        for k in range(len(params)):
-            bp = cm.BlobProto()
-            bp.shape.append(int(params[k].shape[0]))
-            bp.shape.append(int(params[k].shape[1]))
-            for i in range(params[k].shape[0]):
-                for j in range(params[k].shape[1]):
-                    bp.data.append(params[k][i, j])
-            singaParams[k].FromProto(bp.SerializeToString())
-
-    def GetData(self):
-        ''' The method gets layer data values
-        '''
-        blobptr = self.singalayer.data(self.singalayer)
-        data_array = floatArray_frompointer(blobptr.mutable_cpu_data())
-        data = [data_array[i] for i in range(blobptr.count())]
-        return data
-
-    def display(self):
-        debug, flag = False, 0
-        print self.singalayer.ToString(debug, flag)
-
-    def get_singalayer(self):
-        return self.singalayer
-
-
-class Dummy(object):
-
-    def __init__(self, **kwargs):
-        ''' The Dummy layer is used as a data layer to feed/fetch
-            input data or label information
-        '''
-        self.is_datalayer = True
-        self.srclayers = None
-        self.singalayer = None
-
-        # create layer proto for Dummy layer
-        kwargs = {'name':'dummy', 'type':kDummy}
-        self.layer = Message('Layer', **kwargs).proto
-
-    def setup(self, data_shape):
-        ''' Create and Setup singa Dummy layer
-            called by load_model_parameter
-        '''
-        if self.singalayer == None:
-            setval(self.layer.dummy_conf, input=True)
-            setval(self.layer.dummy_conf, shape=data_shape)
-            self.singalayer = DummyLayer()
-            self.singalayer.Setup(self.layer.SerializeToString(),
-                                  layerVector(0))
-
-    def Feed(self, shape, data, aux_data):
-        ''' Create and Setup singa::DummyLayer for input data
-            Insert data using Feed()
-        '''
-        batchsize = shape[0]
-        hdim = reduce(lambda x, y: x*y, shape[1:])
-        datasize = batchsize * hdim
-
-        # create and setup the dummy layer
-        if self.singalayer == None:
-            self.setup(shape)
-
-        if data is not None:
-            data = data.astype(np.float)
-            dataVector = floatVector(datasize)
-            for i in range(batchsize):
-                for j in range(hdim):
-                    dataVector[i*hdim+j] = data[i, j]
-            labelVector = intVector(0)
-
-        if aux_data is not None:
-            aux_data = aux_data.astype(np.int)
-            labelVector = intVector(datasize)
-            for i in range(batchsize):
-                labelVector[i] = aux_data[i, 0]
-            dataVector = floatVector(0)
-
-        self.singalayer.Feed(batchsize, dataVector, labelVector)
-
-    def get_singalayer(self):
-        return self.singalayer.ToLayer()
-
-class ImageInput(Dummy):
-    ''' This class is used to feed image data
-    '''
-    def __init__(self, width=None, height=None, nb_channel=1):
-        super(ImageInput, self).__init__()
-        self.width = width
-        self.height = height
-        self.nb_channel = nb_channel
-
-    def Feed(self, image_data):
-        batchsize = image_data.shape[0]
-        if self.width == None or self.height == None:
-            hdim = image_data.shape[1]
-            imgsize = int(np.sqrt(hdim/self.nb_channel))
-            self.width = self.height = imgsize  # infer a square image size
-        shape = [batchsize, self.nb_channel, self.width, self.height]
-        Dummy.Feed(self, shape, image_data, None)
-
-class LabelInput(Dummy):
-    ''' This class is used to feed label data
-    '''
-    def __init__(self):
-        super(LabelInput, self).__init__()
-
-    def Feed(self, label_data):
-        Dummy.Feed(self, label_data.shape, None, label_data)
-
-
-class Data(Layer):
-
-    def __init__(self, load, phase='train', checkpoint=None,
-                 conf=None, **kwargs):
-        '''
-        required
-          load       = (string)  // type of data
-        optional
-          phase      = (string)  // phase of data layer
-          checkpoint = (string)  // checkpoint path
-          conf       = (Store)   // Store object
-          **kwargs (KEY=VALUE)
-            partition_dim = (int)  // partition dimension for net
-        '''
-
-        assert load != None, 'data type should be specified'
-        if load == 'kData':
-            super(Data, self).__init__(name=generate_name('data'),
-                                       user_type=load, **kwargs)
-        else:
-            self.layer_type = enumLayerType(load)
-            super(Data, self).__init__(name=generate_name('data'),
-                                       type=self.layer_type, **kwargs)
-        self.is_datalayer = True
-
-        # include/exclude
-        setval(self.layer, include=enumPhase(phase))
-        #setval(self.layer, exclude=kTest if phase=='train' else kTrain)
-
-        if conf == None:
-            if load == 'kData':
-                setval(self.layer.Extensions[data_conf], **kwargs)
-            else:
-                setval(self.layer.store_conf, **kwargs)
-        else:
-            setval(self.layer, store_conf=conf.proto)
-
-        self.checkpoint = checkpoint # checkpoint for training data
-
-
-class Convolution2D(Layer):
-
-    def __init__(self, nb_filter=0, kernel=0, stride=1, pad=0,
-                 init=None, w_param=None, b_param=None,
-                 activation=None, **kwargs):
-        '''
-        required
-          nb_filter = (int)        // the number of filters
-          kernel    = (int/tuple)  // the size of filter
-        optional
-          stride    = (int/tuple)  // the size of stride
-          pad       = (int/tuple)  // the size of padding
-          init      = (string)     // 'uniform', 'gaussian', 'constant'
-          w_param   = (Parameter)  // Parameter object for weight
-          b_param   = (Parameter)  // Parameter object for bias
-          **kwargs (KEY=VALUE)
-            w_lr = (float) // learning rate multiplier for weight, used to
-                           // scale the learning rate when updating parameters.
-            w_wd = (float) // weight decay multiplier for weight, used to
-                           // scale the weight decay when updating parameters.
-            b_lr = (float) // learning rate multiplier for bias
-            b_wd = (float) // weight decay multiplier for bias
-        '''
-
-        assert nb_filter > 0, 'nb_filter should be set as positive int'
-        super(Convolution2D, self).__init__(name=generate_name('conv', 1),
-                                            type=kCConvolution, **kwargs)
-        fields = {"num_filters":nb_filter}
-        # for kernel
-        if type(kernel) == int:
-            fields['kernel'] = kernel
-        else:
-            fields['kernel_x'] = kernel[0]
-            fields['kernel_y'] = kernel[1]
-        # for stride
-        if type(stride) == int:
-            fields['stride'] = stride
-        else:
-            fields['stride_x'] = stride[0]
-            fields['stride_y'] = stride[1]
-        # for pad
-        if type(pad) == int:
-            fields['pad'] = pad
-        else:
-            fields['pad_x'] = pad[0]
-            fields['pad_y'] = pad[1]
-
-        setval(self.layer.convolution_conf, **fields)
-
-        # parameter w
-        if w_param == None:
-            self.init = 'gaussian' if init == None else init
-            w_param = Parameter(init=self.init)
-        set_param_field(w_param.param, 'w', True, **kwargs)
-        setval(self.layer, param=w_param.param)
-
-        # parameter b
-        if b_param == None:
-            self.init = 'constant' if init == None else init
-            b_param = Parameter(init=self.init) # default: constant
-        set_param_field(b_param.param, 'b', True, **kwargs)
-        setval(self.layer, param=b_param.param)
-
-        # following layers: e.g., activation, dropout, etc.
-        if activation:
-            self.mask = Activation(activation=activation).layer
-
-
-class MaxPooling2D(Layer):
-
-    def __init__(self, pool_size=None,
-                 stride=1, ignore_border=True, **kwargs):
-        '''
-        Max Pooling layer
-
-        required
-          pool_size     = (int|tuple) // the size for pooling
-        optional
-          stride        = (int)       // the size of striding
-          ignore_border = (bool)      // flag for padding
-          **kwargs                    // fields for Layer class
-        '''
-
-        assert pool_size != None, 'pool_size is required'
-        if type(pool_size) == int:
-            pool_size = (pool_size, pool_size)
-        assert type(pool_size) == tuple and pool_size[0] == pool_size[1], \
-               'currently pool size should be square in Singa'
-        super(MaxPooling2D, self).__init__(name=generate_name('pool'),
-                                           type=kCPooling, **kwargs)
-        fields = {'pool' : PoolingProto().MAX,
-                  'kernel' : pool_size[0],
-                  'stride' : stride,
-                  'pad' : 0 if ignore_border else 1}
-        setval(self.layer.pooling_conf, **fields)
-
-class AvgPooling2D(Layer):
-
-    def __init__(self, pool_size=None,
-                 stride=1, ignore_border=True, **kwargs):
-        '''
-        required
-          pool_size     = (int|tuple) // size for pooling
-        optional
-          stride        = (int)       // size of striding
-          ignore_border = (bool)      // flag for padding
-          **kwargs                    // fields for Layer class
-        '''
-
-        assert pool_size != None, 'pool_size is required'
-        if type(pool_size) == int:
-            pool_size = (pool_size, pool_size)
-        assert type(pool_size) == tuple and pool_size[0] == pool_size[1], \
-               'currently pool size should be square in Singa'
-        super(AvgPooling2D, self).__init__(name=generate_name('pool'),
-                                           type=kCPooling, **kwargs)
-        fields = {'pool' : PoolingProto().AVG,
-                  'kernel' : pool_size[0],
-                  'stride' : stride,
-                  'pad' : 0 if ignore_border else 1}
-        setval(self.layer.pooling_conf, **fields)
-
-class LRN2D(Layer):
-
-    def __init__(self, size=0, **kwargs):
-        '''
-        required
-          size = (int)  // local size
-        '''
-
-        super(LRN2D, self).__init__(name=generate_name('norm'), type=kLRN, **kwargs)
-        # required
-        assert size != 0, 'local size should be set'
-        self.layer.lrn_conf.local_size = size
-        init_values = get_init_values('lrn2d', **kwargs)
-        setval(self.layer.lrn_conf, **init_values)
-
-class Loss(Layer):
-
-    def __init__(self, lossname, topk=1, **kwargs):
-        '''
-        required
-          lossname = (string) // softmaxloss, euclideanloss
-        '''
-        self.layer_type = enumLayerType(lossname)
-        super(Loss, self).__init__(name=generate_name(lossname),
-                                         type=self.layer_type, **kwargs)
-        if lossname == 'softmaxloss':
-            self.layer.softmaxloss_conf.topk = topk
-
-class Activation(Layer):
-
-    def __init__(self, activation='stanh', **kwargs):
-        '''
-        required
-          activation = (string) // relu, sigmoid, tanh, stanh, softmax.
-        '''
-        if activation == 'tanh':
-            print 'Warning: Tanh layer is not supported for CPU'
-
-        self.name = activation
-        self.layer_type = kActivation
-        if activation == 'stanh':
-            self.layer_type = kSTanh
-        elif activation == 'softmax':
-            self.layer_type = kSoftmax
-        super(Activation, self).__init__(name=generate_name(self.name),
-                                         type=self.layer_type, **kwargs)
-        if activation == 'relu':
-            self.layer.activation_conf.type = RELU
-        elif activation == 'sigmoid':
-            self.layer.activation_conf.type = SIGMOID
-        elif activation == 'tanh':
-            self.layer.activation_conf.type = TANH # for GPU
-        #elif activation == 'stanh':
-        #    self.layer.activation_conf.type = STANH
-
-
-class Dropout(Layer):
-
-    def __init__(self, ratio=0.5, **kwargs):
-        '''
-        required
-          ratio = (float) // ratio of drop out nodes
-        '''
-
-        self.name = 'dropout'
-        self.layer_type = enumLayerType(self.name)
-        super(Dropout, self).__init__(name=generate_name(self.name),
-                                      type=self.layer_type, **kwargs)
-        self.layer.dropout_conf.dropout_ratio = ratio
-
-class Accuracy(Layer):
-
-    def __init__(self, **kwargs):
-        '''
-        '''
-
-        self.name = 'accuracy'
-        self.layer_type = enumLayerType(self.name)
-        super(Accuracy, self).__init__(name=generate_name(self.name),
-                                       type=self.layer_type, **kwargs)
-
-class RGB(Layer):
-
-    def __init__(self, meanfile=None, **kwargs):
-        '''
-        required
-          meanfile = (string) // path to meanfile (deprecated)
-        '''
-
-        assert meanfile != None, 'meanfile should be specified'
-        self.name = 'rgb'
-        self.layer_type = kRGBImage
-        super(RGB, self).__init__(name=generate_name(self.name),
-                                  type=self.layer_type)
-        self.layer.rgbimage_conf.meanfile = meanfile
-
-class Dense(Layer):
-
-    def __init__(self, output_dim=0, activation=None,
-                 init=None, w_param=None, b_param=None, input_dim=None,
-                 **kwargs):
-        '''
-        required
-          output_dim = (int)
-        optional
-          activation = (string)
-          init       = (string)     // 'uniform', 'gaussian', 'constant'
-          w_param    = (Parameter)  // Parameter object for weight
-          b_param    = (Parameter)  // Parameter object for bias
-          **kwargs
-            w_lr = (float) // learning rate multiplier for weight, used to
-                           // scale the learning rate when updating parameters.
-            w_wd = (float) // weight decay multiplier for weight, used to
-                           // scale the weight decay when updating parameters.
-            b_lr = (float) // learning rate multiplier for bias
-            b_wd = (float) // weight decay multiplier for bias
-        '''
-        # required
-        assert output_dim > 0, 'output_dim should be set'
-        super(Dense, self).__init__(type=kInnerProduct, **kwargs)
-        self.layer.innerproduct_conf.num_output = output_dim
-        if 'transpose' in kwargs:
-            self.layer.innerproduct_conf.transpose = kwargs['transpose']
-
-        # parameter w (default: gaussian)
-        if w_param == None:
-            self.init = 'gaussian' if init == None else init
-            w_param = Parameter(init=self.init)
-        set_param_field(w_param.param, 'w', False, **kwargs)
-        setval(self.layer, param=w_param.param)
-
-        # parameter b (default: constant)
-        if b_param == None:
-            self.init = 'constant' if init == None else init
-            b_param = Parameter(init=self.init)
-        set_param_field(b_param.param, 'b', False, **kwargs)
-        setval(self.layer, param=b_param.param)
-
-        # following layers: e.g., activation, dropout, etc.
-        if activation:
-            self.mask = Activation(activation=activation).layer
-
-
-''' Classes to deal with multiple layers
-'''
-class Autoencoder(object):
-
-    def __init__(self, hid_dim=None, out_dim=0,
-                 activation=None, param_share=True):
-        '''
-        Generate a set of layers (like MLP) for encoder and decoder
-        The layers are expanded and added in Sequential.add()
-
-        required
-          hid_dim     = (int/list) // the number of nodes in hidden layers
-          out_dim     = (int)      // the number of nodes in the top layer
-        optional
-          activation  = (string)
-          param_share = (bool)     // to share params in encoder and decoder
-        '''
-
-        # required
-        assert out_dim > 0, 'out_dim should be set'
-        self.out_dim = out_dim
-        assert hid_dim != None, 'hid_dim should be set'
-        self.hid_dim = [hid_dim] if type(hid_dim) == int else hid_dim
-
-        self.layer_type = 'AutoEncoder'
-        self.activation = activation
-        self.param_share = param_share
-
-class RBM(Layer):
-
-    def __init__(self, out_dim=None, w_param=None, b_param=None,
-                 sampling=None, **kwargs):
-        '''
-        Generate a set of layers (like MLP) according to the number of elements
-          in out_dim, and on top of it, two layers RBMVis and RBMHid with
-          bidirectional connection
-        The layers are expanded and added in Energy.add()
-
-        required
-          out_dim  = (int) or (int list) // the number of hidden nodes
-        optional
-          w_param  = (Parameter)  // Parameter object for weight
-          b_param  = (Parameter)  // Parameter object for bias
-          sampling = (string)
-        '''
-
-        assert out_dim > 0, 'out_dim should be set'
-        self.out_dim = [out_dim] if type(out_dim) == int else out_dim
-
-        self.name = kwargs['name'] if 'name' in kwargs else 'RBMVis'
-        self.layer_type = kwargs['type'] if 'type' in kwargs else kRBMVis
-        super(RBM, self).__init__(name=generate_name(self.name,
-                                                     withnumber=False),
-                                  type=self.layer_type, **kwargs)
-        setval(self.layer.rbm_conf, hdim=self.out_dim[-1])
-        if self.layer_type == kRBMHid and sampling != None:
-            if sampling == 'gaussian':
-                setval(self.layer.rbm_conf, gaussian=True)
-
-        # parameter w
-        if w_param == None:
-            w_param = Parameter(init='gaussian', **kwargs)
-            set_param_field(w_param.param, 'w', withnumber=False,
-                            level=len(self.out_dim), **kwargs)
-        else:
-            if self.layer_type == kRBMHid:
-                del kwargs['name']
-            else:
-                set_param_field(w_param.param, 'w', withnumber=False,
-                                level=len(self.out_dim), **kwargs)
-        setval(self.layer, param=w_param.param)
-
-        # parameter b
-        if b_param == None:
-            b_param = Parameter(init='constant', **kwargs)
-            set_param_field(b_param.param, 'b', withnumber=False,
-                            level=len(self.out_dim), **kwargs)
-        else:
-            if self.layer_type == kRBMHid:
-                pass
-            else:
-                set_param_field(b_param.param, 'b', withnumber=False,
-                                level=len(self.out_dim), **kwargs)
-        setval(self.layer, param=b_param.param)
-
-        if self.layer_type == kRBMVis:
-            wname = w_param.param.name
-            parw = Parameter(name=wname+"_", init='none', share_from=wname)
-            bname = b_param.param.name
-            parb = Parameter(name=bname+"2", wd=0, init='constant')
-            self.bidirect = RBM(self.out_dim, name='RBMHid', type=kRBMHid,
-                         w_param=parw, b_param=parb, sampling=sampling).layer
-
-class Embedding(Layer):
-
-    def __init__(self, in_dim, out_dim, w_param=None, **kwargs):
-
-        super(Embedding, self).__init__(name=generate_name('embedding', 1),
-                                        user_type='kEmbedding')
-        fields = {'vocab_size': in_dim,
-                  'word_dim': out_dim}
-        setval(self.layer.Extensions[embedding_conf], **fields)
-        if w_param == None:
-            # default: uniform
-            w_param = Parameter(name=generate_name('w'), init='uniform')
-        else:
-            set_param_field(w_param.param, 'w', True, **kwargs)
-        setval(self.layer, param=w_param.param)
-
-class RNNLM(Layer):
-
-    def __init__(self, dim, w_param=None, **kwargs):
-
-        super(RNNLM, self).__init__(name=generate_name('hidden', 1),
-                                    user_type='kHidden')
-        if w_param == None:
-            # default: uniform
-            w_param = Parameter(name=generate_name('w'), init='uniform')
-        else:
-            set_param_field(w_param.param, 'w', True, **kwargs)
-        setval(self.layer, param=w_param.param)
-
-class UserLossRNNLM(Layer):
-
-    def __init__(self, **kwargs):
-
-        super(UserLossRNNLM, self).__init__(name=generate_name('loss', 1),
-                                            user_type='kLoss')
-        self.layer.Extensions[loss_conf].nclass = kwargs['nclass']
-        self.layer.Extensions[loss_conf].vocab_size = kwargs['vocab_size']
-        setval(self.layer, param=Parameter(name=generate_name('w'),
-                                           init='uniform', scale=0.3).param)
-        setval(self.layer, param=Parameter(name=generate_name('w', 1),
-                                           init='uniform', scale=0.3).param)
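
A short usage sketch of the classes above, mirroring the deleted
train_cifar10.py example earlier in this commit:

    conv = Convolution2D(32, kernel=5, stride=1, pad=2, w_std=0.0001, b_lr=2)
    pool = MaxPooling2D(pool_size=(3, 3), stride=2)
    relu = Activation('relu')
    fc = Dense(10, w_wd=250, b_lr=2, b_wd=0)
    loss = Loss('softmaxloss')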

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/ed9587c0/tool/python/singa/model.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/model.py b/tool/python/singa/model.py
deleted file mode 100644
index 4a6a688..0000000
--- a/tool/python/singa/model.py
+++ /dev/null
@@ -1,716 +0,0 @@
-#!/usr/bin/env python
-
-#/************************************************************
-#*
-#* Licensed to the Apache Software Foundation (ASF) under one
-#* or more contributor license agreements.  See the NOTICE file
-#* distributed with this work for additional information
-#* regarding copyright ownership.  The ASF licenses this file
-#* to you under the Apache License, Version 2.0 (the
-#* "License"); you may not use this file except in compliance
-#* with the License.  You may obtain a copy of the License at
-#*
-#*   http://www.apache.org/licenses/LICENSE-2.0
-#*
-#* Unless required by applicable law or agreed to in writing,
-#* software distributed under the License is distributed on an
-#* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-#* KIND, either express or implied.  See the License for the
-#* specific language governing permissions and limitations
-#* under the License.
-#*
-#*************************************************************/
-
-'''
-This script includes the Model class and its subclasses, with which
-users can configure model parameters.
-'''
-
-import sys, re, subprocess
-from singa.layer import *
-from singa.utils.utility import *
-from singa.utils.message import *
-from google.protobuf import text_format
-
-from singa.driver import Updater as SingaUpdater
-
-class Model(object):
-    ''' Configure model parameter
-        - add(): add layer
-        - compile(): specify Updater and Cluster protos
-        - build(): construct a model (i.e., NetProto)
-        - fit(): run singa for training
-        - evaluate(): run singa for testing
-    '''
-
-    def __init__(self, name='my model', argv=None, label=False):
-        '''
-        optional
-          name  = (string) // name of model/job
-          argv             // pass sys.argv to source
-          label = (bool)   // whether a label layer exists (deprecated)
-        '''
-        self.jobconf = Message('Job', name=name).proto
-        self.layers = []
-        self.label = label
-        self.argv = argv
-        self.result = None
-        self.last_checkpoint_path = None
-        self.cudnn = False
-        self.accuracy = False
-
-    def add(self, layer):
-        '''
-        add layer
-        '''
-        pass
-
-    def exist_datalayer(self, phase):
-        '''
-        check if data layer exists
-        '''
-        for ly in self.layers:
-            if enumPhase(phase) in ly.layer.include:
-                return True
-        return False
-
-    def compile(self, optimizer=None, cluster=None,
-                      loss=None, topk=1, **kwargs):
-        '''
-        required
-          optimizer = (Updater) // updater settings, e.g., SGD
-          cluster   = (Cluster) // cluster settings
-        optional
-          loss      = (string)  // name of loss function type
-          topk      = (int)     // nb of results considered to compute accuracy
-        '''
-        assert optimizer != None, 'optimizer (Updater component) should be set'
-        assert cluster != None, 'cluster (Cluster component) should be set'
-        setval(self.jobconf, updater=optimizer.proto)
-        setval(self.jobconf, cluster=cluster.proto)
-
-        # take care of loss function layer
-        if loss == None:
-            print 'loss layer is not set'
-        else:
-            if hasattr(self.layers[-1], 'mask'):
-                ly = self.layers[-1].mask
-            else:
-                ly = self.layers[-1].layer
-
-            # take care of the last layer
-            if ly.type == enumLayerType('softmax'):
-                # revise the last layer
-                if loss == 'categorical_crossentropy':
-                    setval(ly, type=enumLayerType('softmaxloss'))
-                    setval(ly.softmaxloss_conf, topk=topk)
-                elif loss == 'mean_squared_error':
-                    setval(ly, type=enumLayerType('euclideanloss'))
-            else:
-                # add new layer
-                if loss == 'categorical_crossentropy':
-                    self.add(Loss('softmaxloss', topk=topk))
-                elif loss == 'mean_squared_error':
-                    self.add(Loss('euclideanloss'))
-                elif loss == 'user_loss_rnnlm': # user-defined loss layer
-                    self.add(UserLossRNNLM(nclass=kwargs['nclass'],
-                                           vocab_size=kwargs['in_dim']))
-
-    def build(self):
-        '''
-        construct neuralnet proto
-        '''
-        net = NetProto()
-        slyname = self.layers[0].layer.name
-        for i in range(len(self.layers)):
-            ly = net.layer.add()
-            ly.CopyFrom(self.layers[i].layer)
-            lastly = ly
-            if self.layers[i].is_datalayer == True:
-                continue
-            getattr(ly, 'srclayers').append(slyname)
-            slyname = ly.name
-            if hasattr(self.layers[i], 'mask'):
-                mly = net.layer.add()
-                mly.CopyFrom(self.layers[i].mask)
-                getattr(mly, 'srclayers').append(slyname)
-                slyname = mly.name
-                lastly = mly
-            if hasattr(self.layers[i], 'bidirect'):
-                bly = net.layer.add()
-                bly.CopyFrom(self.layers[i].bidirect)
-                getattr(bly, 'srclayers').append(slyname)
-
-        # deal with label layer (deprecated)
-        if self.label == True:
-            label_layer = Layer(name='label', type=kLabel)
-            ly = net.layer.add()
-            ly.CopyFrom(label_layer.layer)
-            getattr(ly, 'srclayers').append(self.layers[0].layer.name)
-            getattr(lastly, 'srclayers').append(label_layer.layer.name)
-        else:
-            if lastly.name == 'RBMVis':
-                getattr(lastly, 'srclayers').append(bly.name)
-            else:
-                getattr(lastly, 'srclayers').append(self.layers[0].layer.name)
-
-        if self.accuracy:
-            smly = net.layer.add()
-            smly.CopyFrom(Layer(name='softmax', type=kSoftmax).layer)
-            setval(smly, include=kTest)
-            getattr(smly, 'srclayers').append(self.layers[-1].layer.name)
-            aly = net.layer.add()
-            aly.CopyFrom(Accuracy().layer)
-            setval(aly, include=kTest)
-            getattr(aly, 'srclayers').append('softmax')
-            getattr(aly, 'srclayers').append(self.layers[0].layer.name)
-
-        # use of cudnn
-        if self.cudnn:
-            self.set_cudnn_layer_type(net)
-
-        setval(self.jobconf, neuralnet=net)
-
-    def fit(self, data=None, alg='bp', nb_epoch=0,
-            with_test=False, execpath='', device=None, **fields):
-        '''
-        required
-          data        = (Data)     // Data class object for training data
-          alg         = (string)   // algorithm, e.g., 'bp', 'cd'
-          nb_epoch    = (int)      // the number of training steps
-        optional
-          with_test   = (bool)     // flag if singa runs for test data
-          execpath    = (string)   // path to the user's own singa executable
-          device      = (int/list) // a list of gpu ids
-          **fields (KEY=VALUE)
-            batch_size       = (int)    // batch size for training data
-            train_steps      = (int)    // number of training steps; overrides nb_epoch
-            disp_freq        = (int)    // frequency to display training info
-            disp_after       = (int)    // display after this number
-            validate_data    = (Data)   // valid data, specified in load_data()
-            validate_freq    = (int)    // frequency of validation
-            validate_steps   = (int)    // total number of steps for validation
-            validate_after   = (int)    // start validation after this number
-            checkpoint_path  = (string) // path to checkpoint file
-            checkpoint_freq  = (int)    // frequency for checkpoint
-            checkpoint_after = (int)    // start checkpointing after this number
-        '''
-        assert data is not None, 'Training data should be set'
-        assert nb_epoch > 0, 'Training steps should be set'
-
-        if 'batch_size' in fields:  # if new value is set, replace it
-            setval(data.layer.store_conf, batchsize=fields['batch_size'])
-
-        # insert layer for training
-        if not self.exist_datalayer('train'):
-            self.layers.insert(0, data)
-        setval(self.jobconf, train_steps=nb_epoch)
-        setval(self.jobconf, disp_freq=nb_epoch/10)
-        if 'disp_freq' in fields:
-            setval(self.jobconf, disp_freq=fields['disp_freq'])
-
-        if 'validate_data' in fields:
-            self.layers.insert(1, fields['validate_data'])
-            setval(self.jobconf, validate_freq=nb_epoch/10)
-
-        setval(self.jobconf, **fields)
-
-        # loading checkpoint if it is set
-        if data.checkpoint is not None:
-            setval(self.jobconf, checkpoint_path=data.checkpoint)
-
-        # save model parameter (i.e., checkpoint_path)
-        setval(self.jobconf, checkpoint_freq=nb_epoch)
-        self.last_checkpoint_path = '{0}/step{1}-worker0'.format(
-                         self.jobconf.cluster.workspace, nb_epoch)
-
-        # set the train_one_batch component, using backpropagation by default
-        setval(self.jobconf,
-               train_one_batch=Algorithm(type=enumAlgType(alg)).proto)
-
-        # use of cudnn
-        if device is not None:
-            setval(self.jobconf, gpu=device)
-            self.cudnn = True
-
-        # start to run singa for training
-        if not with_test:
-            self.build()  # construct the NeuralNet component
-            #self.display()
-            return SingaRun(jobproto=self.jobconf,
-                            argv=self.argv, execpath=execpath)
-        else:
-            # run singa in evaluate() with test data
-            pass
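-        # A usage sketch (hypothetical values; X_train is a Data object
-        # returned by a load_data() helper):
-        #   m.fit(X_train, alg='bp', nb_epoch=1000,
-        #         batch_size=64, disp_freq=100)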
-
-    def evaluate(self, data=None, alg='bp',
-                 checkpoint_path=None, execpath='',
-                 device=None, show_acc=False, **fields):
-        '''
-        required
-          data = (Data)   // Data class object for testing data
-        optional
-          alg             = (string)   // algorithm type, 'bp' at default
-          checkpoint_path = (list)     // checkpoint path
-          execpath        = (string)   // path to the user's own singa executable
-          device          = (int/list) // a list of gpu ids
-          show_acc        = (bool)     // compute and show the accuracy
-          **fields (KEY=VALUE)
-            batch_size   = (int)  // batch size for testing data
-            test_freq    = (int)  // frequency of testing
-            test_steps   = (int)  // total number of steps for testing
-            test_after   = (int)  // start testing after this number of steps
-        '''
-        assert data is not None, 'Testing data should be set'
-        is_testonly = False
-
-        if 'batch_size' in fields:  # if new value is set, replace it
-            setval(data.layer.store_conf, batchsize=fields['batch_size'])
-
-        # insert layer for testing
-        if not self.exist_datalayer('test'):
-            self.layers.insert(0, data)
-
-        # loading checkpoint if singa runs only for testing
-        if not self.exist_datalayer('train'):
-            is_testonly = True
-            if checkpoint_path is None:
-                print 'checkpoint_path has not been specified'
-            else:
-                setval(self.jobconf, checkpoint_path=checkpoint_path)
-
-        steps = fields['test_steps'] if 'test_steps' in fields else 10
-        setval(self.jobconf, test_steps=steps)
-        setval(self.jobconf, **fields)
-
-        # set the train_one_batch component, using backpropagation by default
-        setval(self.jobconf,
-               train_one_batch=Algorithm(type=enumAlgType(alg)).proto)
-
-        # use of cudnn
-        if device is not None:
-            setval(self.jobconf, gpu=device)
-            self.cudnn = True
-
-        # set True if showing the accuracy
-        self.accuracy = show_acc
-
-        self.build()  # construct the NeuralNet component
-
-        #--- generate job.conf file for debug purpose
-        #filename = 'job.conf'
-        #with open(filename, 'w') as f:
-        #  f.write(text_format.MessageToString(self.jobconf.cluster))
-        #self.display()
-
-        #--- run singa ---
-        return SingaRun(jobproto=self.jobconf,
-                        argv=self.argv, execpath=execpath, testmode=is_testonly)
-        #return SingaRun_script(filename=filename, execpath=execpath)
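-        # A test-only sketch (hypothetical values), reloading parameters
-        # saved by fit():
-        #   result = m.evaluate(X_test, test_steps=100,
-        #                       checkpoint_path=m.last_checkpoint_path)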
-
-
-    def display(self):
-        ''' print out job proto
-        '''
-        print text_format.MessageToString(self.jobconf)
-
-    def set_cudnn_layer_type(self, net):
-        ''' convert LayerType to CudnnLayerType
-        '''
-        for i in range(len(net.layer)):
-            ly_type = net.layer[i].type
-            cudnn_ly_type = ly_type
-            if ly_type == kCConvolution: cudnn_ly_type = kCudnnConv
-            elif ly_type == kCPooling: cudnn_ly_type = kCudnnPool
-            elif ly_type == kLRN: cudnn_ly_type = kCudnnLRN
-            elif ly_type == kSoftmax: cudnn_ly_type = kCudnnSoftmax
-            elif ly_type == kSoftmaxLoss: cudnn_ly_type = kCudnnSoftmaxLoss
-            elif ly_type == kActivation:
-                cudnn_ly_type = kCudnnActivation
-            elif ly_type == kSTanh:
-                print 'Error report: STanh layer is not supported for GPU'
-            # The following activation types could also map to
-            # kCudnnActivation (kept as comments for reference):
-            #elif ly_type == kReLU:
-            #    cudnn_ly_type = kCudnnActivation
-            #    net.layer[i].activation_conf.type = RELU
-            #elif ly_type == kSigmoid:
-            #    cudnn_ly_type = kCudnnActivation
-            #    net.layer[i].activation_conf.type = SIGMOID
-            #elif ly_type == kTanh:
-            #    cudnn_ly_type = kCudnnActivation
-            #    net.layer[i].activation_conf.type = TANH
-            net.layer[i].type = cudnn_ly_type
-
-    def show(self):
-        ''' print the name of each layer in the neuralnet
-        '''
-        for ly in self.jobconf.neuralnet.layer:
-            print ly.name
-
-    def layer_by_id(self, k):
-        return self.jobconf.neuralnet.layer[k]
-
-    def layer_by_name(self, name):
-        matches = [ly for ly in self.layers if ly.layer.name == name]
-        return matches[0] if matches else None
-
-    def size(self):
-        return len(self.jobconf.neuralnet.layer)
-
-class Energy(Model):
-    ''' energy model
-    '''
-
-    def __init__(self, name='my model', argv=[], label=False):
-        super(Energy, self).__init__(name=name, argv=argv, label=label)
-
-    def add(self, layer):
-        if hasattr(layer, 'layer_type'):
-            if layer.layer_type == kRBMVis:
-                dim = 0
-                for i in range(1, len(layer.out_dim)):
-                    parw = Parameter(name='w', init='none', level=i)
-                    parb = Parameter(name='b', init='none', level=i)
-                    dim = layer.out_dim[i-1]
-                    self.layers.append(Dense(dim, w_param=parw, b_param=parb,
-                                             activation='sigmoid'))
-                self.layers.append(layer)
-
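-# An Energy usage sketch (hypothetical dimensions; RBM is the stackable
-# RBM layer class assumed to be defined elsewhere):
-#   m = Energy('rbm', sys.argv)
-#   m.add(RBM([256, 64], sampling='gaussian', w_std=0.1))
-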
-class Sequential(Model):
-    ''' sequential model
-    '''
-
-    def __init__(self, name='my model', argv=[], label=False):
-        super(Sequential, self).__init__(name=name, argv=argv, label=label)
-
-    def add(self, layer):
-        if hasattr(layer, 'layer_type'):
-            if layer.layer_type == 'AutoEncoder':
-                dim = 0
-                if layer.param_share:
-                    # Encoding
-                    for i in range(1, len(layer.hid_dim)+1):
-                        parw = Parameter(name='w',
-                                         init='none', level=i)
-                        parb = Parameter(name='b',
-                                         init='none', level=i)
-                        dim = layer.hid_dim[i-1]
-                        if i == len(layer.hid_dim): activation = None
-                        else: activation = layer.activation
-                        self.layers.append(Dense(dim,
-                                                 w_param=parw, b_param=parb,
-                                                 activation=activation))
-                    # Decoding
-                    for i in range(len(layer.hid_dim), 0, -1):
-                        parw = Parameter(name=generate_name('w', 2),
-                                         init='none')
-                        parb = Parameter(name=generate_name('b', 2),
-                                         init='none')
-                        setval(parw.param, share_from='w'+str(i))
-                        setval(parb.param, name='b'+str(i))
-                        if i == 1: dim = layer.out_dim
-                        else: dim = layer.hid_dim[i-2]
-                        self.layers.append(Dense(dim,
-                                                 w_param=parw, b_param=parb,
-                                                 activation=layer.activation,
-                                                 transpose=True))
-                else:
-                    # MLP
-                    for i in range(1, len(layer.hid_dim)+2):
-                        parw = Parameter(name='w',
-                                         init='none', level=i)
-                        parb = Parameter(name='b',
-                                         init='none', level=i)
-                        if i == len(layer.hid_dim)+1: dim = layer.out_dim
-                        else: dim = layer.hid_dim[i-1]
-                        self.layers.append(Dense(dim,
-                                                 w_param=parw, b_param=parb,
-                                                 activation=layer.activation))
-            else:
-                self.layers.append(layer)
-        else:
-            self.layers.append(layer)
-
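-# A Sequential usage sketch for a tied-weight autoencoder (hypothetical
-# dimensions; AutoEncoder is assumed to provide hid_dim/out_dim/param_share):
-#   m = Sequential('autoencoder', sys.argv)
-#   m.add(AutoEncoder(out_dim=784, hid_dim=[128, 32],
-#                     activation='sigmoid', param_share=True))
-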
-
-class Store(object):
-
-    def __init__(self, **kwargs):
-        '''
-        **kwargs
-            path       = (string)  // path to dataset
-            backend    = (string)  // storage backend, e.g., 'kvfile'
-            batch_size = (int)     // batch size of dataset
-            shape      = (int)     // shape (dimension) of a data instance
-        '''
-        self.proto = Message('Store', **kwargs).proto
-
-class Algorithm(object):
-
-    def __init__(self, type=enumAlgType('bp'), **kwargs):
-        '''
-        type = (enum)  // type of algorithm, enumAlgType('bp') at default
-        '''
-        alg = Message('Alg', alg=type, **kwargs).proto
-        if type == enumAlgType('cd'):
-            setval(alg.cd_conf, **kwargs)
-        self.proto = alg
-
-class Updater(object):
-
-    def __init__(self, upd_type, lr, lr_type,
-                 decay, momentum,
-                 step, step_lr, **fields):
-        '''
-        required
-          upd_type = (enum)        // enum type of updater
-          lr       = (float)       // base learning rate
-        optional
-          lr_type  = (string)      // learning rate schedule, 'fixed' at default
-          decay    = (float)       // weight decay, set if positive
-          momentum = (float)       // momentum, set if positive
-          step     = (int/list)    // steps for the 'manual' schedule
-          step_lr  = (float/list)  // learning rates for the 'manual' schedule
-        '''
-        upd = Message('Updater', type=upd_type, **fields).proto
-        setval(upd.learning_rate, base_lr=lr)
-        if decay > 0:
-            setval(upd, weight_decay=decay)
-        if momentum > 0:
-            setval(upd, momentum=momentum)
-
-        if lr_type is None or lr_type == 'fixed':
-            setval(upd.learning_rate, type=kFixed)
-        elif lr_type == 'step':
-            cp = Message('Step', change_freq=60, gamma=0.997)
-            setval(upd.learning_rate, type=kStep, step_conf=cp.proto)
-        elif lr_type == 'manual':
-            cp = Message('FixedStep', step=step, step_lr=step_lr)
-            setval(upd.learning_rate, type=kFixedStep, fixedstep_conf=cp.proto)
-        elif lr_type == 'linear':
-            cp = Message('Linear', change_freq=10, final_lr=0.1)
-            setval(upd.learning_rate, type=kLinear, linear_conf=cp.proto)
-
-        self.proto = upd
-        self.singaupdater = None
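-        # Example schedules via the SGD subclass below (hypothetical values):
-        #   SGD(lr=0.01, lr_type='step')    # stepwise decay
-        #   SGD(lr=0.01, lr_type='manual',
-        #       step=[0, 60, 65], step_lr=[0.01, 0.001, 0.0001])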
-
-    def Update(self, step, layer):
-        ''' This method updates parameters of layer
-            step = (int)  // training step, i.e., param version
-        '''
-        if self.singaupdater is None:
-            self.singaupdater = SingaUpdater.CreateUpdater(
-                                  self.proto.SerializeToString())
-
-        # update parameters
-        singaParams = layer.singalayer.GetParams()
-        for par in singaParams:
-            self.singaupdater.Update(step, par, 1.0)
-    
-
-class SGD(Updater):
-
-    def __init__(self, lr=0.01, lr_type=None,
-                 decay=0, momentum=0,
-                 step=0, step_lr=0.01, **fields):
-        '''
-        required
-           lr       = (float)      // base learning rate
-        optional
-           lr_type  = (string)     // type of learning rate, 'fixed' at default
-           decay    = (float)      // weight decay
-           momentum = (float)      // momentum
-           step     = (int/list)   // steps
-           step_lr  = (float/list) // learning rate after the steps
-           **fields (KEY=VALUE)
-        '''
-        assert lr > 0, 'learning rate should be positive'
-        super(SGD, self).__init__(upd_type=kSGD,
-                                  lr=lr, lr_type=lr_type,
-                                  decay=decay, momentum=momentum,
-                                  step=step, step_lr=step_lr, **fields)
-
-class AdaGrad(Updater):
-
-    def __init__(self, lr=0.01, lr_type=None,
-                 decay=0, momentum=0,
-                 step=0, step_lr=0.01, **fields):
-        '''
-        required
-           lr       = (float)      // base learning rate
-        optional
-           lr_type  = (string)     // type of learning rate, 'fixed' at default
-           decay    = (float)      // weight decay
-           momentum = (float)      // momentum
-           step     = (int/list)   // steps
-           step_lr  = (float/list) // learning rate after the steps
-           **fields (KEY=VALUE)
-        '''
-        assert lr > 0, 'learning rate should be positive'
-        super(AdaGrad, self).__init__(upd_type=kAdaGrad,
-                                      lr=lr, lr_type=lr_type,
-                                      decay=decay, momentum=momentum,
-                                      step=step, step_lr=step_lr, **fields)
-
-class Cluster(object):
-    """ Specify the cluster topology, e.g., number of workers/servers.
-
-    Currently we need to create this object in the .py file and also provide a
-    cluster configuration file to the command line. TODO(wangwei) update SINGA
-    code to eliminate the requirement of the cluster configuration file for
-    training on a single node or the cluster object in the pyfile for training
-    in a cluster.
-    """
-
-    def __init__(self, workspace=None,
-                 nworker_groups=1, nserver_groups=1,
-                 nworkers_per_group=1, nservers_per_group=1,
-                 nworkers_per_procs=1, nservers_per_procs=1,
-                 **fields):
-        '''
-        required
-          workspace = (string) // workspace path
-        optional
-          nworker_groups     = (int)
-          nserver_groups     = (int)
-          nworkers_per_group = (int)
-          nservers_per_group = (int)
-          nworkers_per_procs = (int)
-          nservers_per_procs = (int)
-          **fields
-            server_worker_separate = (bool)
-        '''
-        assert workspace is not None, 'need to set workspace'
-        self.proto = Message('Cluster', workspace=workspace).proto
-        # optional
-        self.proto.nworker_groups = nworker_groups
-        self.proto.nserver_groups = nserver_groups
-        self.proto.nworkers_per_group = nworkers_per_group
-        self.proto.nservers_per_group = nservers_per_group
-        self.proto.nworkers_per_procs = nworkers_per_procs
-        self.proto.nservers_per_procs = nservers_per_procs
-        # other fields
-        setval(self.proto, **fields)
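-        # A single-node sketch with two workers (hypothetical workspace path):
-        #   topo = Cluster(workspace='examples/workspace',
-        #                  nworkers_per_group=2, nworkers_per_procs=2)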
-
-
-def StoreResults(lines):
-    """ Parsing metrics from each line in the log file.
-
-    TODO(wangwei) format the log string to make them uniform for easy parsing
-    Another approach is creating a protobuf message for metrics, which can be
-    used for dumping metrics to string and loading perf string back to messages.
-    """
-
-    resultDic = {}
-    for line in lines:
-        # tokenize: keep runs of word chars and the chars '.', '|', '*'
-        line = re.findall(r'[\w|*.*]+', line)
-        if 'Train' in line:
-            step = line[line.index('step')+1]
-            if 'accuracy' in line:
-                resultDic.setdefault(step, {})['acc'] \
-                                             = line[line.index('accuracy')+1]
-            if 'loss' in line:
-                resultDic.setdefault(step, {})['loss'] \
-                                             = line[line.index('loss')+1]
-            if 'ppl' in line:
-                resultDic.setdefault(step, {})['ppl'] \
-                                             = line[line.index('ppl')+1]
-            if 'Squared' in line:
-                resultDic.setdefault(step, {})['se'] \
-                                             = line[line.index('Squared')+2]
-    return resultDic
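-# StoreResults returns a dict keyed by training step, e.g. (hypothetical):
-#   {'100': {'acc': '0.92', 'loss': '0.25'}, '200': {'acc': '0.94', ...}}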
-
-def SingaRun(jobproto='', argv=None, execpath='', testmode=False):
-    """
-    Run Singa and receive the training/test results.
-    """
-
-    import singa.driver as driver
-    d = driver.Driver()
-    d.InitLog(argv[0])
-    d.Init(argv)
-    if testmode:
-        d.Test(jobproto.SerializeToString())
-    else:
-        d.Train(False, jobproto.SerializeToString())
-
-    # Get the performance from the latest log file.
-    # TODO(wangwei) the log file would be overwritten by other running instance
-    # of the same program, e.g., lt-singa
-    logfile = '/tmp/singa-log/{0}.ERROR'.format(argv[0].split('/')[-1])
-    with open(logfile, 'r') as fin:
-        result = StoreResults(fin.readlines())
-
-    return result
-
-def SingaRun_script(filename='', execpath=''):
-    """
-    Deprecated.
-    Generate the job conf file and run the shell command.
-    """
-    SINGAROOT = '../../../'
-    conf = 'examples/' + filename
-    if execpath == '':
-        cmd = SINGAROOT+'bin/singa-run.sh ' \
-            + '-conf %s ' % conf
-    else:
-        cmd = SINGAROOT+'bin/singa-run.sh ' \
-            + '-conf %s ' % conf \
-            + '-exec %s ' % execpath
-
-    procs = subprocess.Popen(cmd.strip().split(' '),
-                             stdout=subprocess.PIPE,
-                             stderr=subprocess.STDOUT)
-
-    resultDic = {}
-    outputlines = iter(procs.stdout.readline, '')
-    resultDic = StoreResults(outputlines)
-
-    #TODO better format to store the result??
-    return resultDic
-
-def load_model_parameter(fin, neuralnet, batchsize=1, data_shape=None):
-    """
-    this method loads model parameter
-    """
-    hly_idx = 0
-    for i in range(len(neuralnet)): 
-        if neuralnet[i].is_datalayer:
-            if data_shape is None:
-                shape = neuralnet[i].shape
-                shape[0] = batchsize
-                neuralnet[i].setup(shape)
-            else:
-                neuralnet[i].setup(data_shape)
-        else:
-            hly_idx = i
-            break
-
-    net = layerVector(len(neuralnet)-hly_idx)
-    for i in range(hly_idx, len(neuralnet)): 
-        if neuralnet[i].src is None:
-            neuralnet[i].setup(neuralnet[i-1])
-        else:
-            neuralnet[i].setup(neuralnet[i].src)
-        net[i-hly_idx] = neuralnet[i].singalayer
-
-    from singa.driver import Worker
-    alg = Algorithm(type=enumAlgType('bp')).proto
-    w = Worker.CreateWorker(alg.SerializeToString())
-    w.InitNetParams(fin, net)
-
-def save_model_parameter(step, fout, neuralnet):
-    """
-    this method saves model parameter
-    """
-    hly_idx = 0
-    for i in range(len(neuralnet)): 
-        if not neuralnet[i].is_datalayer:
-            hly_idx = i
-            break
-
-    from singa.driver import Worker
-    net = layerVector(len(neuralnet)-hly_idx)
-    for i in range(hly_idx, len(neuralnet)): 
-        net[i-hly_idx] = neuralnet[i].singalayer
-    alg = Algorithm(type=enumAlgType('bp')).proto
-    w = Worker.CreateWorker(alg.SerializeToString())
-    w.Checkpoint(step, fout, net)
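-
-# A round-trip sketch (hypothetical path; 'net' is a list of layer objects):
-#   save_model_parameter(100, 'workspace/step100-worker0', net)
-#   load_model_parameter('workspace/step100-worker0', net, batchsize=1)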
-

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/ed9587c0/tool/python/singa/parameter.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/parameter.py b/tool/python/singa/parameter.py
deleted file mode 100644
index 14ad852..0000000
--- a/tool/python/singa/parameter.py
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/env python
-
-#/************************************************************
-#*
-#* Licensed to the Apache Software Foundation (ASF) under one
-#* or more contributor license agreements.  See the NOTICE file
-#* distributed with this work for additional information
-#* regarding copyright ownership.  The ASF licenses this file
-#* to you under the Apache License, Version 2.0 (the
-#* "License"); you may not use this file except in compliance
-#* with the License.  You may obtain a copy of the License at
-#*
-#*   http://www.apache.org/licenses/LICENSE-2.0
-#*
-#* Unless required by applicable law or agreed to in writing,
-#* software distributed under the License is distributed on an
-#* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-#* KIND, either express or implied.  See the License for the
-#* specific language governing permissions and limitations
-#* under the License.
-#*
-#*************************************************************/
-
-'''
-This script includes the Parameter class and the set_param_field method,
-with which users can configure Param and ParamGen protos.
-'''
-
-from singa.initializations import get_init_values
-from singa.utils.utility import setval, generate_name
-from singa.utils.message import *
-from google.protobuf import text_format
-
-
-class Parameter(object):
-
-    def __init__(self, **kwargs):
-        '''
-        optional
-          **kwargs
-            name  = (string) // parameter name
-            lr    = (float)  // learning rate multiplier
-            wd    = (float)  // weight decay multiplier
-            init  = (string) // init type {'constant','uniform','gaussian'}
-            value = (int)    // value for 'constant'
-            scale = (float)  // [low=-scale, high=scale] for 'uniform'
-            low   = (float)  // low value   for 'uniform'
-            high  = (float)  // high value  for 'uniform'
-            mean  = (float)  // mean for 'gaussian'
-            std   = (float)  // std  for 'gaussian'
-        '''
-        fields = {'lr_scale' : kwargs['lr'] if 'lr' in kwargs else 1,
-                  'wd_scale' : kwargs['wd'] if 'wd' in kwargs else 1
-                 }
-        self.param = Message('Param', **fields).proto
-
-        if 'name' not in kwargs:
-            setval(self.param, name=generate_name('param', 1))
-        else:
-            pname = kwargs['name']
-            # parameter name for RBM
-            if 'level' in kwargs:
-                pname += str(kwargs['level'])
-                if pname[0] == 'b':
-                    pname += '2'
-            setval(self.param, name=pname)
-
-        if 'share_from' in kwargs:
-            setval(self.param, share_from=kwargs['share_from'])
-
-        if 'init' in kwargs:
-            init_values = get_init_values(kwargs['init'], **kwargs)
-            if not kwargs['init'] == 'none':
-                pgen = Message('ParamGen', type=enumInitMethod(kwargs['init']),
-                               **init_values)
-                del kwargs['init']
-                setval(self.param, init=pgen.proto)
-        else: # default: uniform
-            pgen = Message('ParamGen', type=enumInitMethod('uniform'))
-            setval(self.param, init=pgen.proto)
-
-    def update(self, **fields):
-        setval(self.param, **fields)
-        setval(self.param.init, **fields)
-
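-# Construction sketches (hypothetical values):
-#   parw = Parameter(name='w', init='gaussian', mean=0, std=0.02)
-#   parb = Parameter(name='b', init='constant', value=0, lr=2, wd=0)
-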
-
-def set_param_field(param, pname, changename=False, withnumber=True, **kwargs):
-    '''
-      param      = (ParamProto)
-      pname      = (string)     // 'w' for weight, or 'b' for bias
-      changename = (bool)       // update parameter name if True
-      withnumber = (bool)       // add layer number if True
-      **kwargs
-        w_lr = (float) // learning rate multiplier for weight, used to
-                       // scale the learning rate when updating parameters.
-        w_wd = (float) // weight decay multiplier for weight, used to
-                       // scale the weight decay when updating parameters.
-        b_lr = (float) // learning rate multiplier for bias 
-        b_wd = (float) // weight decay multiplier for bias
-    '''
-    assert pname == 'w' or pname == 'b', 'pname should be w or b'
-
-    lr_ = param.lr_scale
-    wd_ = param.wd_scale
-    initkv = {}
-
-    if pname == 'w':
-        if 'w_lr' in kwargs:
-            lr_ = kwargs['w_lr']
-            del kwargs['w_lr']
-        if 'w_wd' in kwargs:
-            wd_ = kwargs['w_wd']
-            del kwargs['w_wd']
-        for key, val in kwargs.items():
-            if key.startswith('w_'):
-                initkv[key[2:]] = val
-
-    elif pname == 'b':
-        if 'b_lr' in kwargs:
-            lr_ = kwargs['b_lr']
-            del kwargs['b_lr']
-        if 'b_wd' in kwargs:
-            wd_ = kwargs['b_wd']
-            del kwargs['b_wd']
-        for key, val in kwargs.items():
-            if key.startswith('b_'):
-                initkv[key[2:]] = val
-
-    field = {'lr_scale' : lr_, 'wd_scale' : wd_}
-
-    # Set/update parameter fields
-    if param.name.startswith('param') or changename:
-        if 'level' in kwargs:  # parameter name for RBM
-            pname += str(kwargs['level'])
-        setval(param, name=generate_name(pname, withnumber=withnumber), **field)
-    else:
-        setval(param, **field)
-
-    # Set/update parameter init fields
-    setval(param.init, **initkv)
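-
-# A usage sketch (hypothetical kwargs, as forwarded from a layer constructor):
-#   set_param_field(parw.param, 'w', w_lr=2.0, w_wd=1.0, w_std=0.02)
-# w_lr/w_wd set lr_scale/wd_scale; other 'w_'-prefixed keys go to param.init.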

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/ed9587c0/tool/python/singa/utils/__init__.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/utils/__init__.py b/tool/python/singa/utils/__init__.py
deleted file mode 100644
index a796a7a..0000000
--- a/tool/python/singa/utils/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#/************************************************************
-#*
-#* Licensed to the Apache Software Foundation (ASF) under one
-#* or more contributor license agreements.  See the NOTICE file
-#* distributed with this work for additional information
-#* regarding copyright ownership.  The ASF licenses this file
-#* to you under the Apache License, Version 2.0 (the
-#* "License"); you may not use this file except in compliance
-#* with the License.  You may obtain a copy of the License at
-#*
-#*   http://www.apache.org/licenses/LICENSE-2.0
-#*
-#* Unless required by applicable law or agreed to in writing,
-#* software distributed under the License is distributed on an
-#* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-#* KIND, either express or implied.  See the License for the
-#* specific language governing permissions and limitations
-#* under the License.
-#*
-#*************************************************************/
-
-

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/ed9587c0/tool/python/singa/utils/message.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/utils/message.py b/tool/python/singa/utils/message.py
deleted file mode 100644
index bfa9ef2..0000000
--- a/tool/python/singa/utils/message.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-
-#/************************************************************
-#*
-#* Licensed to the Apache Software Foundation (ASF) under one
-#* or more contributor license agreements.  See the NOTICE file
-#* distributed with this work for additional information
-#* regarding copyright ownership.  The ASF licenses this file
-#* to you under the Apache License, Version 2.0 (the
-#* "License"); you may not use this file except in compliance
-#* with the License.  You may obtain a copy of the License at
-#*
-#*   http://www.apache.org/licenses/LICENSE-2.0
-#*
-#* Unless required by applicable law or agreed to in writing,
-#* software distributed under the License is distributed on an
-#* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-#* KIND, either express or implied.  See the License for the
-#* specific language governing permissions and limitations
-#* under the License.
-#*
-#*************************************************************/
-
-import sys, os
-from utility import *
-sys.path.append(os.path.join(os.path.dirname(__file__), '../../pb2'))
-
-'''
-This script reads the proto files in ../../pb2, generated by the protocol
-buffer compiler.
- - The Message class creates an object for a proto and sets initial values
-   for the fields specified by kwargs.
- - The make_function method generates functions (e.g., enumInitMethod) that
-   return the enum values of a given enum type defined in the proto files.
-'''
-
-MODULE_LIST = []
-
-# import all modules in dir singa_root/tool/python/pb2
-# except common, singa, and __init__
-for f in os.listdir(os.path.join(os.path.dirname(__file__), '../../pb2')):
-    if f.endswith(".pyc"):
-        continue
-    if f in ("__init__.py", "common_pb2.py", "singa_pb2.py"):
-        continue
-    module_name = f.split('.')[0]
-    module_obj = __import__(module_name)
-    MODULE_LIST.append(module_obj)
-    for func_name in dir(module_obj):
-        if not func_name.startswith("__"):
-            globals()[func_name] = getattr(module_obj, func_name)
-
-class Message(object):
-    def __init__(self, protoname, **kwargs):
-        for module_obj in MODULE_LIST:
-            if hasattr(module_obj, protoname+"Proto"):
-                class_ = getattr(module_obj, protoname+"Proto")
-                self.proto = class_()
-                setval(self.proto, **kwargs)
-                return
-        raise Exception('invalid protoname: ' + protoname)
-
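-# A Message usage sketch (assuming an UpdaterProto message exists in the
-# generated pb2 modules; kSGD is imported from them):
-#   upd = Message('Updater', type=kSGD).proto
-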
-enumDict_ = dict()
-
-# build a table of all enum types and their values from the modules
-for module_obj in MODULE_LIST:
-    for enumtype in module_obj.DESCRIPTOR.enum_types_by_name:
-        tempDict = enumDict_[enumtype] = dict()
-        for name in getattr(module_obj, enumtype).DESCRIPTOR.values_by_name:
-            # strip the leading 'k' and lowercase, e.g., kUniform -> 'uniform'
-            tempDict[name[1:].lower()] = getattr(module_obj, name)
-
-def make_function(enumtype):
-    def _function(key):
-        return enumDict_[enumtype][key]
-    return _function
-
-current_module = sys.modules[__name__]
-
-# define an accessor function (enum<EnumType>) for each enum type
-for module_obj in MODULE_LIST:
-    for enumtype in module_obj.DESCRIPTOR.enum_types_by_name:
-        setattr(current_module, "enum"+enumtype, make_function(enumtype))
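-
-# e.g., enumInitMethod('uniform') would return the proto enum value kUniform
-# (assuming an InitMethod enum with a kUniform value in the pb2 modules).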