Posted to commits@singa.apache.org by wa...@apache.org on 2016/01/01 13:29:11 UTC

[02/10] incubator-singa git commit: SINGA-81 Add Python Helper, which enables users to construct a model (JobProto) and run Singa in Python

SINGA-81 Add Python Helper, which enables users to construct a model (JobProto) and run Singa in Python

- Update examples for cudnn
  . cifar10_cnn_cudnn.py
  . mnist_mlp_cudnn.py

- mnist_ae.py now works properly
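
For orientation, here is a minimal sketch of the usage pattern this helper enables, assembled from calls that appear in the diffs below (layer arguments and the dataset loader follow this commit's own examples; treat it as an illustration, not a complete API reference):

  #!/usr/bin/env python
  import sys, os
  sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
  from singa.model import *           # Sequential, layers, SGD, Cluster, ...
  from singa.datasets import cifar10

  # Load the data records and the job workspace directory.
  X_train, X_test, workspace = cifar10.load_data()

  # Build a model layer by layer; Sequential assembles the JobProto.
  m = Sequential('cifar10-cnn', sys.argv)
  m.add(Convolution2D(32, 5, 1, 2, w_std=0.0001))
  m.add(MaxPooling2D(pool_size=(3,3), stride=2))
  m.add(Activation('relu'))
  m.add(Dense(10, activation='softmax'))

  # Attach an updater and a cluster topology, then train and evaluate.
  sgd = SGD(decay=0.004, lr_type='fixed', step=(0,), step_lr=(0.001,))
  topo = Cluster(workspace)
  m.compile(loss='categorical_crossentropy', optimizer=sgd, cluster=topo)
  m.fit(X_train, nb_epoch=1000, with_test=True, device=[0])
  result = m.evaluate(X_test, test_steps=100, test_freq=300)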


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/8b69cadb
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/8b69cadb
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/8b69cadb

Branch: refs/heads/master
Commit: 8b69cadb7ca015eaee1a666a541d2e6185255554
Parents: 94435eb
Author: chonho <le...@comp.nus.edu.sg>
Authored: Mon Dec 28 13:31:30 2015 +0800
Committer: chonho <le...@comp.nus.edu.sg>
Committed: Fri Jan 1 15:59:14 2016 +0800

----------------------------------------------------------------------
 tool/python/examples/cifar10_cnn_cudnn.py       |  4 +--
 .../python/examples/cifar10_cnn_cudnn_hybrid.py | 34 ++++++++++++++++++++
 tool/python/singa/model.py                      | 26 ++++++++-------
 3 files changed, 51 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/8b69cadb/tool/python/examples/cifar10_cnn_cudnn.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/cifar10_cnn_cudnn.py b/tool/python/examples/cifar10_cnn_cudnn.py
index e08610a..e3c5c49 100755
--- a/tool/python/examples/cifar10_cnn_cudnn.py
+++ b/tool/python/examples/cifar10_cnn_cudnn.py
@@ -29,6 +29,6 @@ topo = Cluster(workspace)
 m.compile(loss='categorical_crossentropy', optimizer=sgd, cluster=topo)
 
 gpu_id = [0]
-m.fit(X_train, nb_epoch=1000, with_test=True, device=gpu_id)
-result = m.evaluate(X_test, test_steps=100, test_freq=300)
+m.fit(X_train, nb_epoch=7000, with_test=True, device=gpu_id)
+result = m.evaluate(X_test, test_steps=100, test_freq=1000)
 

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/8b69cadb/tool/python/examples/cifar10_cnn_cudnn_hybrid.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/cifar10_cnn_cudnn_hybrid.py b/tool/python/examples/cifar10_cnn_cudnn_hybrid.py
new file mode 100755
index 0000000..f5e4c27
--- /dev/null
+++ b/tool/python/examples/cifar10_cnn_cudnn_hybrid.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import *
+from singa.datasets import cifar10
+
+X_train, X_test, workspace = cifar10.load_data()
+
+m = Sequential('cifar10-cnn', sys.argv)
+
+m.add(Convolution2D(32, 5, 1, 2, w_std=0.0001, b_lr=2))
+m.add(MaxPooling2D(pool_size=(3,3), stride=2))
+m.add(Activation('relu'))
+m.add(LRN2D(3, alpha=0.00005, beta=0.75))
+
+m.add(Convolution2D(32, 5, 1, 2, b_lr=2))
+m.add(Activation('relu'))
+m.add(AvgPooling2D(pool_size=(3,3), stride=2))
+m.add(LRN2D(3, alpha=0.00005, beta=0.75))
+
+m.add(Convolution2D(64, 5, 1, 2))
+m.add(Activation('relu'))
+m.add(AvgPooling2D(pool_size=(3,3), stride=2))
+
+m.add(Dense(10, w_wd=250, b_lr=2, b_wd=0, activation='softmax'))
+
+sgd = SGD(decay=0.004, lr_type='fixed', step=(0,60000,65000), step_lr=(0.001,0.0001,0.00001))
+topo = Cluster(workspace, nworkers_per_group=2, nworkers_per_procs=2)
+m.compile(loss='categorical_crossentropy', optimizer=sgd, cluster=topo)
+
+gpu_id = [0,1]
+m.fit(X_train, nb_epoch=10000, with_test=True, device=gpu_id)
+result = m.evaluate(X_test, test_steps=0, test_freq=200)
+
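
The new hybrid variant differs from cifar10_cnn_cudnn.py mainly in its cluster topology: two workers run inside one process, one per listed GPU. Side by side (the single-GPU defaults of one worker per group and per process are an assumption, not stated in this diff):

  # Single GPU: one worker, one device.
  topo = Cluster(workspace)
  m.fit(X_train, nb_epoch=7000, with_test=True, device=[0])

  # Hybrid: two workers in one process, each bound to one GPU.
  topo = Cluster(workspace, nworkers_per_group=2, nworkers_per_procs=2)
  m.fit(X_train, nb_epoch=10000, with_test=True, device=[0, 1])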

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/8b69cadb/tool/python/singa/model.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/model.py b/tool/python/singa/model.py
index ade0b85..51c4126 100644
--- a/tool/python/singa/model.py
+++ b/tool/python/singa/model.py
@@ -11,6 +11,7 @@ class Model(object):
     '''
     optional
       name  = (string) // name of model/job
+      argv             // sys.argv, forwarded to the SINGA driver
       label = (bool)   // whether a label layer exists (deprecated)
     '''
     self.jobconf = Message('Job', name=name).proto 
@@ -227,7 +228,7 @@ class Model(object):
     #filename = 'job.conf'
     #with open(filename, 'w') as f:
     #  f.write(text_format.MessageToString(self.jobconf.cluster))
-    self.display()
+    #self.display()
 
     #--- run singa --- 
     return SingaRun(jobproto=self.jobconf, argv=self.argv, execpath=execpath, testmode=is_testonly)
@@ -285,8 +286,8 @@ class Sequential(Model):
   def add(self, layer):
     if hasattr(layer, 'layer_type'):
       if layer.layer_type == 'AutoEncoder':
+        dim = 0 
         if layer.param_share == True:
-          dim = 0 
           # Encoding
           for i in range(1, len(layer.hid_dim)+1):
             parw = Parameter(name='w', init='none', level=i)
@@ -368,11 +369,13 @@ class SGD(Updater):
                step=(0), step_lr=(0.01), **fields):
     '''
     required
-       lr      = (float)  // base learning rate
+       lr       = (float)      // base learning rate
     optional
-       lr_type = (string) // type of learning rate, 'Fixed' at default
-       decay    = (float) // weight decay
-       momentum = (float) // momentum
+       lr_type  = (string)     // learning rate type, 'fixed' by default
+       decay    = (float)      // weight decay
+       momentum = (float)      // momentum
+       step     = (int/list)   // global steps at which the rate changes
+       step_lr  = (float/list) // learning rate in effect after each step
        **fields (KEY=VALUE)
 
     '''
@@ -388,13 +391,14 @@ class AdaGrad(Updater):
                step=(0), step_lr=(0.01), **fields):
     '''
     required
-       lr      = (float)  // base learning rate
+       lr       = (float)      // base learning rate
     optional
-       lr_type = (string) // type of learning rate, 'Fixed' at default
-       decay    = (float) // weight decay
-       momentum = (float) // momentum
+       lr_type  = (string)     // learning rate type, 'fixed' by default
+       decay    = (float)      // weight decay
+       momentum = (float)      // momentum
+       step     = (int/list)   // global steps at which the rate changes
+       step_lr  = (float/list) // learning rate in effect after each step
        **fields (KEY=VALUE)
-
     '''
     assert lr
     super(AdaGrad, self).__init__(upd_type=kAdaGrad,
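
The step/step_lr pairs documented above describe a piecewise-constant schedule. Below is a pure-Python sketch of the semantics the docstrings suggest ("learning rate after the steps", i.e. each rate takes effect once the global step reaches its boundary), using the values from the hybrid example; the function name is ours, not part of the API:

  def lr_at(step, boundaries=(0, 60000, 65000),
            rates=(0.001, 0.0001, 0.00001)):
      # Walk the boundaries; the last one not exceeding `step` wins.
      lr = rates[0]
      for b, r in zip(boundaries, rates):
          if step >= b:
              lr = r
      return lr

  assert lr_at(59999) == 0.001    # before the first boundary change
  assert lr_at(60000) == 0.0001   # rate drops at step 60000
  assert lr_at(65000) == 0.00001  # and again at step 65000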