Posted to commits@singa.apache.org by wa...@apache.org on 2018/05/16 14:43:12 UTC

[2/2] incubator-singa git commit: update the cifar10 example

update the cifar10 example
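
In short, this change swaps the example's model entry from `alexnet` to a `cnn` module and tidies the layer construction in cnn.py. After the change, the small CNN can be run as `python train.py cnn cifar-10-batches-py --use_cpu`; the data directory name follows the argparse default visible in the diff, and the CIFAR-10 python batches are assumed to be downloaded there already.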


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/6bcd5d0e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/6bcd5d0e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/6bcd5d0e

Branch: refs/heads/master
Commit: 6bcd5d0e96802e9c8271919f53f9d4740d9362dc
Parents: bfd8ce9
Author: Wang Wei <dc...@nus.edu.sg>
Authored: Wed May 16 22:42:58 2018 +0800
Committer: Wang Wei <dc...@nus.edu.sg>
Committed: Wed May 16 22:42:58 2018 +0800

----------------------------------------------------------------------
 examples/cifar10/cnn.py   | 18 +++++++++++++-----
 examples/cifar10/train.py | 12 +++++-------
 2 files changed, 18 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/6bcd5d0e/examples/cifar10/cnn.py
----------------------------------------------------------------------
diff --git a/examples/cifar10/cnn.py b/examples/cifar10/cnn.py
index b056e70..66eb9ac 100644
--- a/examples/cifar10/cnn.py
+++ b/examples/cifar10/cnn.py
@@ -22,7 +22,6 @@ validation accuracy would be about 82%.
 from __future__ import print_function
 from builtins import zip
 
-# sys.path.append(os.path.join(os.path.dirname(__file__), '../../build/python'))
 from singa import layer
 from singa import metric
 from singa import loss
@@ -39,19 +38,28 @@ def create_net(use_cpu=False):
     W2_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01, 'decay_mult': 250}
 
     b_specs = {'init': 'constant', 'value': 0, 'lr_mult': 2, 'decay_mult': 0}
-    net.add(layer.Conv2D('conv1', 32, 5, 1, W_specs=W0_specs.copy(), b_specs=b_specs.copy(), pad=2, input_sample_shape=(3,32,32,)))
+    net.add(layer.Conv2D('conv1', 32, 5, 1,
+                         W_specs=W0_specs.copy(),
+                         b_specs=b_specs.copy(), pad=2,
+                         input_sample_shape=(3, 32, 32,)))
     net.add(layer.MaxPooling2D('pool1', 3, 2, pad=1))
     net.add(layer.Activation('relu1'))
     net.add(layer.LRN(name='lrn1', size=3, alpha=5e-5))
-    net.add(layer.Conv2D('conv2', 32, 5, 1, W_specs=W1_specs.copy(), b_specs=b_specs.copy(), pad=2))
+    net.add(layer.Conv2D('conv2', 32, 5, 1,
+                         W_specs=W1_specs.copy(),
+                         b_specs=b_specs.copy(), pad=2))
     net.add(layer.Activation('relu2'))
     net.add(layer.AvgPooling2D('pool2', 3, 2,  pad=1))
     net.add(layer.LRN('lrn2', size=3, alpha=5e-5))
-    net.add(layer.Conv2D('conv3', 64, 5, 1, W_specs=W1_specs.copy(), b_specs=b_specs.copy(), pad=2))
+    net.add(layer.Conv2D('conv3', 64, 5, 1,
+                         W_specs=W1_specs.copy(),
+                         b_specs=b_specs.copy(), pad=2))
     net.add(layer.Activation('relu3'))
     net.add(layer.AvgPooling2D('pool3', 3, 2, pad=1))
     net.add(layer.Flatten('flat'))
-    net.add(layer.Dense( 'dense', 10, W_specs=W2_specs.copy(), b_specs=b_specs.copy()))
+    net.add(layer.Dense('dense', 10,
+                        W_specs=W2_specs.copy(),
+                        b_specs=b_specs.copy()))
     for (p, specs) in zip(net.param_values(), net.param_specs()):
         filler = specs.filler
         if filler.type == 'gaussian':

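For context, a minimal sketch of how the refactored create_net above is consumed; it mirrors the `cnn` branch that the train.py hunks below add, with data loading omitted:

    import cnn

    # Build the small CNN; use_cpu=True keeps all layers on the host,
    # matching the --use_cpu path in train.py.
    net = cnn.create_net(use_cpu=True)
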
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/6bcd5d0e/examples/cifar10/train.py
----------------------------------------------------------------------
diff --git a/examples/cifar10/train.py b/examples/cifar10/train.py
index 2a25920..b2ab4af 100644
--- a/examples/cifar10/train.py
+++ b/examples/cifar10/train.py
@@ -31,15 +31,13 @@ import numpy as np
 import os
 import argparse
 
-# sys.path.append(os.path.join(os.path.dirname(__file__), '../../build/python'))
 from singa import utils
 from singa import optimizer
 from singa import device
 from singa import tensor
-from singa.proto import core_pb2
 from caffe import caffe_net
 
-# import alexnet
+import cnn
 import vgg
 import resnet
 
@@ -139,7 +137,7 @@ def train(data, net, max_epoch, get_lr, weight_decay, batch_size=100,
         opt.register(p, specs)
 
     tx = tensor.Tensor((batch_size, 3, 32, 32), dev)
-    ty = tensor.Tensor((batch_size,), dev, core_pb2.kInt)
+    ty = tensor.Tensor((batch_size,), dev, tensor.int32)
     train_x, train_y, test_x, test_y = data
     num_train_batch = train_x.shape[0] // batch_size
     num_test_batch = test_x.shape[0] // batch_size
@@ -181,7 +179,7 @@ def train(data, net, max_epoch, get_lr, weight_decay, batch_size=100,
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Train dcnn for cifar10')
-    parser.add_argument('model', choices=['vgg', 'alexnet', 'resnet', 'caffe'],
+    parser.add_argument('model', choices=['vgg', 'cnn', 'resnet', 'caffe'],
                         default='vgg')
     parser.add_argument('data', default='cifar-10-batches-py')
     parser.add_argument('--use_cpu', action='store_true')
@@ -200,9 +198,9 @@ if __name__ == '__main__':
         # for cifar10_quick_train_test.prototxt
         # train((train_x, train_y, test_x, test_y), net, 18, caffe_lr, 0.004,
         #      use_cpu=args.use_cpu)
-    elif args.model == 'alexnet':
+    elif args.model == 'cnn':
         train_x, test_x = normalize_for_alexnet(train_x, test_x)
-        net = alexnet.create_net(args.use_cpu)
+        net = cnn.create_net(args.use_cpu)
         train((train_x, train_y, test_x, test_y), net, 2, alexnet_lr, 0.004,
               use_cpu=args.use_cpu)
     elif args.model == 'vgg':
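
The train.py hunk above also drops the singa.proto core_pb2 import: the integer label tensor is now created with the dtype constant exposed by singa.tensor. A minimal sketch of the new form (device.get_default_device() is used here only to make the snippet self-contained; the example itself picks the device from the --use_cpu flag):

    from singa import device, tensor

    dev = device.get_default_device()   # host device
    batch_size = 100
    # labels as int32; previously this required core_pb2.kInt
    ty = tensor.Tensor((batch_size,), dev, tensor.int32)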