Posted to commits@singa.apache.org by zh...@apache.org on 2022/09/01 13:46:39 UTC

[singa] branch dev updated: update Relu layer for xceptionnet in cifar_distributed_cnn example

This is an automated email from the ASF dual-hosted git repository.

zhaojing pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/singa.git


The following commit(s) were added to refs/heads/dev by this push:
     new ae06e327 update Relu layer for xceptionnet in cifar_distributed_cnn example
     new 4bbd339c Merge pull request #997 from KimballCai/dev
ae06e327 is described below

commit ae06e32763d9ba07b826bb0763e9f306faad1ab7
Author: qingpeng <qi...@u.nus.edu>
AuthorDate: Thu Sep 1 14:15:28 2022 +0800

    update Relu layer for xceptionnet in cifar_distributed_cnn example
---
 examples/cifar_distributed_cnn/autograd/xceptionnet.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/examples/cifar_distributed_cnn/autograd/xceptionnet.py b/examples/cifar_distributed_cnn/autograd/xceptionnet.py
index 357e47d5..ce28640c 100644
--- a/examples/cifar_distributed_cnn/autograd/xceptionnet.py
+++ b/examples/cifar_distributed_cnn/autograd/xceptionnet.py
@@ -140,7 +140,7 @@ class Xception(layer.Layer):
         self.conv2 = layer.Conv2d(32, 64, 3, 1, 1, bias=False)
         self.bn2 = layer.BatchNorm2d(64)
         self.relu2 = layer.ReLU()
-        # do relu here
+        # Relu Layer
 
         self.block1 = Block(64,
                             128,
@@ -225,7 +225,7 @@ class Xception(layer.Layer):
         self.bn3 = layer.BatchNorm2d(1536)
         self.relu3 = layer.ReLU()
 
-        # do relu here
+        # Relu Layer
         self.conv4 = layer.SeparableConv2d(1536, 2048, 3, 1, 1)
         self.bn4 = layer.BatchNorm2d(2048)
 
@@ -279,9 +279,8 @@ class Xception(layer.Layer):
 
 if __name__ == '__main__':
     model = Xception(num_classes=1000)
-    print('Start intialization............')
+    print('Start initialization............')
     dev = device.create_cuda_gpu_on(0)
-    #dev = device.create_cuda_gpu()
 
     niters = 20
     batch_size = 16
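
For readers skimming the diff: the example keeps the activation as an explicit layer.ReLU() attribute between each Conv2d/BatchNorm2d pair, and the edited comments now simply mark those ReLU layers. Below is a minimal sketch of that pattern, assuming SINGA layer objects are callable inside a layer.Layer subclass as in xceptionnet.py; the class name and forward() body are illustrative only and are not part of this commit.

    from singa import layer

    # Hypothetical stand-in for the Xception entry stem touched by the diff.
    class MiniStem(layer.Layer):

        def __init__(self):
            super().__init__()
            self.conv1 = layer.Conv2d(3, 32, 3, 2, 0, bias=False)
            self.bn1 = layer.BatchNorm2d(32)
            self.relu1 = layer.ReLU()   # explicit ReLU layer object
            self.conv2 = layer.Conv2d(32, 64, 3, 1, 1, bias=False)
            self.bn2 = layer.BatchNorm2d(64)
            self.relu2 = layer.ReLU()   # Relu Layer (as the updated comment notes)

        def forward(self, x):
            # conv -> batch norm -> ReLU, twice
            y = self.relu1(self.bn1(self.conv1(x)))
            return self.relu2(self.bn2(self.conv2(y)))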