You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@singa.apache.org by ch...@apache.org on 2021/03/27 05:16:47 UTC

[singa] branch dev updated: Update the CNN example benchmark

This is an automated email from the ASF dual-hosted git repository.

chrishkchris pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/singa.git


The following commit(s) were added to refs/heads/dev by this push:
     new 3b1dd14  Update the CNN example benchmark
     new 358cf67  Merge pull request #839 from lzjpaul/cnn-benchmark
3b1dd14 is described below

commit 3b1dd14729009d70f116dc874e393be70bdbf246
Author: zhaojing <zh...@comp.nus.edu.sg>
AuthorDate: Fri Mar 26 20:41:39 2021 +0800

    Update the CNN example benchmark
---
 examples/cnn/benchmark.py          | 6 +++---
 examples/cnn/train_cnn.py          | 2 +-
 examples/cnn/train_mpi.py          | 2 +-
 examples/cnn/train_multiprocess.py | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/examples/cnn/benchmark.py b/examples/cnn/benchmark.py
index a182139..64e8951 100644
--- a/examples/cnn/benchmark.py
+++ b/examples/cnn/benchmark.py
@@ -32,7 +32,7 @@ from tqdm import trange
 
 def train_resnet(DIST=True, graph=True, sequential=False, verbosity=0):
 
-    # Define the hypermeters good for the train_resnet
+    # Define the hyperparameters for the train_resnet
     niters = 100
     batch_size = 32
     sgd = opt.SGD(lr=0.1, momentum=0.9, weight_decay=1e-5)
@@ -64,7 +64,7 @@ def train_resnet(DIST=True, graph=True, sequential=False, verbosity=0):
     dev.SetVerbosity(verbosity)
     dev.SetSkipIteration(5)
 
-    # construct the model
+    # Construct the model
     from model import resnet
     model = resnet.resnet50(num_channels=3, num_classes=1000)
 
@@ -72,7 +72,7 @@ def train_resnet(DIST=True, graph=True, sequential=False, verbosity=0):
     model.set_optimizer(sgd)
     model.compile([tx], is_train=True, use_graph=graph, sequential=sequential)
 
-    # train model
+    # Train model
     dev.Sync()
     start = time.time()
     with trange(niters) as t:
diff --git a/examples/cnn/train_cnn.py b/examples/cnn/train_cnn.py
index fa82a9e..bcccc51 100644
--- a/examples/cnn/train_cnn.py
+++ b/examples/cnn/train_cnn.py
@@ -153,7 +153,7 @@ def run(global_rank,
         model = model.create_model(data_size=data_size,
                                     num_classes=num_classes)
 
-    # For distributed training, sequential gives better performance
+    # For distributed training, sequential has better performance
     if hasattr(sgd, "communicator"):
         DIST = True
         sequential = True
diff --git a/examples/cnn/train_mpi.py b/examples/cnn/train_mpi.py
index 4f71dad..563d4b2 100644
--- a/examples/cnn/train_mpi.py
+++ b/examples/cnn/train_mpi.py
@@ -27,7 +27,7 @@ import train_cnn
 singa_dtype = {"float16": tensor.float16, "float32": tensor.float32}
 
 if __name__ == '__main__':
-    # use argparse to get command config: max_epoch, model, data, etc. for single gpu training
+    # Use argparse to get command config: max_epoch, model, data, etc., for single GPU training
     parser = argparse.ArgumentParser(
         description='Training using the autograd and graph.')
     parser.add_argument('model',
diff --git a/examples/cnn/train_multiprocess.py b/examples/cnn/train_multiprocess.py
index 50ac6ca..182dd35 100644
--- a/examples/cnn/train_multiprocess.py
+++ b/examples/cnn/train_multiprocess.py
@@ -36,7 +36,7 @@ def run(args, local_rank, world_size, nccl_id):
 
 
 if __name__ == '__main__':
-    # use argparse to get command config: max_epoch, model, data, etc. for single gpu training
+    # Use argparse to get command config: max_epoch, model, data, etc., for single GPU training
     parser = argparse.ArgumentParser(
         description='Training using the autograd and graph.')
     parser.add_argument('model',