Posted to commits@mxnet.apache.org by sk...@apache.org on 2019/07/18 20:54:57 UTC

[incubator-mxnet] branch master updated: Fix AMP Tutorial failures (#15526)

This is an automated email from the ASF dual-hosted git repository.

skm pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new f44fdd6  Fix AMP Tutorial failures (#15526)
f44fdd6 is described below

commit f44fdd683112d593d239847b1fa6932f5ab5c6c0
Author: Anirudh Subramanian <an...@apache.org>
AuthorDate: Thu Jul 18 13:54:11 2019 -0700

    Fix AMP Tutorial failures (#15526)
---
 docs/tutorials/amp/amp_tutorial.md | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/docs/tutorials/amp/amp_tutorial.md b/docs/tutorials/amp/amp_tutorial.md
index 5f9bd11..9da0505 100644
--- a/docs/tutorials/amp/amp_tutorial.md
+++ b/docs/tutorials/amp/amp_tutorial.md
@@ -262,19 +262,20 @@ Below, we demonstrate for a gluon model and a symbolic model:
 ```python
 with mx.Context(mx.gpu(0)):
     # Below is an example of converting a gluon hybrid block to a mixed precision block
-    model = get_model("resnet50_v1")
-    model.collect_params().initialize(ctx=mx.current_context())
-    model.hybridize()
-    model(mx.nd.zeros((1, 3, 224, 224)))
-    converted_model = amp.convert_hybrid_block(model)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter("ignore")
+        model = get_model("resnet50_v1")
+        model.collect_params().initialize(ctx=mx.current_context())
+        model.hybridize()
+        model(mx.nd.zeros((1, 3, 224, 224)))
+        converted_model = amp.convert_hybrid_block(model)
 
     # Run dummy inference with the converted gluon model
     result = converted_model.forward(mx.nd.random.uniform(shape=(1, 3, 224, 224),
                                                           dtype=np.float32))
 
     # Below is an example of converting a symbolic model to a mixed precision model
-    dir_path = os.path.dirname(os.path.realpath(__file__))
-    model_path = os.path.join(dir_path, 'model')
+    model_path = "model"
     if not os.path.isdir(model_path):
         os.mkdir(model_path)
     prefix, epoch = mx.test_utils.download_model("imagenet1k-resnet-18", dst_dir=model_path)
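
For readers following along outside the patch, below is a minimal, self-contained sketch of the pattern this first hunk lands on: the FP32 warm-up pass and the `amp.convert_hybrid_block` call wrapped in `warnings.catch_warnings` so deprecation noise does not break the tutorial run. It assumes MXNet 1.5+ built with CUDA, and it substitutes `mx.gluon.model_zoo.vision.resnet50_v1` for the tutorial's `get_model` helper (an assumption; the actual tutorial pulls its model from GluonCV).

```python
# Sketch only: assumes MXNet 1.5+ with a CUDA GPU; resnet50_v1 from the
# Gluon model zoo stands in for the tutorial's get_model call.
import warnings
import numpy as np
import mxnet as mx
from mxnet.contrib import amp
from mxnet.gluon.model_zoo.vision import resnet50_v1

with mx.Context(mx.gpu(0)):
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("ignore")
        model = resnet50_v1()
        model.collect_params().initialize(ctx=mx.current_context())
        model.hybridize()
        # One FP32 forward pass so the cached graph exists before conversion
        model(mx.nd.zeros((1, 3, 224, 224)))
        converted_model = amp.convert_hybrid_block(model)

    # Dummy inference with the converted (mixed precision) block
    result = converted_model(mx.nd.random.uniform(shape=(1, 3, 224, 224),
                                                  dtype=np.float32))
    mx.nd.waitall()
```

The warm-up forward pass matters here: `amp.convert_hybrid_block` works on the cached symbolic graph that hybridization builds, which is why the tutorial runs the block once on dummy input before converting it.
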
@@ -301,8 +302,7 @@ for symbolic model. You can do the same for gluon hybrid block with `amp.convert
 with mx.Context(mx.gpu(0)):
     # Below is an example of converting a symbolic model to a mixed precision model
     # with only Convolution op being force casted to FP16.
-    dir_path = os.path.dirname(os.path.realpath(__file__))
-    model_path = os.path.join(dir_path, 'model')
+    model_path = "model"
     if not os.path.isdir(model_path):
         os.mkdir(model_path)
     prefix, epoch = mx.test_utils.download_model("imagenet1k-resnet-18", dst_dir=model_path)
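
And a hedged sketch of the force-cast path the second hunk touches, assuming MXNet 1.5+ with CUDA and network access for the checkpoint download. Only `target_dtype_ops` is shown; the full tutorial may tune other `amp.convert_model` arguments as well.

```python
# Sketch only: assumes MXNet 1.5+ with a CUDA GPU and network access for
# mx.test_utils.download_model; argument names follow mxnet.contrib.amp.
import os
import mxnet as mx
from mxnet.contrib import amp

with mx.Context(mx.gpu(0)):
    model_path = "model"
    if not os.path.isdir(model_path):
        os.mkdir(model_path)
    prefix, epoch = mx.test_utils.download_model("imagenet1k-resnet-18",
                                                 dst_dir=model_path)
    sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)

    # Force-cast only the Convolution op to FP16; other ops keep the
    # converter's default casting behavior.
    result_sym, result_arg_params, result_aux_params = amp.convert_model(
        sym, arg_params, aux_params, target_dtype_ops=["Convolution"])
```

Note that the commit replaces the `__file__`-relative `dir_path` with a plain `"model"` directory, as above: tutorial code is executed from notebooks where `__file__` is undefined, which is one of the failures this patch fixes.
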