Posted to commits@mxnet.apache.org by zh...@apache.org on 2020/10/21 17:43:11 UTC

[incubator-mxnet] branch master updated: Move AMP from contrib to core (#19347)

This is an automated email from the ASF dual-hosted git repository.

zhasheng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 9e9f972  Move AMP from contrib to core (#19347)
9e9f972 is described below

commit 9e9f972d3b59dbed7667495300707cd71a9f206c
Author: mk-61 <56...@users.noreply.github.com>
AuthorDate: Wed Oct 21 10:41:09 2020 -0700

    Move AMP from contrib to core (#19347)
    
    * Move AMP from contrib to core
    
    * Update tutorial to import AMP from core
    
    Co-authored-by: Vladimir Cherepanov <vc...@nvidia.com>
---
 .../python/tutorials/performance/backend/amp.md    |  2 +-
 .../amp_model_conversion.py                        |  2 +-
 python/mxnet/{contrib => }/amp/__init__.py         |  0
 python/mxnet/{contrib => }/amp/amp.py              | 28 +++++++++++-----------
 python/mxnet/{contrib => }/amp/lists/__init__.py   |  0
 .../mxnet/{contrib => }/amp/lists/symbol_bf16.py   |  0
 .../mxnet/{contrib => }/amp/lists/symbol_fp16.py   |  2 +-
 python/mxnet/{contrib => }/amp/loss_scaler.py      |  6 ++---
 src/operator/{contrib => }/all_finite-inl.h        | 18 +++++++-------
 src/operator/{contrib => }/all_finite.cc           |  0
 src/operator/{contrib => }/all_finite.cu           |  0
 src/operator/{contrib => }/amp_graph_pass.cc       |  0
 .../gpu/{test_contrib_amp.py => test_amp.py}       |  5 ++--
 .../mkl/{test_contrib_amp.py => test_amp.py}       |  5 ++--
 tests/python/mkl/test_bf16_operator.py             |  3 +--
 15 files changed, 34 insertions(+), 37 deletions(-)
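
For downstream code, the user-visible effect of this commit is the import path:
AMP and its supporting operators move out of the contrib namespaces into core.
A minimal sketch of the before/after (this diff removes the contrib path
outright; it does not add a deprecation alias):

    # Before this commit, AMP lived in the contrib namespace:
    #   from mxnet.contrib import amp

    # After this commit, AMP is imported from the core package:
    from mxnet import amp

    # Initialization itself is unchanged; only the import location moved.
    amp.init()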

diff --git a/docs/python_docs/python/tutorials/performance/backend/amp.md b/docs/python_docs/python/tutorials/performance/backend/amp.md
index 5bd9ad3..2d572a8 100644
--- a/docs/python_docs/python/tutorials/performance/backend/amp.md
+++ b/docs/python_docs/python/tutorials/performance/backend/amp.md
@@ -177,7 +177,7 @@ In order to start using AMP, we need to import and initialize it. This has to ha
 
 
 ```{.python .input}
-from mxnet.contrib import amp
+from mxnet import amp
 
 amp.init()
 ```
diff --git a/example/automatic-mixed-precision/amp_model_conversion.py b/example/automatic-mixed-precision/amp_model_conversion.py
index d0e625b..22af4f3 100644
--- a/example/automatic-mixed-precision/amp_model_conversion.py
+++ b/example/automatic-mixed-precision/amp_model_conversion.py
@@ -22,7 +22,7 @@ import mxnet as mx
 from common import modelzoo
 import gluoncv
 from gluoncv.model_zoo import get_model
-from mxnet.contrib.amp import amp
+from mxnet import amp
 import numpy as np
 
 
diff --git a/python/mxnet/contrib/amp/__init__.py b/python/mxnet/amp/__init__.py
similarity index 100%
rename from python/mxnet/contrib/amp/__init__.py
rename to python/mxnet/amp/__init__.py
diff --git a/python/mxnet/contrib/amp/amp.py b/python/mxnet/amp/amp.py
similarity index 98%
rename from python/mxnet/contrib/amp/amp.py
rename to python/mxnet/amp/amp.py
index 5fde733..ad60478 100644
--- a/python/mxnet/contrib/amp/amp.py
+++ b/python/mxnet/amp/amp.py
@@ -32,21 +32,21 @@ import sys
 import numpy as np
 
 from mxnet import numpy
-from ... import symbol
-from ...context import gpu
-from ...symbol import Symbol
-from ...symbol import contrib as symbol_contrib
-from ... import ndarray
-from ...ndarray import NDArray, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
+from .. import symbol
+from ..context import gpu
+from ..symbol import Symbol
+from ..symbol import contrib as symbol_contrib
+from .. import ndarray
+from ..ndarray import NDArray, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
 from . import lists
-from ...gluon import Block, trainer
-from ... import base
-from ...base import (_NP_OP_PREFIX, _NP_OP_SUBMODULE_LIST, _NP_EXT_OP_PREFIX,
-                     _NP_EXT_OP_SUBMODULE_LIST, _NP_INTERNAL_OP_PREFIX,
-                     c_str_array, SymbolHandle, check_call, _LIB, mx_uint, c_array_buf)
-from ... import optimizer as opt
+from ..gluon import Block, trainer
+from .. import base
+from ..base import (_NP_OP_PREFIX, _NP_OP_SUBMODULE_LIST, _NP_EXT_OP_PREFIX,
+                    _NP_EXT_OP_SUBMODULE_LIST, _NP_INTERNAL_OP_PREFIX,
+                    c_str_array, SymbolHandle, check_call, _LIB, mx_uint, c_array_buf)
+from .. import optimizer as opt
 from .loss_scaler import LossScaler
-from ...operator import get_all_registered_operators_grouped
+from ..operator import get_all_registered_operators_grouped
 
 bfloat16 = np.dtype([('bfloat16', np.uint16)])
 
@@ -701,7 +701,7 @@ def convert_hybrid_block(block, target_dtype="float16", target_dtype_ops=None,
         because of a cast layer following it, but will reduce the computation and memory
         overhead of the model if casted.
     """
-    from ...gluon import HybridBlock, SymbolBlock
+    from ..gluon import HybridBlock, SymbolBlock
     assert isinstance(block, HybridBlock), "block input should be a HybridBlock"
     if not block._cached_graph:
         raise RuntimeError(
diff --git a/python/mxnet/contrib/amp/lists/__init__.py b/python/mxnet/amp/lists/__init__.py
similarity index 100%
rename from python/mxnet/contrib/amp/lists/__init__.py
rename to python/mxnet/amp/lists/__init__.py
diff --git a/python/mxnet/contrib/amp/lists/symbol_bf16.py b/python/mxnet/amp/lists/symbol_bf16.py
similarity index 100%
rename from python/mxnet/contrib/amp/lists/symbol_bf16.py
rename to python/mxnet/amp/lists/symbol_bf16.py
diff --git a/python/mxnet/contrib/amp/lists/symbol_fp16.py b/python/mxnet/amp/lists/symbol_fp16.py
similarity index 99%
rename from python/mxnet/contrib/amp/lists/symbol_fp16.py
rename to python/mxnet/amp/lists/symbol_fp16.py
index db608e4..b7e3dcb 100644
--- a/python/mxnet/contrib/amp/lists/symbol_fp16.py
+++ b/python/mxnet/amp/lists/symbol_fp16.py
@@ -18,7 +18,7 @@
 # coding: utf-8
 """Lists of functions whitelisted/blacklisted for automatic mixed precision in symbol API."""
 
-from ....runtime import Features
+from ...runtime import Features
 
 
 # Functions that should be cast to lower precision
diff --git a/python/mxnet/contrib/amp/loss_scaler.py b/python/mxnet/amp/loss_scaler.py
similarity index 96%
rename from python/mxnet/contrib/amp/loss_scaler.py
rename to python/mxnet/amp/loss_scaler.py
index 771408e..1e464ff 100644
--- a/python/mxnet/contrib/amp/loss_scaler.py
+++ b/python/mxnet/amp/loss_scaler.py
@@ -19,9 +19,9 @@
 """Dynamic loss scaler for AMP."""
 import logging
 
-from ... import autograd as ag
-from ... import ndarray
-from ...util import is_np_array
+from .. import autograd as ag
+from .. import ndarray
+from ..util import is_np_array
 
 class LossScaler(object):
     """Dynamic loss scaler for AMP.
diff --git a/src/operator/contrib/all_finite-inl.h b/src/operator/all_finite-inl.h
old mode 100755
new mode 100644
similarity index 88%
rename from src/operator/contrib/all_finite-inl.h
rename to src/operator/all_finite-inl.h
index cf63fce..d646d5b
--- a/src/operator/contrib/all_finite-inl.h
+++ b/src/operator/all_finite-inl.h
@@ -24,8 +24,8 @@
  * \author Clement Fuji Tsang
  */
 
-#ifndef MXNET_OPERATOR_CONTRIB_ALL_FINITE_INL_H_
-#define MXNET_OPERATOR_CONTRIB_ALL_FINITE_INL_H_
+#ifndef MXNET_OPERATOR_ALL_FINITE_INL_H_
+#define MXNET_OPERATOR_ALL_FINITE_INL_H_
 #include <dmlc/parameter.h>
 #include <mxnet/operator.h>
 #include <mxnet/operator_util.h>
@@ -34,12 +34,12 @@
 #include <nnvm/op.h>
 #include <nnvm/op_attr_types.h>
 #include <vector>
-#include "../operator_common.h"
-#include "../mshadow_op.h"
-#include "../elemwise_op_common.h"
-#include "../mxnet_op.h"
-#include "../tensor/init_op.h"
-#include "../tensor/util/tensor_util-inl.h"
+#include "operator_common.h"
+#include "mshadow_op.h"
+#include "elemwise_op_common.h"
+#include "mxnet_op.h"
+#include "tensor/init_op.h"
+#include "tensor/util/tensor_util-inl.h"
 
 namespace mxnet {
 namespace op {
@@ -97,4 +97,4 @@ MultiAllFiniteKernelParam<DType> FillMultiAllFiniteParam(const MultiAllFinitePar
 }  // namespace op
 }  // namespace mxnet
 
-#endif  // MXNET_OPERATOR_CONTRIB_ALL_FINITE_INL_H_
+#endif  // MXNET_OPERATOR_ALL_FINITE_INL_H_
diff --git a/src/operator/contrib/all_finite.cc b/src/operator/all_finite.cc
old mode 100755
new mode 100644
similarity index 100%
rename from src/operator/contrib/all_finite.cc
rename to src/operator/all_finite.cc
diff --git a/src/operator/contrib/all_finite.cu b/src/operator/all_finite.cu
old mode 100755
new mode 100644
similarity index 100%
rename from src/operator/contrib/all_finite.cu
rename to src/operator/all_finite.cu
diff --git a/src/operator/contrib/amp_graph_pass.cc b/src/operator/amp_graph_pass.cc
similarity index 100%
rename from src/operator/contrib/amp_graph_pass.cc
rename to src/operator/amp_graph_pass.cc
diff --git a/tests/python/gpu/test_contrib_amp.py b/tests/python/gpu/test_amp.py
similarity index 98%
rename from tests/python/gpu/test_contrib_amp.py
rename to tests/python/gpu/test_amp.py
index 99bab71..237cb1b 100644
--- a/tests/python/gpu/test_contrib_amp.py
+++ b/tests/python/gpu/test_amp.py
@@ -23,12 +23,11 @@ from random import randint
 import warnings
 import collections
 import ctypes
-import mxnet.contrib.amp as amp
+from mxnet import amp
 import pytest
 from mxnet.test_utils import set_default_context, same_symbol_structure
 from mxnet.gluon.model_zoo.vision import get_model
 from mxnet.gluon import SymbolBlock, nn, rnn
-from mxnet.contrib.amp import amp
 from mxnet.operator import get_all_registered_operators_grouped
 curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
 sys.path.insert(0, os.path.join(curr_path, '../unittest'))
@@ -94,7 +93,7 @@ def test_amp_coverage(amp_tests):
                      safest option"""
     diff = required - covered
     assert not diff, f"{len(diff)} operators {sorted(diff)} do not exist in AMP lists (in " \
-        f"python/mxnet/contrib/amp/lists/symbol_fp16.py) - please add them. " \
+        f"python/mxnet/amp/lists/symbol_fp16.py) - please add them. " \
         f"\n{guidelines}"
 
 @with_seed()
diff --git a/tests/python/mkl/test_contrib_amp.py b/tests/python/mkl/test_amp.py
similarity index 98%
rename from tests/python/mkl/test_contrib_amp.py
rename to tests/python/mkl/test_amp.py
index fed72e2..52b6286 100644
--- a/tests/python/mkl/test_contrib_amp.py
+++ b/tests/python/mkl/test_amp.py
@@ -23,12 +23,11 @@ from random import randint
 import warnings
 import collections
 import ctypes
-import mxnet.contrib.amp as amp
+from mxnet import amp
 import pytest
 from mxnet.test_utils import set_default_context, same_symbol_structure, assert_almost_equal
 from mxnet.gluon.model_zoo.vision import get_model
 from mxnet.gluon import SymbolBlock, nn, rnn
-from mxnet.contrib.amp import amp
 curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
 sys.path.insert(0, os.path.join(curr_path, '../unittest'))
 from common import with_seed
@@ -76,7 +75,7 @@ def test_amp_coverage():
 
     if ret1 != set():
         warnings.warn("Operators " + str(ret1) + " do not exist in AMP lists (in "
-                       "python/mxnet/contrib/amp/lists/symbol_bf16.py) - please add them. "
+                       "python/mxnet/amp/lists/symbol_bf16.py) - please add them. "
                        """Please follow these guidelines for choosing a proper list:
                        - if your operator is not to be used in a computational graph
                          (e.g. image manipulation operators, optimizers) or does not have
diff --git a/tests/python/mkl/test_bf16_operator.py b/tests/python/mkl/test_bf16_operator.py
index 3455c87..5cd9dbc 100644
--- a/tests/python/mkl/test_bf16_operator.py
+++ b/tests/python/mkl/test_bf16_operator.py
@@ -24,11 +24,10 @@ import warnings
 import collections
 import ctypes
 import itertools
-import mxnet.contrib.amp as amp
+from mxnet import amp
 from mxnet.test_utils import set_default_context, same_symbol_structure, assert_almost_equal_with_err, rand_shape_nd
 from mxnet.gluon.model_zoo.vision import get_model
 from mxnet.gluon import SymbolBlock, nn, rnn
-from mxnet.contrib.amp import amp
 curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
 sys.path.insert(0, os.path.join(curr_path, '../unittest'))
 from common import with_seed
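
Taken together, the tutorial hunk and the relocated loss scaler
(python/mxnet/amp/loss_scaler.py) show the usual AMP training flow under the
new core namespace. Below is a hedged sketch of that flow, following the
pattern in docs/python_docs/python/tutorials/performance/backend/amp.md; the
model, data, and hyperparameters are placeholders, not part of this commit:

    import mxnet as mx
    from mxnet import amp, autograd, gluon

    # AMP must be initialized before the network is created
    # (see the tutorial hunk above).
    amp.init()

    net = gluon.nn.Dense(10)        # placeholder model
    net.initialize()
    trainer = gluon.Trainer(net.collect_params(), 'sgd')

    # Attach AMP's dynamic loss scaling to the trainer.
    amp.init_trainer(trainer)

    data = mx.nd.ones((4, 8))       # placeholder batch
    label = mx.nd.zeros((4,))
    loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()

    with autograd.record():
        loss = loss_fn(net(data), label)
        # Scale the loss so low-precision gradients do not underflow,
        # then backpropagate through the scaled value.
        with amp.scale_loss(loss, trainer) as scaled_loss:
            autograd.backward(scaled_loss)
    trainer.step(data.shape[0])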