Posted to commits@mxnet.apache.org by sk...@apache.org on 2021/04/02 19:51:53 UTC

[incubator-mxnet] branch v1.x updated: ONNX Graduation (#20094)

This is an automated email from the ASF dual-hosted git repository.

skm pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.x by this push:
     new 852f962  ONNX Graduation (#20094)
852f962 is described below

commit 852f962e3b9072c6bf1dd0fd113067bd9669344b
Author: Zhaoqi Zhu <zh...@gmail.com>
AuthorDate: Fri Apr 2 12:49:16 2021 -0700

    ONNX Graduation (#20094)
    
    * move mx2onnx and add deprecation message
    
    * fix
    
    * change test import path
    
    * add license
    
    * refactor and add setup.py
    
    * fix dependency
    
    * add license to setup.py
    
    * white-list for header check
    
    * white-list fix
    
    * add readme
    
    * fix sanity
    
    * fix sanity
    
    * fix return
---
 python/mxnet/contrib/onnx/__init__.py              | 38 +++++++++++++++++--
 python/mxnet/onnx/README.md                        | 37 ++++++++++++++++++
 .../{contrib/onnx/mx2onnx => onnx}/__init__.py     |  4 +-
 python/mxnet/{contrib => }/onnx/mx2onnx/LICENSE    |  0
 .../mxnet/{contrib => }/onnx/mx2onnx/__init__.py   |  3 +-
 .../{contrib => }/onnx/mx2onnx/_export_helper.py   |  0
 .../mx2onnx/_export_model.py}                      |  6 +--
 .../mx2onnx/_export_onnx.py}                       |  2 +-
 .../{contrib => }/onnx/mx2onnx/_op_translations.py |  2 +-
 .../onnx/mx2onnx/__init__.py => onnx/setup.py}     | 28 +++++++++++---
 tests/python-pytest/onnx/backend.py                |  2 +-
 tests/python-pytest/onnx/test_onnxruntime_cv.py    | 10 ++---
 tests/python-pytest/onnx/test_onnxruntime_nlp.py   | 44 +++++++++++-----------
 tests/python-pytest/onnx/test_operators.py         |  4 +-
 tools/license_header.py                            |  4 ++
 15 files changed, 136 insertions(+), 48 deletions(-)

diff --git a/python/mxnet/contrib/onnx/__init__.py b/python/mxnet/contrib/onnx/__init__.py
index 9f27060..30ac62b 100644
--- a/python/mxnet/contrib/onnx/__init__.py
+++ b/python/mxnet/contrib/onnx/__init__.py
@@ -16,6 +16,38 @@
 # under the License.
 """Module for ONNX model format support for Apache MXNet."""
 
-from .onnx2mx.import_model import import_model, get_model_metadata
-from .onnx2mx.import_to_gluon import import_to_gluon
-from .mx2onnx.export_model import export_model
+from .onnx2mx.import_model import import_model as import_model_
+from .onnx2mx.import_model import get_model_metadata as get_model_metadata_
+from .onnx2mx.import_to_gluon import import_to_gluon as import_to_gluon_
+from ...onnx import export_model as export_model_
+
+def import_model(*args, **kwargs):
+    print('Calling mxnet.contrib.onnx.import_model...')
+    print('Please be advised that importing ONNX models into MXNet is going to be deprecated '
+          'in the upcoming MXNet v1.10 release. The following APIs will be deleted: '
+          'mxnet.contrib.onnx.import_model/get_model_metadata/import_to_gluon.')
+    return import_model_(*args, **kwargs)
+
+
+def get_model_metadata(*args, **kwargs):
+    print('Calling mxnet.contrib.onnx.get_model_metadata...')
+    print('Please be advised that importing ONNX models into MXNet is going to be deprecated '
+          'in the upcoming MXNet v1.10 release. The following APIs will be deleted: '
+          'mxnet.contrib.onnx.import_model/get_model_metadata/import_to_gluon.')
+    return get_model_metadata_(*args, **kwargs)
+
+
+def import_to_gluon(*args, **kwargs):
+    print('Calling mxnet.contrib.onnx.import_to_gluon...')
+    print('Please be advised that importing ONNX models into MXNet is going to be deprecated '
+          'in the upcoming MXNet v1.10 release. The following APIs will be deleted: '
+          'mxnet.contrib.onnx.import_model/get_model_metadata/import_to_gluon.')
+    return import_to_gluon_(*args, **kwargs)
+
+
+def export_model(*args, **kwargs):
+    print('Calling mxnet.contrib.onnx.export_model...')
+    print('Please be advised that the ONNX module has been moved to mxnet.onnx and '
+          'mxnet.onnx.export_model is the preferred path. The current path will be deprecated '
+          'in the upcoming MXNet v1.10 release.')
+    return export_model_(*args, **kwargs)
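
As a usage sketch of the migration these wrappers encourage (file names here are placeholders; assumes a Symbol/params pair already saved to disk):

    import mxnet as mx
    import numpy as np

    # Preferred path after this commit: the exporter now lives in mxnet.onnx.
    mx.onnx.export_model('model-symbol.json', 'model-0000.params',
                         [(1, 3, 224, 224)], [np.float32], 'model.onnx')

    # Legacy path: still functional, but prints the deprecation notice above
    # before delegating to mxnet.onnx.export_model.
    mx.contrib.onnx.export_model('model-symbol.json', 'model-0000.params',
                                 [(1, 3, 224, 224)], [np.float32], 'model.onnx')
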
diff --git a/python/mxnet/onnx/README.md b/python/mxnet/onnx/README.md
new file mode 100644
index 0000000..e46a332
--- /dev/null
+++ b/python/mxnet/onnx/README.md
@@ -0,0 +1,37 @@
+<!--- Licensed to the Apache Software Foundation (ASF) under one -->
+<!--- or more contributor license agreements.  See the NOTICE file -->
+<!--- distributed with this work for additional information -->
+<!--- regarding copyright ownership.  The ASF licenses this file -->
+<!--- to you under the Apache License, Version 2.0 (the -->
+<!--- "License"); you may not use this file except in compliance -->
+<!--- with the License.  You may obtain a copy of the License at -->
+
+<!---   http://www.apache.org/licenses/LICENSE-2.0 -->
+
+<!--- Unless required by applicable law or agreed to in writing, -->
+<!--- software distributed under the License is distributed on an -->
+<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
+<!--- KIND, either express or implied.  See the License for the -->
+<!--- specific language governing permissions and limitations -->
+<!--- under the License. -->
+
+# ONNX Export Support for MXNet
+
+### Overview
+[ONNX](https://onnx.ai/), or Open Neural Network Exchange, is an open-source deep learning model format that serves as a framework-neutral graph representation between DL frameworks and between training and inference. By exporting models to the ONNX format, MXNet users gain faster inference and a wider range of deployment targets, including edge and mobile devices where a full MXNet installation may be impractical. Popular hardware-accelerated and/or cross-platform ONNX r [...]
+
+### ONNX Versions Supported
+- ONNX 1.7 -- Fully Supported
+- ONNX 1.8 -- Work in Progress
+
+### Installation
+From the 1.9 release onward, the ONNX export module is an official, built-in module in MXNet. You can access the module at `mxnet.onnx`.
+
+If you are using an earlier MXNet version and do not want to upgrade, you can still enjoy the latest ONNX support by pulling the MXNet source code and building the wheel for just the mx2onnx module. Run `cd python/mxnet/onnx` and then build the wheel with `python3 -m build`. You should find the wheel under `python/mxnet/onnx/dist/mx2onnx-0.0.0-py3-none-any.whl`; install it with `pip install mx2onnx-0.0.0-py3-none-any.whl`. You should be able to access the module [...]
+
+### APIs
+
+### Operator Support Matrix - ONNX 1.7
+
+### GluonCV Pretrained Model Support Matrix
+
+### GluonNLP Pretrained Model Support Matrix
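
To accompany the README above, a hedged sketch of the dynamic-shape export path that the tests later in this commit exercise (file names are placeholders):

    import mxnet as mx
    import numpy as np
    import onnxruntime

    # Dynamic batch dimension: None marks the axis left free in the exported
    # ONNX graph, so one model serves any batch size.
    mx.onnx.export_model('model-symbol.json', 'model-0000.params',
                         [(1, 3, 224, 224)], [np.float32], 'model.onnx',
                         dynamic=True,
                         dynamic_input_shapes=[(None, 3, 224, 224)])

    # Quick smoke test with onnxruntime, mirroring the test setup below.
    sess = onnxruntime.InferenceSession('model.onnx')
    data = np.random.rand(4, 3, 224, 224).astype(np.float32)
    out = sess.run(None, {sess.get_inputs()[0].name: data})
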
diff --git a/python/mxnet/contrib/onnx/mx2onnx/__init__.py b/python/mxnet/onnx/__init__.py
similarity index 90%
copy from python/mxnet/contrib/onnx/mx2onnx/__init__.py
copy to python/mxnet/onnx/__init__.py
index 779ce86..3caab21 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/__init__.py
+++ b/python/mxnet/onnx/__init__.py
@@ -18,6 +18,4 @@
 # coding: utf-8
 """ONNX Export module"""
 
-from . import export_model
-from . import export_onnx
-from . import _op_translations
+from .mx2onnx import export_model
diff --git a/python/mxnet/contrib/onnx/mx2onnx/LICENSE b/python/mxnet/onnx/mx2onnx/LICENSE
similarity index 100%
rename from python/mxnet/contrib/onnx/mx2onnx/LICENSE
rename to python/mxnet/onnx/mx2onnx/LICENSE
diff --git a/python/mxnet/contrib/onnx/mx2onnx/__init__.py b/python/mxnet/onnx/mx2onnx/__init__.py
similarity index 94%
copy from python/mxnet/contrib/onnx/mx2onnx/__init__.py
copy to python/mxnet/onnx/mx2onnx/__init__.py
index 779ce86..d8a6d5a 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/__init__.py
+++ b/python/mxnet/onnx/mx2onnx/__init__.py
@@ -18,6 +18,5 @@
 # coding: utf-8
 """ONNX Export module"""
 
-from . import export_model
-from . import export_onnx
+from ._export_model import export_model
 from . import _op_translations
diff --git a/python/mxnet/contrib/onnx/mx2onnx/_export_helper.py b/python/mxnet/onnx/mx2onnx/_export_helper.py
similarity index 100%
rename from python/mxnet/contrib/onnx/mx2onnx/_export_helper.py
rename to python/mxnet/onnx/mx2onnx/_export_helper.py
diff --git a/python/mxnet/contrib/onnx/mx2onnx/export_model.py b/python/mxnet/onnx/mx2onnx/_export_model.py
similarity index 98%
rename from python/mxnet/contrib/onnx/mx2onnx/export_model.py
rename to python/mxnet/onnx/mx2onnx/_export_model.py
index 1c50db5..d9be998 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/export_model.py
+++ b/python/mxnet/onnx/mx2onnx/_export_model.py
@@ -22,9 +22,9 @@
 import logging
 import numpy as np
 
-from ....base import string_types
-from .... import symbol
-from .export_onnx import MXNetGraph
+from mxnet.base import string_types
+from mxnet import symbol
+from ._export_onnx import MXNetGraph
 from ._export_helper import load_module
 
 
diff --git a/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py b/python/mxnet/onnx/mx2onnx/_export_onnx.py
similarity index 99%
rename from python/mxnet/contrib/onnx/mx2onnx/export_onnx.py
rename to python/mxnet/onnx/mx2onnx/_export_onnx.py
index 4cec698..903b0cd 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py
+++ b/python/mxnet/onnx/mx2onnx/_export_onnx.py
@@ -50,7 +50,7 @@
 import logging
 import json
 
-from .... import ndarray as nd
+from mxnet import ndarray as nd
 
 
 class MXNetGraph(object):
diff --git a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py b/python/mxnet/onnx/mx2onnx/_op_translations.py
similarity index 99%
rename from python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
rename to python/mxnet/onnx/mx2onnx/_op_translations.py
index eb91a3c..ef65fa2 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
+++ b/python/mxnet/onnx/mx2onnx/_op_translations.py
@@ -56,7 +56,7 @@ Add new functions here with a decorator.
 import re
 import logging
 import numpy as np
-from .export_onnx import MXNetGraph as mx_op
+from ._export_onnx import MXNetGraph as mx_op
 try:
     import onnx
 except ImportError:
diff --git a/python/mxnet/contrib/onnx/mx2onnx/__init__.py b/python/mxnet/onnx/setup.py
similarity index 56%
rename from python/mxnet/contrib/onnx/mx2onnx/__init__.py
rename to python/mxnet/onnx/setup.py
index 779ce86..d0ef233 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/__init__.py
+++ b/python/mxnet/onnx/setup.py
@@ -15,9 +15,27 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# coding: utf-8
-"""ONNX Export module"""
+"""
+setup.py for mx2onnx
+"""
 
-from . import export_model
-from . import export_onnx
-from . import _op_translations
+from setuptools import setup, find_packages
+
+setup(
+    name='mx2onnx',
+    version='0.0.0',
+    description='Module to convert MXNet models to the ONNX format',
+    author='',
+    author_email='',
+    url='https://github.com/apache/incubator-mxnet/tree/v1.x/python/mxnet/onnx',
+    install_requires=[
+        'onnx >= 1.7.0',
+    ],
+    classifiers=[
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: Apache Software License',
+        'Programming Language :: Python :: 3 :: Only',
+    ],
+    packages=find_packages(),
+    python_requires='>=3.6'
+)
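
A hedged sketch of using the standalone wheel this setup.py produces (the top-level `mx2onnx` package name is an assumption based on find_packages() picking up the mx2onnx directory; MXNet itself must still be importable, since _export_model.py imports from mxnet):

    # After: pip install mx2onnx-0.0.0-py3-none-any.whl
    import numpy as np
    import mx2onnx

    # Same entry point re-exported by mx2onnx/__init__.py above.
    mx2onnx.export_model('model-symbol.json', 'model-0000.params',
                         [(1, 3, 224, 224)], [np.float32], 'model.onnx')
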
diff --git a/tests/python-pytest/onnx/backend.py b/tests/python-pytest/onnx/backend.py
index 6d8b1af..d294b93 100644
--- a/tests/python-pytest/onnx/backend.py
+++ b/tests/python-pytest/onnx/backend.py
@@ -19,7 +19,7 @@
 """MXNet/Gluon backend wrapper for onnx test infrastructure"""
 
 from mxnet.contrib.onnx.onnx2mx.import_onnx import GraphProto
-from mxnet.contrib.onnx.mx2onnx.export_onnx import MXNetGraph
+from mxnet.onnx.mx2onnx._export_onnx import MXNetGraph
 import mxnet as mx
 import numpy as np
 
diff --git a/tests/python-pytest/onnx/test_onnxruntime_cv.py b/tests/python-pytest/onnx/test_onnxruntime_cv.py
index fd6284a..8fe563a 100644
--- a/tests/python-pytest/onnx/test_onnxruntime_cv.py
+++ b/tests/python-pytest/onnx/test_onnxruntime_cv.py
@@ -50,15 +50,15 @@ class GluonModel():
 
     def export_onnx(self):
         onnx_file = self.modelpath + ".onnx"
-        mx.contrib.onnx.export_model(self.modelpath + "-symbol.json", self.modelpath + "-0000.params",
-                                     [self.input_shape], self.input_dtype, onnx_file)
+        mx.onnx.export_model(self.modelpath + "-symbol.json", self.modelpath + "-0000.params",
+                             [self.input_shape], self.input_dtype, onnx_file)
         return onnx_file
 
     def export_onnx_dynamic(self, dynamic_input_shapes):
         onnx_file = self.modelpath + ".onnx"
-        mx.contrib.onnx.export_model(self.modelpath + "-symbol.json", self.modelpath + "-0000.params",
-                                     [self.input_shape], self.input_dtype, onnx_file, dynamic=True,
-                                     dynamic_input_shapes=dynamic_input_shapes)
+        mx.onnx.export_model(self.modelpath + "-symbol.json", self.modelpath + "-0000.params",
+                             [self.input_shape], self.input_dtype, onnx_file, dynamic=True,
+                             dynamic_input_shapes=dynamic_input_shapes)
         return onnx_file
 
     def predict(self, data):
diff --git a/tests/python-pytest/onnx/test_onnxruntime_nlp.py b/tests/python-pytest/onnx/test_onnxruntime_nlp.py
index ecd94df..d2d5f58 100644
--- a/tests/python-pytest/onnx/test_onnxruntime_nlp.py
+++ b/tests/python-pytest/onnx/test_onnxruntime_nlp.py
@@ -63,9 +63,9 @@ def test_roberta_inference_onnxruntime(tmp_path, model_name):
         params_file = "%s-0000.params" % prefix
         onnx_file = "%s.onnx" % prefix
         input_shapes = [(batch, seq_length), (batch,), (batch, num_masked_positions)]
-        converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
-                                                            [np.float32, np.float32, np.int32],
-                                                            onnx_file, verbose=True)
+        converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
+                                                    [np.float32, np.float32, np.int32],
+                                                    onnx_file, verbose=True)
 
         sess_options = onnxruntime.SessionOptions()
         sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
@@ -120,7 +120,7 @@ def test_bert_inference_onnxruntime(tmp_path, model):
 
         input_shapes = [(batch, seq_length), (batch, seq_length), (batch,)]
         input_types = [np.float32, np.float32, np.float32]
-        converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes, input_types, onnx_file)
+        converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes, input_types, onnx_file)
 
 
         # create onnxruntime session using the generated onnx file
@@ -169,9 +169,9 @@ def test_distilbert_inference_onnxruntime(tmp_path, model_name):
         onnx_file = "%s.onnx" % prefix
 
         input_shapes = [(batch, seq_length), (batch,)]
-        converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
-                                                            [np.float32, np.float32],
-                                                            onnx_file, verbose=True)
+        converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
+                                                    [np.float32, np.float32],
+                                                    onnx_file, verbose=True)
         sess_options = onnxruntime.SessionOptions()
         sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
         sess = onnxruntime.InferenceSession(onnx_file, sess_options)
@@ -219,9 +219,9 @@ def test_standard_rnn_lstm_pretrained_inference_onnxruntime(tmp_path, model_name
         onnx_file = "%s.onnx" % prefix
 
         input_shapes = [(seq_length, batch), np.shape(begin_state[0]), np.shape(begin_state[1])]
-        converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
-                                                            [np.float32, np.float32, np.float32],
-                                                            onnx_file, verbose=True)
+        converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
+                                                    [np.float32, np.float32, np.float32],
+                                                    onnx_file, verbose=True)
         sess_options = onnxruntime.SessionOptions()
         sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
         sess = onnxruntime.InferenceSession(onnx_file, sess_options)
@@ -278,10 +278,10 @@ def test_dynamic_shape_bert_inference_onnxruntime(tmp_path, model):
         dynamic_input_shapes = [(None, seq_length), (None, seq_length), (None,)]
         input_shapes = [(batch, seq_length), (batch, seq_length), (batch,)]
         input_types = [np.float32, np.float32, np.float32]
-        converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
-                                                            input_types, onnx_file,
-                                                            dynamic=True,
-                                                            dynamic_input_shapes=dynamic_input_shapes)
+        converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
+                                                    input_types, onnx_file,
+                                                    dynamic=True,
+                                                    dynamic_input_shapes=dynamic_input_shapes)
 
         # create onnxruntime session using the generated onnx file
         ses_opt = onnxruntime.SessionOptions()
@@ -345,8 +345,8 @@ def test_awd_rnn_lstm_pretrained_inference_onnxruntime(tmp_path, model_name, seq
                         np.shape(begin_state[2][0]), np.shape(begin_state[2][1])]
         input_types = [np.float32, np.float32, np.float32, np.float32, np.float32, np.float32,
                        np.float32]
-        converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
-                                                            input_types, onnx_file, verbose=True)
+        converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
+                                                    input_types, onnx_file, verbose=True)
 
         sess_options = onnxruntime.SessionOptions()
         sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
@@ -408,8 +408,8 @@ def test_ernie_inference_onnxruntime(tmp_path, model_name):
 
         input_shapes = [(batch, seq_length), (batch, seq_length), (batch,)]
         input_types = [np.float32, np.float32, np.float32]
-        converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
-                                                            input_types, onnx_file)
+        converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
+                                                    input_types, onnx_file)
 
         # create onnxruntime session using the generated onnx file
         ses_opt = onnxruntime.SessionOptions()
@@ -475,8 +475,8 @@ def test_transformer_pretrained_inference_onnxruntime(tmp_path, model_name):
             sym_file = "%s-symbol.json" % prefix
             params_file = "%s-0000.params" % prefix
             onnx_file = "%s.onnx" % prefix
-            return mx.contrib.onnx.export_model(sym_file, params_file, input_shapes, input_types,
-                                                onnx_file, **kwargs)
+            return mx.onnx.export_model(sym_file, params_file, input_shapes, input_types,
+                                        onnx_file, **kwargs)
 
         def onnx_runtime_predict(onnx_file, onnx_inputs):
             ses_opt = onnxruntime.SessionOptions()
@@ -650,8 +650,8 @@ def test_gpt_pretrained_inference_onnxruntime(tmp_path, model_params):
 
         input_shapes = [(batch, seq_length)]
         input_types = [np.float32]
-        converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
-                                                            input_types, onnx_file)
+        converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
+                                                    input_types, onnx_file)
 
         ses_opt = onnxruntime.SessionOptions()
         ses_opt.log_severity_level = 3
diff --git a/tests/python-pytest/onnx/test_operators.py b/tests/python-pytest/onnx/test_operators.py
index 51170e6..b032fa7 100644
--- a/tests/python-pytest/onnx/test_operators.py
+++ b/tests/python-pytest/onnx/test_operators.py
@@ -46,8 +46,8 @@ def op_export_test(model_name, Model, inputs, tmp_path, dummy_input=False, onnx_
         sym_file = '{}-symbol.json'.format(model_path)
         params_file = '{}-0000.params'.format(model_path)
         onnx_file = '{}/{}.onnx'.format(tmp_path, model_name)
-        mx.contrib.onnx.export_model(sym_file, params_file, [inp.shape for inp in inputs],
-                                     [inp.dtype for inp in inputs], onnx_file)
+        mx.onnx.export_model(sym_file, params_file, [inp.shape for inp in inputs],
+                             [inp.dtype for inp in inputs], onnx_file)
         return onnx_file
 
     def onnx_rt(onnx_file, inputs):
diff --git a/tools/license_header.py b/tools/license_header.py
index a745ea2..71b2811 100755
--- a/tools/license_header.py
+++ b/tools/license_header.py
@@ -122,6 +122,10 @@ _WHITE_LIST = [
                # This file
                'tools/license_header.py',
 
+               # Dual-Licensed under Apache 2.0 and Nvidia BSD-3
+               'python/mxnet/onnx/mx2onnx/_export_onnx.py',
+               'python/mxnet/onnx/mx2onnx/_op_translations.py',
+
                # Github template
                '.github/ISSUE_TEMPLATE/bug_report.md',
                '.github/ISSUE_TEMPLATE/feature_request.md',