You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by sk...@apache.org on 2018/12/26 19:54:37 UTC

[incubator-mxnet] branch master updated: ONNX test code cleanup (#13553)

This is an automated email from the ASF dual-hosted git repository.

skm pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new fd34dc5  ONNX test code cleanup (#13553)
fd34dc5 is described below

commit fd34dc5f847192dfd522555afdf13be1eb67b72b
Author: Vandana Kannan <va...@users.noreply.github.com>
AuthorDate: Wed Dec 26 11:54:12 2018 -0800

    ONNX test code cleanup (#13553)
    
    * ONNX test code cleanup
    
    * Make tests use the common test case list
    
    * Remove import test_cases
    
    * Make Gluon backend rep common
    
    * Partially enable broadcast tests
    
    * Common function to populate tests
    
    * Make backend common
    
    * test models
    
    * Test nodes
    
    * ONNX export: Test for fully connected
    
    * Edit CI scripts mxnet export test cleanup
    
    * Further cleanup backend tests
    
    * README
    
    * Some corrections
    
    * test case format for test_models
---
 ci/docker/runtime_functions.sh                     |  10 +-
 tests/python-pytest/onnx/README.md                 |  33 ++
 tests/python-pytest/onnx/{export => }/backend.py   |  70 ++-
 tests/python-pytest/onnx/backend_rep.py            |  46 ++
 tests/python-pytest/onnx/backend_test.py           |  55 +++
 .../python-pytest/onnx/export/mxnet_export_test.py | 495 ---------------------
 .../python-pytest/onnx/export/onnx_backend_test.py | 151 -------
 .../onnx/{import => }/gluon_backend_test.py        |  30 +-
 tests/python-pytest/onnx/import/gluon_backend.py   |  75 ----
 .../python-pytest/onnx/import/gluon_backend_rep.py |  71 ---
 tests/python-pytest/onnx/import/mxnet_backend.py   |  71 ---
 .../python-pytest/onnx/import/onnx_import_test.py  | 275 ------------
 tests/python-pytest/onnx/import/test_cases.py      | 120 -----
 .../onnx/{import => }/mxnet_backend_test.py        |  31 +-
 tests/python-pytest/onnx/mxnet_export_test.py      | 121 +++++
 tests/python-pytest/onnx/test_cases.py             | 132 ++++++
 tests/python-pytest/onnx/test_models.py            | 167 +++++++
 tests/python-pytest/onnx/test_node.py              | 164 +++++++
 18 files changed, 790 insertions(+), 1327 deletions(-)

diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh
index ad9bbe5..0ae9079 100755
--- a/ci/docker/runtime_functions.sh
+++ b/ci/docker/runtime_functions.sh
@@ -939,11 +939,11 @@ unittest_centos7_gpu() {
 integrationtest_ubuntu_cpu_onnx() {
 	set -ex
 	export PYTHONPATH=./python/
-	pytest tests/python-pytest/onnx/import/mxnet_backend_test.py
-	pytest tests/python-pytest/onnx/import/onnx_import_test.py
-	pytest tests/python-pytest/onnx/import/gluon_backend_test.py
-	pytest tests/python-pytest/onnx/export/onnx_backend_test.py
-	python tests/python-pytest/onnx/export/mxnet_export_test.py
+	pytest tests/python-pytest/onnx/gluon_backend_test.py
+	pytest tests/python-pytest/onnx/mxnet_backend_test.py
+	pytest tests/python-pytest/onnx/mxnet_export_test.py
+	pytest tests/python-pytest/onnx/test_models.py
+	pytest tests/python-pytest/onnx/test_node.py
 }
 
 integrationtest_ubuntu_gpu_python() {
diff --git a/tests/python-pytest/onnx/README.md b/tests/python-pytest/onnx/README.md
new file mode 100644
index 0000000..d8f58cb
--- /dev/null
+++ b/tests/python-pytest/onnx/README.md
@@ -0,0 +1,33 @@
+# ONNX tests
+
+## Directory structure:
+
+```bash
+.
+├── README.md
+├── backend.py
+├── backend_rep.py
+├── backend_test.py
+├── gluon_backend_test.py
+├── mxnet_backend_test.py
+├── mxnet_export_test.py
+├── test_cases.py
+├── test_models.py
+└── test_node.py
+```
+
+* `backend.py` - MXNetBackend. This file contains prepare(). \
+This class can be used for both the MXNet and Gluon backends.
+* `backend_rep.py` - MXNetBackendRep and GluonBackendRep for running inference
+* `backend_test.py` - prepare tests by including tests from `test_cases.py`
+* `gluon_backend_test.py` - Set backend as gluon and execute ONNX tests for ONNX->Gluon import.
+* `mxnet_backend_test.py` - Set backend as mxnet and add tests for ONNX->MXNet import/export.
+Since MXNetBackend, when exporting, tests both import and export, the test list in this file is
+a union of tests that execute for import and export, export alone, and import alone.
+* `mxnet_export_test.py` - Execute unit tests for testing MXNet export code - this is not specific to
+any operator.
+* `test_cases.py` - list of test cases for operators/models that are supported
+for "both", import and export, "import" alone, or "export" alone.
+* `test_models.py` - custom tests for models
+* `test_node.py` - custom tests for operators. These tests are written independent of ONNX tests, in case
+ONNX doesn't have tests yet or for MXNet specific operators.
\ No newline at end of file
diff --git a/tests/python-pytest/onnx/export/backend.py b/tests/python-pytest/onnx/backend.py
similarity index 57%
rename from tests/python-pytest/onnx/export/backend.py
rename to tests/python-pytest/onnx/backend.py
index 3ea1daf..2f9e247 100644
--- a/tests/python-pytest/onnx/export/backend.py
+++ b/tests/python-pytest/onnx/backend.py
@@ -16,51 +16,57 @@
 # under the License.
 
 # coding: utf-8
-"""backend wrapper for onnx test infrastructure"""
-import os
-import sys
-import numpy as np
+"""MXNet/Gluon backend wrapper for onnx test infrastructure"""
+
 from mxnet.contrib.onnx.onnx2mx.import_onnx import GraphProto
 from mxnet.contrib.onnx.mx2onnx.export_onnx import MXNetGraph
+import mxnet as mx
+import numpy as np
+
 try:
     from onnx import helper, TensorProto, mapping
     from onnx.backend.base import Backend
 except ImportError:
-    raise ImportError("Onnx and protobuf need to be installed")
-CURR_PATH = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-sys.path.insert(0, os.path.join(CURR_PATH, '../'))
-from backend_rep import MXNetBackendRep
+    raise ImportError("Onnx and protobuf need to be installed. Instructions to"
+                      + " install - https://github.com/onnx/onnx#installation")
+from backend_rep import MXNetBackendRep, GluonBackendRep
+
 
-# Using these functions for onnx test infrastructure.
-# Implemented by following onnx docs guide:
-# https://github.com/onnx/onnx/blob/master/docs/Implementing%20an%20ONNX%20backend.md
 # MXNetBackend class will take an ONNX model with inputs, perform a computation,
 # and then return the output.
+# Implemented by following onnx docs guide:
+# https://github.com/onnx/onnx/blob/master/docs/ImplementingAnOnnxBackend.md
 
 class MXNetBackend(Backend):
-    """MXNet backend for ONNX"""
+    """MXNet/Gluon backend for ONNX"""
+
+    backend = 'mxnet'
+    operation = 'import'
+
+    @classmethod
+    def set_params(cls, backend, operation):
+        cls.backend = backend
+        cls.operation = operation
 
     @staticmethod
-    def perform_import_export(graph_proto, input_shape):
+    def perform_import_export(sym, arg_params, aux_params, input_shape):
         """ Import ONNX model to mxnet model and then export to ONNX model
             and then import it back to mxnet for verifying the result"""
         graph = GraphProto()
 
-        sym, arg_params, aux_params = graph.from_onnx(graph_proto)
-
         params = {}
         params.update(arg_params)
         params.update(aux_params)
         # exporting to onnx graph proto format
         converter = MXNetGraph()
-        graph_proto = converter.create_onnx_graph_proto(sym, params, in_shape=input_shape, in_type=mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')])
+        graph_proto = converter.create_onnx_graph_proto(sym, params, in_shape=input_shape,
+                                                        in_type=mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')])
 
         # importing back to MXNET for verifying result.
         sym, arg_params, aux_params = graph.from_onnx(graph_proto)
 
         return sym, arg_params, aux_params
 
-
     @classmethod
     def prepare(cls, model, device='CPU', **kwargs):
         """For running end to end model(used for onnx test backend)
@@ -80,13 +86,31 @@ class MXNetBackend(Backend):
             Returns object of MXNetBackendRep class which will be in turn
             used to run inference on the input model and return the result for comparison.
         """
+        backend = kwargs.get('backend', cls.backend)
+        operation = kwargs.get('operation', cls.operation)
 
         graph = GraphProto()
-        metadata = graph.get_graph_metadata(model.graph)
-        input_data = metadata['input_tensor_data']
-        input_shape = [data[1] for data in input_data]
-        sym, arg_params, aux_params = MXNetBackend.perform_import_export(model.graph, input_shape)
-        return MXNetBackendRep(sym, arg_params, aux_params, device)
+        if device == 'CPU':
+            ctx = mx.cpu()
+        else:
+            raise NotImplementedError("ONNX tests are run only for CPU context.")
+
+        if backend == 'mxnet':
+            sym, arg_params, aux_params = graph.from_onnx(model.graph)
+            if operation == 'export':
+                metadata = graph.get_graph_metadata(model.graph)
+                input_data = metadata['input_tensor_data']
+                input_shape = [data[1] for data in input_data]
+                sym, arg_params, aux_params = MXNetBackend.perform_import_export(sym, arg_params, aux_params,
+                                                                                 input_shape)
+
+            return MXNetBackendRep(sym, arg_params, aux_params, device)
+        elif backend == 'gluon':
+            if operation == 'import':
+                net = graph.graph_to_gluon(model.graph, ctx)
+                return GluonBackendRep(net, device)
+            elif operation == 'export':
+                raise NotImplementedError("Gluon->ONNX export not implemented.")
 
     @classmethod
     def supports_device(cls, device):
@@ -96,6 +120,4 @@ class MXNetBackend(Backend):
 
 prepare = MXNetBackend.prepare
 
-run_node = MXNetBackend.run_node
-
 supports_device = MXNetBackend.supports_device
diff --git a/tests/python-pytest/onnx/backend_rep.py b/tests/python-pytest/onnx/backend_rep.py
index 63836ac..bcb99ab 100644
--- a/tests/python-pytest/onnx/backend_rep.py
+++ b/tests/python-pytest/onnx/backend_rep.py
@@ -22,7 +22,9 @@ try:
 except ImportError:
     raise ImportError("Onnx and protobuf need to be installed. Instructions to"
                       + " install - https://github.com/onnx/onnx#installation")
+import numpy as np
 import mxnet as mx
+from mxnet import nd
 
 # Using these functions for onnx test infrastructure.
 # Implemented by following onnx docs guide:
@@ -82,3 +84,47 @@ class MXNetBackendRep(BackendRep):
         exe.forward(is_train=False)
         result = exe.outputs[0].asnumpy()
         return [result]
+
+
+# GluonBackendRep object will be returned by GluonBackend's prepare method which is used to
+# execute a model repeatedly.
+# Inputs will be passed to the run method of MXNetBackendRep class, it will perform computation and
+# retrieve the corresponding results for comparison to the onnx backend.
+# https://github.com/onnx/onnx/blob/master/onnx/backend/test/runner/__init__.py.
+# Implemented by following onnx docs guide:
+# https://github.com/onnx/onnx/blob/master/docs/ImplementingAnOnnxBackend.md
+
+class GluonBackendRep(BackendRep):
+    """Running model inference on gluon backend and return the result
+     to onnx test infrastructure for comparison."""
+    def __init__(self, net, device):
+        self.net = net
+        self.device = device
+
+    def run(self, inputs, **kwargs):
+        """Run model inference and return the result
+
+        Parameters
+        ----------
+        inputs : numpy array
+            input to run a layer on
+
+        Returns
+        -------
+        params : numpy array
+            result obtained after running the inference on mxnet
+        """
+        # create module, passing cpu context
+        if self.device == 'CPU':
+            ctx = mx.cpu()
+        else:
+            raise NotImplementedError("ONNX tests are run only for CPU context.")
+
+        # run inference
+        net_inputs = [nd.array(input_data, ctx=ctx) for input_data in inputs]
+        net_outputs = self.net(*net_inputs)
+        results = []
+        results.extend([o for o in net_outputs.asnumpy()])
+        result = np.array(results)
+
+        return [result]
diff --git a/tests/python-pytest/onnx/backend_test.py b/tests/python-pytest/onnx/backend_test.py
new file mode 100644
index 0000000..6c6c3d2
--- /dev/null
+++ b/tests/python-pytest/onnx/backend_test.py
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""ONNX test backend wrapper"""
+try:
+    import onnx.backend.test
+except ImportError:
+    raise ImportError("Onnx and protobuf need to be installed")
+
+import test_cases
+
+
+def prepare_tests(backend, operation):
+    """
+    Prepare the test list
+    :param backend: mxnet/gluon backend
+    :param operation: str. export or import
+    :return: backend test list
+    """
+    BACKEND_TESTS = onnx.backend.test.BackendTest(backend, __name__)
+    implemented_ops = test_cases.IMPLEMENTED_OPERATORS_TEST.get('both', []) + \
+                      test_cases.IMPLEMENTED_OPERATORS_TEST.get(operation, [])
+
+    for op_test in implemented_ops:
+        BACKEND_TESTS.include(op_test)
+
+    basic_models = test_cases.BASIC_MODEL_TESTS.get('both', []) + \
+                   test_cases.BASIC_MODEL_TESTS.get(operation, [])
+
+    for basic_model_test in basic_models:
+        BACKEND_TESTS.include(basic_model_test)
+
+    std_models = test_cases.STANDARD_MODEL.get('both', []) + \
+                 test_cases.STANDARD_MODEL.get(operation, [])
+
+    for std_model_test in std_models:
+        BACKEND_TESTS.include(std_model_test)
+
+    BACKEND_TESTS.exclude('.*bcast.*')
+
+    return BACKEND_TESTS
diff --git a/tests/python-pytest/onnx/export/mxnet_export_test.py b/tests/python-pytest/onnx/export/mxnet_export_test.py
deleted file mode 100644
index b4fa4b1..0000000
--- a/tests/python-pytest/onnx/export/mxnet_export_test.py
+++ /dev/null
@@ -1,495 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-"""
-Tests for individual operators
-This module contains operator tests which currently do not exist on
-ONNX backend test framework. Once we have PRs on the ONNX repo and get
-those PRs merged, this file will get EOL'ed.
-"""
-# pylint: disable=too-many-locals,wrong-import-position,import-error
-from __future__ import absolute_import
-import sys
-import os
-import unittest
-import logging
-import tarfile
-import tempfile
-from collections import namedtuple
-import numpy as np
-import numpy.testing as npt
-from onnx import numpy_helper, helper
-from onnx import TensorProto
-from mxnet import nd, sym
-from mxnet.gluon import nn
-from mxnet.test_utils import download
-from mxnet.contrib import onnx as onnx_mxnet
-import mxnet as mx
-CURR_PATH = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-sys.path.insert(0, os.path.join(CURR_PATH, '../../../python/unittest'))
-import backend
-from common import with_seed
-
-logger = logging.getLogger()
-logger.setLevel(logging.DEBUG)
-URLS = {
-    'bvlc_googlenet':
-        'https://s3.amazonaws.com/onnx-mxnet/model-zoo/bvlc_googlenet.tar.gz',
-    'bvlc_reference_caffenet':
-        'https://s3.amazonaws.com/onnx-mxnet/model-zoo/bvlc_reference_caffenet.tar.gz',
-    'bvlc_reference_rcnn_ilsvrc13':
-        'https://s3.amazonaws.com/onnx-mxnet/model-zoo/bvlc_reference_rcnn_ilsvrc13.tar.gz',
-    'inception_v1':
-        'https://s3.amazonaws.com/onnx-mxnet/model-zoo/inception_v1.tar.gz',
-    'inception_v2':
-        'https://s3.amazonaws.com/onnx-mxnet/model-zoo/inception_v2.tar.gz'
-}
-
-def get_test_files(name):
-    """Extract tar file and returns model path and input, output data"""
-    tar_name = download(URLS.get(name), dirname=CURR_PATH.__str__())
-    # extract tar file
-    tar_path = os.path.join(CURR_PATH, tar_name)
-    tar = tarfile.open(tar_path.__str__(), "r:*")
-    tar.extractall(path=CURR_PATH.__str__())
-    tar.close()
-    data_dir = os.path.join(CURR_PATH, name)
-    model_path = os.path.join(data_dir, 'model.onnx')
-
-    inputs = []
-    outputs = []
-    # get test files
-    for test_file in os.listdir(data_dir):
-        case_dir = os.path.join(data_dir, test_file)
-        # skip the non-dir files
-        if not os.path.isdir(case_dir):
-            continue
-        input_file = os.path.join(case_dir, 'input_0.pb')
-        input_tensor = TensorProto()
-        with open(input_file, 'rb') as proto_file:
-            input_tensor.ParseFromString(proto_file.read())
-        inputs.append(numpy_helper.to_array(input_tensor))
-
-        output_tensor = TensorProto()
-        output_file = os.path.join(case_dir, 'output_0.pb')
-        with open(output_file, 'rb') as proto_file:
-            output_tensor.ParseFromString(proto_file.read())
-        outputs.append(numpy_helper.to_array(output_tensor))
-
-    return model_path, inputs, outputs
-
-
-def forward_pass(sym, arg, aux, data_names, input_data):
-    """ Perform forward pass on given data
-    :param sym: Symbol
-    :param arg: Arg params
-    :param aux: Aux params
-    :param data_names: Input names (list)
-    :param input_data: Input data (list). If there is only one input,
-                        pass it as a list. For example, if input is [1, 2],
-                        pass input_data=[[1, 2]]
-    :return: result of forward pass
-    """
-    # create module
-    mod = mx.mod.Module(symbol=sym, data_names=data_names, context=mx.cpu(), label_names=None)
-
-    data_shapes = []
-    data_forward = []
-    for idx in range(len(data_names)):
-        val = input_data[idx]
-        data_shapes.append((data_names[idx], np.shape(val)))
-        data_forward.append(mx.nd.array(val))
-
-    mod.bind(for_training=False, data_shapes=data_shapes, label_shapes=None)
-    mod.set_params(arg_params=arg, aux_params=aux,
-                   allow_missing=True, allow_extra=True)
-
-    # run inference
-    batch = namedtuple('Batch', ['data'])
-    mod.forward(batch(data_forward), is_train=False)
-
-    return mod.get_outputs()[0].asnumpy()
-
-
-def test_models(model_name, input_shape, output_shape):
-    """ Tests Googlenet model for both onnx import and export"""
-    model_path, inputs, outputs = get_test_files(model_name)
-    logging.info("Translating model from ONNX model zoo to Mxnet")
-    sym, arg_params, aux_params = onnx_mxnet.import_model(model_path)
-    params = {}
-    params.update(arg_params)
-    params.update(aux_params)
-
-    dir_path = os.path.dirname(model_path)
-    new_model_name = "exported_" + model_name + ".onnx"
-    onnx_file = os.path.join(dir_path, new_model_name)
-
-    logging.info("Translating converted model from mxnet to ONNX")
-    converted_model_path = onnx_mxnet.export_model(sym, params, [input_shape], np.float32, onnx_file)
-
-    sym, arg_params, aux_params = onnx_mxnet.import_model(converted_model_path)
-
-    metadata = onnx_mxnet.get_model_metadata(converted_model_path)
-    assert len(metadata) == 2
-    assert metadata.get('input_tensor_data')
-    assert metadata.get('input_tensor_data')[0][1] == input_shape
-    assert metadata.get('output_tensor_data')
-    assert metadata.get('output_tensor_data')[0][1] == output_shape
-    data_names = [input_name[0] for input_name in metadata.get('input_tensor_data')]
-
-    logging.info("Running inference on onnx re-import model in mxnet")
-    # run test for each test file
-    for input_data, output_data in zip(inputs, outputs):
-        result = forward_pass(sym, arg_params, aux_params, data_names, [input_data])
-
-        # verify the results
-        npt.assert_equal(result.shape, output_data.shape)
-        npt.assert_almost_equal(output_data, result, decimal=3)
-    logging.info(model_name + " conversion successful")
-
-
-def test_model_accuracy(model_name, input_shape):
-    """ Imports ONNX model, runs inference, exports and imports back
-        run inference, compare result with the previous inference result"""
-    model_path, inputs, outputs = get_test_files(model_name)
-    logging.info("Translating model from ONNX model zoo to Mxnet")
-    sym, arg_params, aux_params = onnx_mxnet.import_model(model_path)
-
-    metadata = onnx_mxnet.get_model_metadata(model_path)
-    data_names = [input_name[0] for input_name in metadata.get('input_tensor_data')]
-
-    expected_result= []
-    for input_data, output_data in zip(inputs, outputs):
-        result = forward_pass(sym, arg_params, aux_params, data_names, [input_data])
-        expected_result.append(result)
-
-    params = {}
-    params.update(arg_params)
-    params.update(aux_params)
-
-    dir_path = os.path.dirname(model_path)
-    new_model_name = "exported_" + model_name + ".onnx"
-    onnx_file = os.path.join(dir_path, new_model_name)
-
-    logging.info("Translating converted model from mxnet to ONNX")
-    converted_model_path = onnx_mxnet.export_model(sym, params, [input_shape], np.float32,
-                                                   onnx_file)
-
-    sym, arg_params, aux_params = onnx_mxnet.import_model(converted_model_path)
-
-    metadata = onnx_mxnet.get_model_metadata(converted_model_path)
-    data_names = [input_name[0] for input_name in metadata.get('input_tensor_data')]
-
-    actual_result = []
-    for input_data, output_data in zip(inputs, outputs):
-        result = forward_pass(sym, arg_params, aux_params, data_names, [input_data])
-        actual_result.append(result)
-
-    # verify the results
-    for expected, actual in zip(expected_result, actual_result):
-        npt.assert_equal(expected.shape, actual.shape)
-        npt.assert_almost_equal(expected, actual, decimal=3)
-
-@with_seed()
-def test_spacetodepth():
-    n, c, h, w = shape = (1, 1, 4, 6)
-    input1 = np.random.rand(n, c, h, w).astype("float32")
-    blocksize = 2
-    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=shape)]
-
-    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 4, 2, 3))]
-
-    nodes = [helper.make_node("SpaceToDepth", ["input1"], ["output"], block_size=blocksize)]
-
-    graph = helper.make_graph(nodes,
-                              "spacetodepth_test",
-                              inputs,
-                              outputs)
-
-    spacetodepth_model = helper.make_model(graph)
-
-    bkd_rep = backend.prepare(spacetodepth_model)
-    output = bkd_rep.run([input1])
-
-    tmp = np.reshape(input1, [n, c,
-                    h // blocksize, blocksize,
-                    w // blocksize, blocksize])
-    tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
-    numpy_op = np.reshape(tmp, [n, c * (blocksize**2),
-                    h // blocksize,
-                    w // blocksize])
-
-    npt.assert_almost_equal(output[0], numpy_op)
-
-@with_seed()
-def test_square():
-    input1 = np.random.randint(1, 10, (2, 3)).astype("float32")
-
-    ipsym = mx.sym.Variable("input1")
-    square = mx.sym.square(data=ipsym)
-    model = mx.mod.Module(symbol=square, data_names=['input1'], label_names=None)
-    model.bind(for_training=False, data_shapes=[('input1', np.shape(input1))], label_shapes=None)
-    model.init_params()
-
-    args, auxs = model.get_params()
-    params = {}
-    params.update(args)
-    params.update(auxs)
-
-    converted_model = onnx_mxnet.export_model(square, params, [np.shape(input1)], np.float32, "square.onnx")
-
-    sym, arg_params, aux_params = onnx_mxnet.import_model(converted_model)
-    result = forward_pass(sym, arg_params, aux_params, ['input1'], [input1])
-
-    numpy_op = np.square(input1)
-
-    npt.assert_almost_equal(result, numpy_op)
-
-
-@with_seed()
-def test_fully_connected():
-    def random_arrays(*shapes):
-        """Generate some random numpy arrays."""
-        arrays = [np.random.randn(*s).astype("float32")
-                  for s in shapes]
-        if len(arrays) == 1:
-            return arrays[0]
-        return arrays
-
-    data_names = ['x', 'w', 'b']
-
-    dim_in, dim_out = (3, 4)
-    input_data = random_arrays((4, dim_in), (dim_out, dim_in), (dim_out,))
-
-    ipsym = []
-    data_shapes = []
-    data_forward = []
-    for idx in range(len(data_names)):
-        val = input_data[idx]
-        data_shapes.append((data_names[idx], np.shape(val)))
-        data_forward.append(mx.nd.array(val))
-        ipsym.append(mx.sym.Variable(data_names[idx]))
-
-    op = mx.sym.FullyConnected(data=ipsym[0], weight=ipsym[1], bias=ipsym[2], num_hidden=dim_out, name='FC')
-
-    model = mx.mod.Module(op, data_names=data_names, label_names=None)
-    model.bind(for_training=False, data_shapes=data_shapes, label_shapes=None)
-
-    model.init_params()
-
-    args, auxs = model.get_params()
-    params = {}
-    params.update(args)
-    params.update(auxs)
-
-    converted_model = onnx_mxnet.export_model(op, params, [shape[1] for shape in data_shapes], np.float32, "fc.onnx")
-
-    sym, arg_params, aux_params = onnx_mxnet.import_model(converted_model)
-    result = forward_pass(sym, arg_params, aux_params, data_names, input_data)
-
-    numpy_op = np.dot(input_data[0], input_data[1].T) + input_data[2]
-
-    npt.assert_almost_equal(result, numpy_op)
-
-
-def test_softmax():
-    input1 = np.random.rand(1000, 1000).astype("float32")
-    label1 = np.random.rand(1000)
-    input_nd = mx.nd.array(input1)
-    label_nd = mx.nd.array(label1)
-
-    ipsym = mx.sym.Variable("ipsym")
-    label = mx.sym.Variable('label')
-    sym = mx.sym.SoftmaxOutput(data=ipsym, label=label, ignore_label=0, use_ignore=False)
-    ex = sym.bind(ctx=mx.cpu(0), args={'ipsym': input_nd, 'label': label_nd})
-    ex.forward(is_train=True)
-    softmax_out = ex.outputs[0].asnumpy()
-
-    converted_model = onnx_mxnet.export_model(sym, {}, [(1000, 1000), (1000,)], np.float32, "softmaxop.onnx")
-
-    sym, arg_params, aux_params = onnx_mxnet.import_model(converted_model)
-    result = forward_pass(sym, arg_params, aux_params, ['ipsym'], input1)
-
-    # Comparing result of forward pass before using onnx export, import
-    npt.assert_almost_equal(result, softmax_out)
-
-@with_seed()
-def test_comparison_ops():
-    """Test greater, lesser, equal"""
-    def test_ops(op_name, inputs, input_tensors, numpy_op):
-        outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=np.shape(inputs[0]))]
-        nodes = [helper.make_node(op_name, ["input"+str(i+1) for i in range(len(inputs))], ["output"])]
-        graph = helper.make_graph(nodes,
-                                  op_name + "_test",
-                                  input_tensors,
-                                  outputs)
-        model = helper.make_model(graph)
-        bkd_rep = backend.prepare(model)
-        output = bkd_rep.run(inputs)
-        npt.assert_almost_equal(output[0], numpy_op)
-    input_data = [np.random.rand(1, 3, 4, 5).astype("float32"),
-                  np.random.rand(1, 5).astype("float32")]
-    input_tensor = []
-    for idx, ip in enumerate(input_data):
-        input_tensor.append(helper.make_tensor_value_info("input" + str(idx + 1),
-                                                          TensorProto.FLOAT, shape=np.shape(ip)))
-    test_ops("Greater", input_data, input_tensor,
-             np.greater(input_data[0], input_data[1]).astype(np.float32))
-    test_ops("Less", input_data, input_tensor,
-             np.less(input_data[0], input_data[1]).astype(np.float32))
-    test_ops("Equal", input_data, input_tensor,
-             np.equal(input_data[0], input_data[1]).astype(np.float32))
-
-
-def get_int_inputs(interval, shape):
-    """Helper to get integer input of given shape and range"""
-    assert len(interval) == len(shape)
-    inputs = []
-    input_tensors = []
-    for idx in range(len(interval)):
-        low, high = interval[idx]
-        inputs.append(np.random.randint(low, high, size=shape[idx]).astype("float32"))
-        input_tensors.append(helper.make_tensor_value_info("input"+str(idx+1),
-                                                        TensorProto.FLOAT, shape=shape[idx]))
-    return inputs, input_tensors
-
-
-@with_seed()
-def test_logical_ops():
-    """Test for logical and, or, not, xor operators"""
-    def test_ops(op_name, inputs, input_tensors, numpy_op):
-        outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=np.shape(inputs[0]))]
-        nodes = [helper.make_node(op_name, ["input"+str(i+1) for i in range(len(inputs))], ["output"])]
-        graph = helper.make_graph(nodes,
-                                  op_name + "_test",
-                                  input_tensors,
-                                  outputs)
-        model = helper.make_model(graph)
-        bkd_rep = backend.prepare(model)
-        output = bkd_rep.run(inputs)
-        npt.assert_almost_equal(output[0], numpy_op)
-    input_data, input_tensor = get_int_inputs([(0, 2), (0, 2)], [(3, 4, 5), (3, 4, 5)])
-    test_ops("And", input_data, input_tensor,
-             np.logical_and(input_data[0], input_data[1]).astype(np.float32))
-    test_ops("Or", input_data, input_tensor,
-             np.logical_or(input_data[0], input_data[1]).astype(np.float32))
-    test_ops("Xor", input_data, input_tensor,
-             np.logical_xor(input_data[0], input_data[1]).astype(np.float32))
-    test_ops("Not", [input_data[0]], [input_tensor[0]],
-             np.logical_not(input_data[0]).astype(np.float32))
-
-
-def _assert_sym_equal(lhs, rhs):
-    assert lhs.list_inputs() == rhs.list_inputs()  # input names must be identical
-    assert len(lhs.list_outputs()) == len(rhs.list_outputs())  # number of outputs must be identical
-
-
-def _force_list(output):
-    if isinstance(output, nd.NDArray):
-        return [output]
-    return list(output)
-
-
-def _optional_group(symbols, group=False):
-    if group:
-        return sym.Group(symbols)
-    else:
-        return symbols
-
-
-def _check_onnx_export(net, group_outputs=False, shape_type=tuple, extra_params={}):
-    net.initialize()
-    data = nd.random.uniform(0, 1, (1, 1024))
-    output = _force_list(net(data))  # initialize weights
-    net_sym = _optional_group(net(sym.Variable('data')), group_outputs)
-    net_params = {name:param._reduce() for name, param in net.collect_params().items()}
-    net_params.update(extra_params)
-    with tempfile.TemporaryDirectory() as tmpdirname:
-        onnx_file_path = os.path.join(tmpdirname, 'net.onnx')
-        export_path = onnx_mxnet.export_model(
-            sym=net_sym,
-            params=net_params,
-            input_shape=[shape_type(data.shape)],
-            onnx_file_path=onnx_file_path)
-        assert export_path == onnx_file_path
-        # Try importing the model to symbol
-        _assert_sym_equal(net_sym, onnx_mxnet.import_model(export_path)[0])
-
-        # Try importing the model to gluon
-        imported_net = onnx_mxnet.import_to_gluon(export_path, ctx=None)
-        _assert_sym_equal(net_sym, _optional_group(imported_net(sym.Variable('data')), group_outputs))
-
-        # Confirm network outputs are the same
-        imported_net_output = _force_list(imported_net(data))
-        for out, imp_out in zip(output, imported_net_output):
-            mx.test_utils.assert_almost_equal(out.asnumpy(), imp_out.asnumpy())
-
-
-@with_seed()
-def test_onnx_export_single_output():
-    net = nn.HybridSequential(prefix='single_output_net')
-    with net.name_scope():
-        net.add(nn.Dense(100, activation='relu'), nn.Dense(10))
-    _check_onnx_export(net)
-
-
-@with_seed()
-def test_onnx_export_multi_output():
-    class MultiOutputBlock(nn.HybridBlock):
-        def __init__(self):
-            super(MultiOutputBlock, self).__init__()
-            with self.name_scope():
-                self.net = nn.HybridSequential()
-                for i in range(10):
-                    self.net.add(nn.Dense(100 + i * 10, activation='relu'))
-
-        def hybrid_forward(self, F, x):
-            out = tuple(block(x) for block in self.net._children.values())
-            return out
-
-    net = MultiOutputBlock()
-    assert len(sym.Group(net(sym.Variable('data'))).list_outputs()) == 10
-    _check_onnx_export(net, group_outputs=True)
-
-
-@with_seed()
-def test_onnx_export_list_shape():
-    net = nn.HybridSequential(prefix='list_shape_net')
-    with net.name_scope():
-        net.add(nn.Dense(100, activation='relu'), nn.Dense(10))
-    _check_onnx_export(net, shape_type=list)
-
-
-@with_seed()
-def test_onnx_export_extra_params():
-    net = nn.HybridSequential(prefix='extra_params_net')
-    with net.name_scope():
-        net.add(nn.Dense(100, activation='relu'), nn.Dense(10))
-    _check_onnx_export(net, extra_params={'extra_param': nd.array([1, 2])})
-
-
-if __name__ == '__main__':
-    test_models("bvlc_googlenet", (1, 3, 224, 224), (1, 1000))
-    test_models("bvlc_reference_caffenet", (1, 3, 224, 224), (1, 1000))
-    test_models("bvlc_reference_rcnn_ilsvrc13", (1, 3, 224, 224), (1, 200))
-
-    # Comparing MXNet inference result, since MXNet results don't match
-    # ONNX expected results due to AveragePool issue github issue(#10194)
-    test_model_accuracy("inception_v1", (1, 3, 224, 224))
-    test_model_accuracy("inception_v2", (1, 3, 224, 224))
-
-    unittest.main()
diff --git a/tests/python-pytest/onnx/export/onnx_backend_test.py b/tests/python-pytest/onnx/export/onnx_backend_test.py
deleted file mode 100644
index c9926c4..0000000
--- a/tests/python-pytest/onnx/export/onnx_backend_test.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-"""ONNX test backend wrapper"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import unittest
-try:
-    import onnx.backend.test
-except ImportError:
-    raise ImportError("Onnx and protobuf need to be installed")
-
-import backend as mxnet_backend
-
-# This is a pytest magic variable to load extra plugins
-pytest_plugins = "onnx.backend.test.report",
-
-BACKEND_TESTS = onnx.backend.test.BackendTest(mxnet_backend, __name__)
-
-IMPLEMENTED_OPERATORS_TEST = [
-    'test_random_uniform',
-    'test_random_normal',
-    'test_add',
-    'test_sub',
-    'test_mul',
-    'test_div',
-    'test_neg',
-    'test_abs',
-    'test_sum',
-    'test_tanh',
-    'test_cos',
-    'test_sin',
-    'test_tan',
-    'test_acos',
-    'test_asin',
-    'test_atan'
-    'test_ceil',
-    'test_floor',
-    'test_concat',
-    'test_identity',
-    'test_sigmoid',
-    'test_relu',
-    'test_constant_pad',
-    'test_edge_pad',
-    'test_reflect_pad',
-    'test_reduce_min',
-    'test_reduce_max',
-    'test_reduce_mean',
-    'test_reduce_prod',
-    'test_reduce_sum_d',
-    'test_reduce_sum_keepdims_random',
-    'test_squeeze',
-    'test_softmax_example',
-    'test_softmax_large_number',
-    'test_softmax_axis_2',
-    'test_transpose',
-    'test_globalmaxpool',
-    'test_globalaveragepool',
-    # enabling partial test cases for matmul
-    'test_matmul_3d',
-    'test_matmul_4d',
-    'test_slice_cpu',
-    'test_slice_neg',
-    'test_squeeze_',
-    'test_reciprocal',
-    'test_sqrt',
-    'test_pow',
-    'test_exp_',
-    'test_argmax',
-    'test_argmin',
-    'test_min',
-    'test_max'
-    #pytorch operator tests
-    'test_operator_exp',
-    'test_operator_maxpool',
-    'test_operator_params',
-    'test_operator_permute2',
-    'test_clip'
-    'test_cast',
-    'test_depthtospace',
-    'test_hardsigmoid',
-    'test_instancenorm',
-    'test_shape',
-    'test_size'
-    ]
-
-BASIC_MODEL_TESTS = [
-    'test_AvgPool2D',
-    'test_BatchNorm',
-    'test_ConstantPad2d',
-    'test_Conv2d',
-    'test_ELU',
-    'test_LeakyReLU',
-    'test_MaxPool',
-    'test_PReLU',
-    'test_ReLU',
-    'test_selu_default'
-    'test_Sigmoid',
-    'test_Softmax',
-    'test_softmax_functional',
-    'test_softmax_lastdim',
-    'test_Tanh'
-    ]
-
-STANDARD_MODEL = [
-    'test_bvlc_alexnet',
-    'test_densenet121',
-    # 'test_inception_v1',
-    # 'test_inception_v2',
-    'test_resnet50',
-    # 'test_shufflenet',
-    'test_squeezenet',
-    'test_vgg16',
-    'test_vgg19'
-    ]
-
-for op_test in IMPLEMENTED_OPERATORS_TEST:
-    BACKEND_TESTS.include(op_test)
-
-for basic_model_test in BASIC_MODEL_TESTS:
-    BACKEND_TESTS.include(basic_model_test)
-
-for std_model_test in STANDARD_MODEL:
-    BACKEND_TESTS.include(std_model_test)
-
-BACKEND_TESTS.exclude('.*broadcast.*')
-BACKEND_TESTS.exclude('.*bcast.*')
-
-
-# import all test cases at global scope to make them visible to python.unittest
-globals().update(BACKEND_TESTS.enable_report().test_cases)
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/tests/python-pytest/onnx/import/gluon_backend_test.py b/tests/python-pytest/onnx/gluon_backend_test.py
similarity index 60%
rename from tests/python-pytest/onnx/import/gluon_backend_test.py
rename to tests/python-pytest/onnx/gluon_backend_test.py
index 6dd5f8a..0f320ae 100644
--- a/tests/python-pytest/onnx/import/gluon_backend_test.py
+++ b/tests/python-pytest/onnx/gluon_backend_test.py
@@ -22,34 +22,24 @@ from __future__ import print_function
 from __future__ import unicode_literals
 
 import unittest
+import backend as mxnet_backend
+import backend_test
+
 try:
     import onnx.backend.test
 except ImportError:
-    raise ImportError("Onnx and protobuf need to be installed. Instructions to"
-                      + " install - https://github.com/onnx/onnx#installation")
-
-import gluon_backend
-import test_cases
+    raise ImportError("Onnx and protobuf need to be installed")
 
+operations = ['import']  # Gluon->ONNX export is not supported yet
 # This is a pytest magic variable to load extra plugins
 pytest_plugins = "onnx.backend.test.report",
 
-BACKEND_TESTS = onnx.backend.test.BackendTest(gluon_backend, __name__)
-
-for op_tests in test_cases.IMPLEMENTED_OPERATORS_TEST:
-    BACKEND_TESTS.include(op_tests)
-
-for std_model_test in test_cases.STANDARD_MODEL:
-    BACKEND_TESTS.include(std_model_test)
-
-for basic_model_test in test_cases.BASIC_MODEL_TESTS:
-    BACKEND_TESTS.include(basic_model_test)
-
-BACKEND_TESTS.exclude('.*broadcast.*')
-BACKEND_TESTS.exclude('.*bcast.*')
 
-# import all test cases at global scope to make them visible to python.unittest
-globals().update(BACKEND_TESTS.enable_report().test_cases)
+for operation in operations:
+    mxnet_backend.MXNetBackend.set_params('gluon', operation)
+    BACKEND_TESTS = backend_test.prepare_tests(mxnet_backend, operation)
+    # import all test cases at global scope to make them visible to python.unittest
+    globals().update(BACKEND_TESTS.enable_report().test_cases)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/tests/python-pytest/onnx/import/gluon_backend.py b/tests/python-pytest/onnx/import/gluon_backend.py
deleted file mode 100644
index 25be60b..0000000
--- a/tests/python-pytest/onnx/import/gluon_backend.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# coding: utf-8
-"""Gluon backend wrapper for onnx test infrastructure"""
-from mxnet.contrib.onnx.onnx2mx.import_onnx import GraphProto
-import mxnet as mx
-
-try:
-    from onnx import helper, TensorProto
-    from onnx.backend.base import Backend
-except ImportError:
-    raise ImportError("Onnx and protobuf need to be installed. Instructions to"
-                      + " install - https://github.com/onnx/onnx#installation")
-from gluon_backend_rep import GluonBackendRep
-
-# GluonBackend class will take an ONNX model with inputs, perform a computation,
-# and then return the output.
-# Implemented by following onnx docs guide:
-# https://github.com/onnx/onnx/blob/master/docs/ImplementingAnOnnxBackend.md
-
-class GluonBackend(Backend):
-    """Gluon backend for ONNX"""
-
-    @classmethod
-    def prepare(cls, model, device='CPU', **kwargs):
-        """For running end to end model(used for onnx test backend)
-
-        Parameters
-        ----------
-        model  : onnx ModelProto object
-            loaded onnx graph
-        device : 'CPU'
-            specifying device to run test on
-        kwargs :
-            other arguments
-
-        Returns
-        -------
-        GluonBackendRep : object
-            Returns object of GluonBackendRep class which will be in turn
-            used to run inference on the input model and return the result for comparison.
-        """
-        graph = GraphProto()
-        if device == 'CPU':
-            ctx = mx.cpu()
-        else:
-            raise NotImplementedError("ONNX tests are run only for CPU context.")
-
-        net = graph.graph_to_gluon(model.graph, ctx)
-        return GluonBackendRep(net, device)
-
-    @classmethod
-    def supports_device(cls, device):
-        """Supports only CPU for testing"""
-        return device == 'CPU'
-
-
-prepare = GluonBackend.prepare
-
-supports_device = GluonBackend.supports_device
diff --git a/tests/python-pytest/onnx/import/gluon_backend_rep.py b/tests/python-pytest/onnx/import/gluon_backend_rep.py
deleted file mode 100644
index 04c6ddd..0000000
--- a/tests/python-pytest/onnx/import/gluon_backend_rep.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# coding: utf-8
-"""gluon backend rep for onnx test infrastructure"""
-import numpy as np
-try:
-    from onnx.backend.base import BackendRep
-except ImportError:
-    raise ImportError("Onnx and protobuf need to be installed. Instructions to"
-                      + " install - https://github.com/onnx/onnx#installation")
-import mxnet as mx
-from mxnet import nd
-
-# GluonBackendRep object will be returned by GluonBackend's prepare method which is used to
-# execute a model repeatedly.
-# Inputs will be passed to the run method of MXNetBackendRep class, it will perform computation and
-# retrieve the corresponding results for comparison to the onnx backend.
-# https://github.com/onnx/onnx/blob/master/onnx/backend/test/runner/__init__.py.
-# Implemented by following onnx docs guide:
-# https://github.com/onnx/onnx/blob/master/docs/ImplementingAnOnnxBackend.md
-
-
-class GluonBackendRep(BackendRep):
-    """Running model inference on gluon backend and return the result
-     to onnx test infrastructure for comparison."""
-    def __init__(self, net, device):
-        self.net = net
-        self.device = device
-
-    def run(self, inputs, **kwargs):
-        """Run model inference and return the result
-
-        Parameters
-        ----------
-        inputs : numpy array
-            input to run a layer on
-
-        Returns
-        -------
-        params : numpy array
-            result obtained after running the inference on mxnet
-        """
-        # create module, passing cpu context
-        if self.device == 'CPU':
-            ctx = mx.cpu()
-        else:
-            raise NotImplementedError("ONNX tests are run only for CPU context.")
-
-        # run inference
-        net_inputs = [nd.array(input_data, ctx=ctx) for input_data in inputs]
-        net_outputs = self.net(*net_inputs)
-        results = []
-        results.extend([o for o in net_outputs.asnumpy()])
-        result = np.array(results)
-
-        return [result]
diff --git a/tests/python-pytest/onnx/import/mxnet_backend.py b/tests/python-pytest/onnx/import/mxnet_backend.py
deleted file mode 100644
index bd4910b..0000000
--- a/tests/python-pytest/onnx/import/mxnet_backend.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# coding: utf-8
-"""MXNet backend wrapper for onnx test infrastructure"""
-import os
-import sys
-from mxnet.contrib.onnx.onnx2mx.import_onnx import GraphProto
-try:
-    from onnx import helper, TensorProto
-    from onnx.backend.base import Backend
-except ImportError:
-    raise ImportError("Onnx and protobuf need to be installed. Instructions to"
-                      + " install - https://github.com/onnx/onnx#installation")
-CURR_PATH = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-sys.path.insert(0, os.path.join(CURR_PATH, '../'))
-from backend_rep import MXNetBackendRep
-
-# MXNetBackend class will take an ONNX model with inputs, perform a computation,
-# and then return the output.
-# Implemented by following onnx docs guide:
-# https://github.com/onnx/onnx/blob/master/docs/ImplementingAnOnnxBackend.md
-
-class MXNetBackend(Backend):
-    """MXNet backend for ONNX"""
-
-    @classmethod
-    def prepare(cls, model, device='CPU', **kwargs):
-        """For running end to end model(used for onnx test backend)
-
-        Parameters
-        ----------
-        model  : onnx ModelProto object
-            loaded onnx graph
-        device : 'CPU'
-            specifying device to run test on
-        kwargs :
-            other arguments
-
-        Returns
-        -------
-        MXNetBackendRep : object
-            Returns object of MXNetBackendRep class which will be in turn
-            used to run inference on the input model and return the result for comparison.
-        """
-        graph = GraphProto()
-        sym, arg_params, aux_params = graph.from_onnx(model.graph)
-        return MXNetBackendRep(sym, arg_params, aux_params, device)
-
-    @classmethod
-    def supports_device(cls, device):
-        """Supports only CPU for testing"""
-        return device == 'CPU'
-
-prepare = MXNetBackend.prepare
-
-supports_device = MXNetBackend.supports_device
diff --git a/tests/python-pytest/onnx/import/onnx_import_test.py b/tests/python-pytest/onnx/import/onnx_import_test.py
deleted file mode 100644
index c2d1e9c..0000000
--- a/tests/python-pytest/onnx/import/onnx_import_test.py
+++ /dev/null
@@ -1,275 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-"""
-Tests for individual operators
-This module contains operator tests which currently do not exist on
-ONNX backend test framework. Once we have PRs on the ONNX repo and get
-those PRs merged, this file will get EOL'ed.
-"""
-# pylint: disable=too-many-locals,wrong-import-position,import-error
-from __future__ import absolute_import
-import sys
-import os
-import unittest
-import logging
-import hashlib
-import tarfile
-from collections import namedtuple
-import numpy as np
-import numpy.testing as npt
-from onnx import helper
-from onnx import numpy_helper
-from onnx import TensorProto
-from mxnet.test_utils import download
-from mxnet.contrib import onnx as onnx_mxnet
-import mxnet as mx
-CURR_PATH = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-sys.path.insert(0, os.path.join(CURR_PATH, '../../../python/unittest'))
-from common import with_seed
-import mxnet_backend
-
-
-URLS = {
-    'bvlc_googlenet' :
-        'https://s3.amazonaws.com/onnx-mxnet/model-zoo/opset7/bvlc_googlenet.tar.gz',
-    'bvlc_reference_caffenet' :
-        'https://s3.amazonaws.com/onnx-mxnet/model-zoo/opset7/bvlc_reference_caffenet.tar.gz',
-    'bvlc_reference_rcnn_ilsvrc13' :
-        'https://s3.amazonaws.com/onnx-mxnet/model-zoo/opset7/bvlc_reference_rcnn_ilsvrc13.tar.gz',
-}
-
-@with_seed()
-def test_broadcast():
-    """Test for broadcasting in onnx operators."""
-    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
-    input2 = np.random.rand(1, 5).astype("float32")
-    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
-              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]
-
-    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]
-
-    nodes = [helper.make_node("Add", ["input1", "input2"], ["output"])]
-
-    graph = helper.make_graph(nodes,
-                              "bcast_test",
-                              inputs,
-                              outputs)
-
-    bcast_model = helper.make_model(graph)
-    
-    bkd_rep = mxnet_backend.prepare(bcast_model)
-    numpy_op = input1 + input2
-    output = bkd_rep.run([input1, input2])
-    npt.assert_almost_equal(output[0], numpy_op)
-
-@with_seed()
-def test_greater():
-    """Test for logical greater in onnx operators."""
-    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
-    input2 = np.random.rand(1, 5).astype("float32")
-    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
-              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]
-
-    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]
-
-    nodes = [helper.make_node("Greater", ["input1", "input2"], ["output"])]
-
-    graph = helper.make_graph(nodes,
-                              "greater_test",
-                              inputs,
-                              outputs)
-
-    greater_model = helper.make_model(graph)
-    
-    bkd_rep = mxnet_backend.prepare(greater_model)
-    numpy_op = np.greater(input1, input2).astype(np.float32)
-    output = bkd_rep.run([input1, input2])
-    npt.assert_almost_equal(output[0], numpy_op)
-
-@with_seed()
-def test_lesser():
-    """Test for logical greater in onnx operators."""
-    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
-    input2 = np.random.rand(1, 5).astype("float32")
-    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
-              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]
-
-    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]
-
-    nodes = [helper.make_node("Less", ["input1", "input2"], ["output"])]
-
-    graph = helper.make_graph(nodes,
-                              "lesser_test",
-                              inputs,
-                              outputs)
-
-    greater_model = helper.make_model(graph)
-    
-    bkd_rep = mxnet_backend.prepare(greater_model)
-    numpy_op = np.less(input1, input2).astype(np.float32)
-    output = bkd_rep.run([input1, input2])
-    npt.assert_almost_equal(output[0], numpy_op)
-    
-@with_seed()
-def test_equal():
-    """Test for logical greater in onnx operators."""
-    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
-    input2 = np.random.rand(1, 5).astype("float32")
-    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
-              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]
-
-    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]
-
-    nodes = [helper.make_node("Equal", ["input1", "input2"], ["output"])]
-
-    graph = helper.make_graph(nodes,
-                              "equal_test",
-                              inputs,
-                              outputs)
-
-    greater_model = helper.make_model(graph)
-    
-    bkd_rep = mxnet_backend.prepare(greater_model)
-    numpy_op = np.equal(input1, input2).astype(np.float32)
-    output = bkd_rep.run([input1, input2])
-    npt.assert_almost_equal(output[0], numpy_op)
-
-
-def get_test_files(name):
-    """Extract tar file and returns model path and input, output data"""
-    tar_name = download(URLS.get(name), dirname=CURR_PATH.__str__())
-    # extract tar file
-    tar_path = os.path.join(CURR_PATH, tar_name)
-    tar = tarfile.open(tar_path.__str__(), "r:*")
-    tar.extractall(path=CURR_PATH.__str__())
-    tar.close()
-    data_dir = os.path.join(CURR_PATH, name)
-    model_path = os.path.join(data_dir, 'model.onnx')
-
-    inputs = []
-    outputs = []
-    # get test files
-    for test_file in os.listdir(data_dir):
-        case_dir = os.path.join(data_dir, test_file)
-        # skip the non-dir files
-        if not os.path.isdir(case_dir):
-            continue
-        input_file = os.path.join(case_dir, 'input_0.pb')
-        input_tensor = TensorProto()
-        with open(input_file, 'rb') as proto_file:
-            input_tensor.ParseFromString(proto_file.read())
-        inputs.append(numpy_helper.to_array(input_tensor))
-
-        output_tensor = TensorProto()
-        output_file = os.path.join(case_dir, 'output_0.pb')
-        with open(output_file, 'rb') as proto_file:
-            output_tensor.ParseFromString(proto_file.read())
-        outputs.append(numpy_helper.to_array(output_tensor))
-
-    return model_path, inputs, outputs
-
-def test_bvlc_googlenet():
-    """ Tests Googlenet model"""
-    model_path, inputs, outputs = get_test_files('bvlc_googlenet')
-    logging.info("Translating Googlenet model from ONNX to Mxnet")
-    sym, arg_params, aux_params = onnx_mxnet.import_model(model_path)
-    metadata = onnx_mxnet.get_model_metadata(model_path)
-    assert len(metadata) == 2
-    assert metadata.get('input_tensor_data')
-    assert metadata.get('input_tensor_data') == [(u'data_0', (1, 3, 224, 224))]
-    assert metadata.get('output_tensor_data')
-    assert metadata.get('output_tensor_data') == [(u'prob_1', (1, 1000))]
-    data_names = [input_name[0] for input_name in metadata.get('input_tensor_data')]
-
-    # run test for each test file
-    for input_data, output_data in zip(inputs, outputs):
-        # create module
-        mod = mx.mod.Module(symbol=sym, data_names=data_names, context=mx.cpu(), label_names=None)
-        mod.bind(for_training=False, data_shapes=[(data_names[0], input_data.shape)], label_shapes=None)
-        mod.set_params(arg_params=arg_params, aux_params=aux_params,
-                       allow_missing=True, allow_extra=True)
-        # run inference
-        batch = namedtuple('Batch', ['data'])
-        mod.forward(batch([mx.nd.array(input_data)]), is_train=False)
-
-        # verify the results
-        npt.assert_equal(mod.get_outputs()[0].shape, output_data.shape)
-        npt.assert_almost_equal(output_data, mod.get_outputs()[0].asnumpy(), decimal=3)
-    logging.info("Googlenet model conversion Successful")
-
-def test_bvlc_reference_caffenet():
-    """Tests the bvlc cafenet model"""
-    model_path, inputs, outputs = get_test_files('bvlc_reference_caffenet')
-    logging.info("Translating Caffenet model from ONNX to Mxnet")
-    sym, arg_params, aux_params = onnx_mxnet.import_model(model_path)
-    metadata = onnx_mxnet.get_model_metadata(model_path)
-    assert len(metadata) == 2
-    assert metadata.get('input_tensor_data')
-    assert metadata.get('input_tensor_data') == [(u'data_0', (1, 3, 224, 224))]
-    assert metadata.get('output_tensor_data')
-    assert metadata.get('output_tensor_data') == [(u'prob_1', (1, 1000))]
-    data_names = [input_name[0] for input_name in metadata.get('input_tensor_data')]
-
-    # run test for each test file
-    for input_data, output_data in zip(inputs, outputs):
-        # create module
-        mod = mx.mod.Module(symbol=sym, data_names=data_names, context=mx.cpu(), label_names=None)
-        mod.bind(for_training=False, data_shapes=[(data_names[0], input_data.shape)], label_shapes=None)
-        mod.set_params(arg_params=arg_params, aux_params=aux_params,
-                       allow_missing=True, allow_extra=True)
-        # run inference
-        batch = namedtuple('Batch', ['data'])
-        mod.forward(batch([mx.nd.array(input_data)]), is_train=False)
-
-        # verify the results
-        npt.assert_equal(mod.get_outputs()[0].shape, output_data.shape)
-        npt.assert_almost_equal(output_data, mod.get_outputs()[0].asnumpy(), decimal=3)
-    logging.info("Caffenet model conversion Successful")
-
-def test_bvlc_rcnn_ilsvrc13():
-    """Tests the bvlc rcnn model"""
-    model_path, inputs, outputs = get_test_files('bvlc_reference_rcnn_ilsvrc13')
-    logging.info("Translating rcnn_ilsvrc13 model from ONNX to Mxnet")
-    sym, arg_params, aux_params = onnx_mxnet.import_model(model_path)
-    metadata = onnx_mxnet.get_model_metadata(model_path)
-    assert len(metadata) == 2
-    assert metadata.get('input_tensor_data')
-    assert metadata.get('input_tensor_data') == [(u'data_0', (1, 3, 224, 224))]
-    assert metadata.get('output_tensor_data')
-    assert metadata.get('output_tensor_data') == [(u'fc-rcnn_1', (1, 200))]
-    data_names = [input_name[0] for input_name in metadata.get('input_tensor_data')]
-
-    # run test for each test file
-    for input_data, output_data in zip(inputs, outputs):
-        # create module
-        mod = mx.mod.Module(symbol=sym, data_names=data_names, context=mx.cpu(), label_names=None)
-        mod.bind(for_training=False, data_shapes=[(data_names[0], input_data.shape)], label_shapes=None)
-        mod.set_params(arg_params=arg_params, aux_params=aux_params,
-                       allow_missing=True, allow_extra=True)
-        # run inference
-        batch = namedtuple('Batch', ['data'])
-        mod.forward(batch([mx.nd.array(input_data)]), is_train=False)
-
-        # verify the results
-        npt.assert_equal(mod.get_outputs()[0].shape, output_data.shape)
-        npt.assert_almost_equal(output_data, mod.get_outputs()[0].asnumpy(), decimal=3)
-    logging.info("rcnn_ilsvrc13 model conversion Successful")
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/tests/python-pytest/onnx/import/test_cases.py b/tests/python-pytest/onnx/import/test_cases.py
deleted file mode 100644
index e0b26cc..0000000
--- a/tests/python-pytest/onnx/import/test_cases.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-"""Test Cases to be run for the import module"""
-
-IMPLEMENTED_OPERATORS_TEST = [
-    'test_split_equal'
-    'test_random_',
-    'test_add',
-    'test_sub',
-    'test_mul',
-    'test_div',
-    'test_neg',
-    'test_abs',
-    'test_sum',
-    'test_tanh',
-    'test_ceil',
-    'test_floor',
-    'test_concat',
-    'test_identity',
-    'test_sigmoid',
-    'test_relu',
-    'test_constant_pad',
-    'test_edge_pad',
-    'test_reflect_pad',
-    'test_squeeze_',
-    'test_unsqueeze',
-    'test_softmax_example',
-    'test_softmax_large_number',
-    'test_softmax_axis_2',
-    'test_transpose',
-    'test_globalmaxpool',
-    'test_globalaveragepool',
-    'test_global_lppooling',
-    'test_slice_cpu',
-    'test_slice_neg',
-    'test_reciprocal',
-    'test_sqrt',
-    'test_pow',
-    'test_exp_',
-    'test_argmax',
-    'test_argmin',
-    'test_min',
-    # enabling partial test cases for matmul
-    'test_matmul_3d',
-    'test_matmul_4d',
-    'test_clip',
-    'test_softsign',
-    'test_reduce_',
-    'test_softplus',
-    'test_mean',
-    'test_acos',
-    'test_asin',
-    'test_atan',
-    'test_cos',
-    'test_sin',
-    'test_tan',
-    'test_shape',
-    'test_hardsigmoid',
-    'test_averagepool_1d',
-    'test_averagepool_2d_pads_count_include_pad',
-    'test_averagepool_2d_precomputed_pads_count_include_pad',
-    'test_averagepool_2d_precomputed_strides',
-    'test_averagepool_2d_strides',
-    'test_averagepool_3d',
-    'test_LpPool_',
-    'test_cast',
-    'test_instancenorm',
-    #pytorch operator tests
-    'test_operator_exp',
-    'test_operator_maxpool',
-    'test_operator_params',
-    'test_operator_permute2',
-    'test_depthtospace',
-    'test_size'
-    ]
-
-BASIC_MODEL_TESTS = [
-    'test_AvgPool2D',
-    'test_BatchNorm',
-    'test_ConstantPad2d'
-    'test_Conv2d',
-    'test_ELU',
-    'test_LeakyReLU',
-    'test_MaxPool',
-    'test_PReLU',
-    'test_ReLU',
-    'test_selu_default',
-    'test_Sigmoid',
-    'test_Softmax',
-    'test_softmax_functional',
-    'test_softmax_lastdim',
-    'test_Tanh'
-    ]
-
-STANDARD_MODEL = [
-    'test_bvlc_alexnet',
-    'test_densenet121',
-    #'test_inception_v1',
-    #'test_inception_v2',
-    'test_resnet50',
-    #'test_shufflenet',
-    'test_squeezenet',
-    'test_zfnet512',
-    'test_vgg19'
-    ]
diff --git a/tests/python-pytest/onnx/import/mxnet_backend_test.py b/tests/python-pytest/onnx/mxnet_backend_test.py
similarity index 60%
rename from tests/python-pytest/onnx/import/mxnet_backend_test.py
rename to tests/python-pytest/onnx/mxnet_backend_test.py
index d9e4dcc..bf249fe 100644
--- a/tests/python-pytest/onnx/import/mxnet_backend_test.py
+++ b/tests/python-pytest/onnx/mxnet_backend_test.py
@@ -21,35 +21,26 @@ from __future__ import division
 from __future__ import print_function
 from __future__ import unicode_literals
 
+
 import unittest
+import backend as mxnet_backend
+import backend_test
+
 try:
     import onnx.backend.test
 except ImportError:
-    raise ImportError("Onnx and protobuf need to be installed. Instructions to"
-                      + " install - https://github.com/onnx/onnx#installation")
-
-import mxnet_backend
-import test_cases
+    raise ImportError("Onnx and protobuf need to be installed")
 
+operations = ['import', 'export']
 # This is a pytest magic variable to load extra plugins
 pytest_plugins = "onnx.backend.test.report",
 
-BACKEND_TESTS = onnx.backend.test.BackendTest(mxnet_backend, __name__)
-
-for op_tests in test_cases.IMPLEMENTED_OPERATORS_TEST:
-    BACKEND_TESTS.include(op_tests)
-
-for basic_model_test in test_cases.BASIC_MODEL_TESTS:
-    BACKEND_TESTS.include(basic_model_test)
-
-for std_model_test in test_cases.STANDARD_MODEL:
-    BACKEND_TESTS.include(std_model_test)
-
-BACKEND_TESTS.exclude('.*broadcast.*')
-BACKEND_TESTS.exclude('.*bcast.*')
 
-# import all test cases at global scope to make them visible to python.unittest
-globals().update(BACKEND_TESTS.enable_report().test_cases)
+for operation in operations:
+    mxnet_backend.MXNetBackend.set_params('mxnet', operation)
+    BACKEND_TESTS = backend_test.prepare_tests(mxnet_backend, operation)
+    # import all test cases at global scope to make them visible to python.unittest
+    globals().update(BACKEND_TESTS.enable_report().test_cases)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/tests/python-pytest/onnx/mxnet_export_test.py b/tests/python-pytest/onnx/mxnet_export_test.py
new file mode 100644
index 0000000..6c81198
--- /dev/null
+++ b/tests/python-pytest/onnx/mxnet_export_test.py
@@ -0,0 +1,121 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+# pylint: disable=too-many-locals,wrong-import-position,import-error
+from __future__ import absolute_import
+import os
+import unittest
+import logging
+import tempfile
+from mxnet import nd, sym
+from mxnet.gluon import nn
+from mxnet.contrib import onnx as onnx_mxnet
+import mxnet as mx
+
+logger = logging.getLogger()
+logger.setLevel(logging.DEBUG)
+
+
+def _assert_sym_equal(lhs, rhs):
+    assert lhs.list_inputs() == rhs.list_inputs()  # input names must be identical
+    assert len(lhs.list_outputs()) == len(rhs.list_outputs())  # number of outputs must be identical
+
+
+def _force_list(output):
+    if isinstance(output, nd.NDArray):
+        return [output]
+    return list(output)
+
+
+def _optional_group(symbols, group=False):
+    if group:
+        return sym.Group(symbols)
+    else:
+        return symbols
+
+
+def _check_onnx_export(net, group_outputs=False, shape_type=tuple, extra_params={}):
+    net.initialize()
+    data = nd.random.uniform(0, 1, (1, 1024))
+    output = _force_list(net(data))  # initialize weights
+    net_sym = _optional_group(net(sym.Variable('data')), group_outputs)
+    net_params = {name: param._reduce() for name, param in net.collect_params().items()}
+    net_params.update(extra_params)
+    with tempfile.TemporaryDirectory() as tmpdirname:
+        onnx_file_path = os.path.join(tmpdirname, 'net.onnx')
+        export_path = onnx_mxnet.export_model(
+            sym=net_sym,
+            params=net_params,
+            input_shape=[shape_type(data.shape)],
+            onnx_file_path=onnx_file_path)
+        assert export_path == onnx_file_path
+        # Try importing the model to symbol
+        _assert_sym_equal(net_sym, onnx_mxnet.import_model(export_path)[0])
+
+        # Try importing the model to gluon
+        imported_net = onnx_mxnet.import_to_gluon(export_path, ctx=None)
+        _assert_sym_equal(net_sym, _optional_group(imported_net(sym.Variable('data')), group_outputs))
+
+        # Confirm network outputs are the same
+        imported_net_output = _force_list(imported_net(data))
+        for out, imp_out in zip(output, imported_net_output):
+            mx.test_utils.assert_almost_equal(out.asnumpy(), imp_out.asnumpy())
+
+
+class TestExport(unittest.TestCase):
+    """ Tests ONNX export.
+    """
+
+    def test_onnx_export_single_output(self):
+        net = nn.HybridSequential(prefix='single_output_net')
+        with net.name_scope():
+            net.add(nn.Dense(100, activation='relu'), nn.Dense(10))
+        _check_onnx_export(net)
+
+    def test_onnx_export_multi_output(self):
+        class MultiOutputBlock(nn.HybridBlock):
+            def __init__(self):
+                super(MultiOutputBlock, self).__init__()
+                with self.name_scope():
+                    self.net = nn.HybridSequential()
+                    for i in range(10):
+                        self.net.add(nn.Dense(100 + i * 10, activation='relu'))
+
+            def hybrid_forward(self, F, x):
+                out = tuple(block(x) for block in self.net._children.values())
+                return out
+
+        net = MultiOutputBlock()
+        assert len(sym.Group(net(sym.Variable('data'))).list_outputs()) == 10
+        _check_onnx_export(net, group_outputs=True)
+
+    def test_onnx_export_list_shape(self):
+        net = nn.HybridSequential(prefix='list_shape_net')
+        with net.name_scope():
+            net.add(nn.Dense(100, activation='relu'), nn.Dense(10))
+        _check_onnx_export(net, shape_type=list)
+
+    def test_onnx_export_extra_params(self):
+        net = nn.HybridSequential(prefix='extra_params_net')
+        with net.name_scope():
+            net.add(nn.Dense(100, activation='relu'), nn.Dense(10))
+        _check_onnx_export(net, extra_params={'extra_param': nd.array([1, 2])})
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/python-pytest/onnx/test_cases.py b/tests/python-pytest/onnx/test_cases.py
new file mode 100644
index 0000000..1001836
--- /dev/null
+++ b/tests/python-pytest/onnx/test_cases.py
@@ -0,0 +1,132 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+IMPLEMENTED_OPERATORS_TEST = {
+    'both': ['test_add',
+             'test_sub',
+             'test_mul',
+             'test_div',
+             'test_neg',
+             'test_abs',
+             'test_sum',
+             'test_tanh',
+             'test_ceil',
+             'test_floor',
+             'test_concat',
+             'test_identity',
+             'test_sigmoid',
+             'test_relu',
+             'test_constant_pad',
+             'test_edge_pad',
+             'test_reflect_pad',
+             'test_softmax_example',
+             'test_softmax_large_number',
+             'test_softmax_axis_2',
+             'test_transpose',
+             'test_globalmaxpool',
+             'test_globalaveragepool',
+             'test_slice_cpu',
+             'test_slice_neg',
+             'test_reciprocal',
+             'test_sqrt',
+             'test_pow',
+             'test_exp_',
+             'test_argmax',
+             'test_argmin',
+             'test_min',
+             # pytorch operator tests
+             'test_operator_exp',
+             'test_operator_maxpool',
+             'test_operator_params',
+             'test_operator_permute2',
+             'test_cos',
+             'test_sin',
+             'test_tan',
+             'test_acos',
+             'test_asin',
+             'test_atan',
+             'test_squeeze',
+             'test_matmul_3d',
+             'test_matmul_4d',
+             'test_depthtospace',
+             'test_hardsigmoid',
+             'test_instancenorm',
+             'test_shape',
+             'test_cast',
+             'test_clip',
+             'test_size'
+             ],
+    'import': ['test_unsqueeze',
+               'test_global_lppooling',
+               'test_softsign',
+               'test_reduce_',
+               'test_softplus',
+               'test_mean',
+               'test_averagepool_1d',
+               'test_averagepool_2d_pads_count_include_pad',
+               'test_averagepool_2d_precomputed_pads_count_include_pad',
+               'test_averagepool_2d_precomputed_strides',
+               'test_averagepool_2d_strides',
+               'test_averagepool_3d',
+               'test_LpPool_',
+               'test_split_equal',
+               'test_random_',
+               ],
+    'export': ['test_random_uniform',
+               'test_random_normal',
+               'test_reduce_min',
+               'test_reduce_max',
+               'test_squeeze',
+               'test_reduce_mean',
+               'test_reduce_prod',
+               'test_reduce_sum_d',
+               'test_reduce_sum_keepdims_random',
+               'test_max_',
+               ]
+}
+
+BASIC_MODEL_TESTS = {
+    'both': ['test_AvgPool2D',
+             'test_BatchNorm',
+             'test_ConstantPad2d',
+             'test_Conv2d',
+             'test_ELU',
+             'test_LeakyReLU',
+             'test_MaxPool',
+             'test_PReLU',
+             'test_ReLU',
+             'test_selu_default',
+             'test_Sigmoid',
+             'test_Softmax',
+             'test_softmax_functional',
+             'test_softmax_lastdim',
+             'test_Tanh']
+}
+
+STANDARD_MODEL = {
+    'both': ['test_bvlc_alexnet',
+             'test_densenet121',
+             # 'test_inception_v1',
+             # 'test_inception_v2',
+             'test_resnet50',
+             # 'test_shufflenet',
+             'test_squeezenet',
+             'test_vgg19'
+             ],
+    'import': ['test_zfnet512'],
+    'export': ['test_vgg16']
+}
diff --git a/tests/python-pytest/onnx/test_models.py b/tests/python-pytest/onnx/test_models.py
new file mode 100644
index 0000000..12bc271
--- /dev/null
+++ b/tests/python-pytest/onnx/test_models.py
@@ -0,0 +1,167 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+# pylint: disable=too-many-locals,wrong-import-position,import-error
+from __future__ import absolute_import
+import sys
+import os
+import unittest
+import logging
+import tarfile
+from collections import namedtuple
+import numpy as np
+import numpy.testing as npt
+from onnx import numpy_helper
+from onnx import TensorProto
+from mxnet.test_utils import download
+from mxnet.contrib import onnx as onnx_mxnet
+import mxnet as mx
+
+CURR_PATH = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
+sys.path.insert(0, os.path.join(CURR_PATH, '../../python/unittest'))
+
+
+logger = logging.getLogger()
+logger.setLevel(logging.DEBUG)
+URLS = {
+    'bvlc_googlenet':
+        'https://s3.amazonaws.com/download.onnx/models/opset_8/bvlc_googlenet.tar.gz',
+    'bvlc_reference_caffenet':
+        'https://s3.amazonaws.com/download.onnx/models/opset_8/bvlc_reference_caffenet.tar.gz',
+    'bvlc_reference_rcnn_ilsvrc13':
+        'https://s3.amazonaws.com/download.onnx/models/opset_8/bvlc_reference_rcnn_ilsvrc13.tar.gz',
+    'inception_v1':
+        'https://s3.amazonaws.com/download.onnx/models/opset_8/inception_v1.tar.gz',
+    'inception_v2':
+        'https://s3.amazonaws.com/download.onnx/models/opset_8/inception_v2.tar.gz'
+}
+
+
+def get_test_files(name):
+    """Extract tar file and returns model path and input, output data"""
+    tar_name = download(URLS.get(name), dirname=CURR_PATH.__str__())
+    # extract tar file
+    tar_path = os.path.join(CURR_PATH, tar_name)
+    tar = tarfile.open(tar_path.__str__(), "r:*")
+    tar.extractall(path=CURR_PATH.__str__())
+    tar.close()
+    data_dir = os.path.join(CURR_PATH, name)
+    model_path = os.path.join(data_dir, 'model.onnx')
+
+    inputs = []
+    outputs = []
+    # get test files
+    for test_file in os.listdir(data_dir):
+        case_dir = os.path.join(data_dir, test_file)
+        # skip the non-dir files
+        if not os.path.isdir(case_dir):
+            continue
+        input_file = os.path.join(case_dir, 'input_0.pb')
+        input_tensor = TensorProto()
+        with open(input_file, 'rb') as proto_file:
+            input_tensor.ParseFromString(proto_file.read())
+        inputs.append(numpy_helper.to_array(input_tensor))
+
+        output_tensor = TensorProto()
+        output_file = os.path.join(case_dir, 'output_0.pb')
+        with open(output_file, 'rb') as proto_file:
+            output_tensor.ParseFromString(proto_file.read())
+        outputs.append(numpy_helper.to_array(output_tensor))
+
+    return model_path, inputs, outputs
+
+
+def forward_pass(sym, arg, aux, data_names, input_data):
+    """ Perform forward pass on given data"""
+    # create module
+    mod = mx.mod.Module(symbol=sym, data_names=data_names, context=mx.cpu(), label_names=None)
+    mod.bind(for_training=False, data_shapes=[(data_names[0], input_data.shape)], label_shapes=None)
+    mod.set_params(arg_params=arg, aux_params=aux,
+                   allow_missing=True, allow_extra=True)
+    # run inference
+    batch = namedtuple('Batch', ['data'])
+    mod.forward(batch([mx.nd.array(input_data)]), is_train=False)
+
+    return mod.get_outputs()[0].asnumpy()
+
+
+class TestModel(unittest.TestCase):
+    """ Tests for models.
+    Tests are dynamically added.
+    Therefore edit test_models to add more tests.
+    """
+    def test_import_export(self):
+        def get_model_results(modelpath):
+            symbol, args, aux = onnx_mxnet.import_model(modelpath)
+
+            data = onnx_mxnet.get_model_metadata(modelpath)
+            data_names = [input_name[0] for input_name in data.get('input_tensor_data')]
+
+            result = []
+            for input_data, output_data in zip(inputs, outputs):
+                output = forward_pass(symbol, args, aux, data_names, input_data)
+                result.append(output)
+            return symbol, args, aux, result, data
+
+        for test in test_cases:
+            model_name, input_shape, output_shape = test
+            with self.subTest(model_name):
+                model_path, inputs, outputs = get_test_files(model_name)
+                logging.info("Translating " + model_name + " from ONNX model zoo to MXNet")
+
+                sym, arg_params, aux_params, expected_result, _ = get_model_results(model_path)
+
+                params = {}
+                params.update(arg_params)
+                params.update(aux_params)
+
+                dir_path = os.path.dirname(model_path)
+                new_model_name = "exported_" + model_name + ".onnx"
+                onnx_file = os.path.join(dir_path, new_model_name)
+
+                logging.info("Translating converted model from mxnet to ONNX")
+                converted_model_path = onnx_mxnet.export_model(sym, params, [input_shape], np.float32, onnx_file)
+
+                sym, arg_params, aux_params, actual_result, metadata = get_model_results(converted_model_path)
+
+                assert len(metadata) == 2
+                assert metadata.get('input_tensor_data')
+                assert metadata.get('input_tensor_data')[0][1] == input_shape
+                assert metadata.get('output_tensor_data')
+                assert metadata.get('output_tensor_data')[0][1] == output_shape
+
+                # verify the results
+                for expected, actual in zip(expected_result, actual_result):
+                    npt.assert_equal(expected.shape, actual.shape)
+                    npt.assert_almost_equal(expected, actual, decimal=3)
+
+                logging.info(model_name + " conversion successful")
+
+
+# test_case = ("model name", input shape, output shape)
+test_cases = [
+    ("bvlc_googlenet", (1, 3, 224, 224), (1, 1000)),
+    ("bvlc_reference_caffenet", (1, 3, 224, 224), (1, 1000)),
+    ("bvlc_reference_rcnn_ilsvrc13", (1, 3, 224, 224), (1, 200)),
+    ("inception_v1", (1, 3, 224, 224), (1, 1000)),
+    ("inception_v2", (1, 3, 224, 224), (1, 1000))
+]
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/python-pytest/onnx/test_node.py b/tests/python-pytest/onnx/test_node.py
new file mode 100644
index 0000000..07ae866
--- /dev/null
+++ b/tests/python-pytest/onnx/test_node.py
@@ -0,0 +1,164 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests for individual operators
+This module contains operator tests which currently do not exist on
+ONNX backend test framework. Once we have PRs on the ONNX repo and get
+those PRs merged, this file will get EOL'ed.
+"""
+# pylint: disable=too-many-locals,wrong-import-position,import-error
+from __future__ import absolute_import
+import sys
+import os
+import unittest
+import logging
+import tarfile
+from collections import namedtuple
+import numpy as np
+import numpy.testing as npt
+from onnx import numpy_helper, helper, load_model
+from onnx import TensorProto
+from mxnet.test_utils import download
+from mxnet.contrib import onnx as onnx_mxnet
+import mxnet as mx
+import backend
+
+CURR_PATH = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
+sys.path.insert(0, os.path.join(CURR_PATH, '../../python/unittest'))
+
+logger = logging.getLogger()
+logger.setLevel(logging.DEBUG)
+
+
+def get_rnd(shape, low=-1.0, high=1.0, dtype=np.float32):
+    if dtype == np.float32:
+        return (np.random.uniform(low, high,
+                                  np.prod(shape)).reshape(shape).astype(np.float32))
+    elif dtype == np.int32:
+        return (np.random.randint(low, high,
+                                  np.prod(shape)).reshape(shape).astype(np.float32))
+    elif dtype == np.bool_:
+        return np.random.choice(a=[False, True], size=shape).astype(np.float32)
+
+
+def forward_pass(sym, arg, aux, data_names, input_data):
+    """ Perform forward pass on given data
+    :param sym: Symbol
+    :param arg: Arg params
+    :param aux: Aux params
+    :param data_names: Input names (list)
+    :param input_data: Input data (list). If there is only one input,
+                        pass it as a list. For example, if input is [1, 2],
+                        pass input_data=[[1, 2]]
+    :return: result of forward pass
+    """
+    data_shapes = []
+    data_forward = []
+    for idx in range(len(data_names)):
+        val = input_data[idx]
+        data_shapes.append((data_names[idx], np.shape(val)))
+        data_forward.append(mx.nd.array(val))
+    # create module
+    mod = mx.mod.Module(symbol=sym, data_names=data_names, context=mx.cpu(), label_names=None)
+    mod.bind(for_training=False, data_shapes=data_shapes, label_shapes=None)
+    if not arg and not aux:
+        mod.init_params()
+    else:
+        mod.set_params(arg_params=arg, aux_params=aux,
+                       allow_missing=True, allow_extra=True)
+    # run inference
+    batch = namedtuple('Batch', ['data'])
+    mod.forward(batch(data_forward), is_train=False)
+
+    return mod.get_outputs()[0].asnumpy()
+
+
+class TestNode(unittest.TestCase):
+    """ Tests for models.
+    Tests are dynamically added.
+    Therefore edit test_models to add more tests.
+    """
+
+    def test_import_export(self):
+        def get_input_tensors(input_data):
+            input_tensor = []
+            input_names = []
+            input_sym = []
+            for idx, ip in enumerate(input_data):
+                name = "input" + str(idx + 1)
+                input_sym.append(mx.sym.Variable(name))
+                input_names.append(name)
+                input_tensor.append(helper.make_tensor_value_info(name,
+                                                                  TensorProto.FLOAT, shape=np.shape(ip)))
+            return input_names, input_tensor, input_sym
+
+        def get_onnx_graph(testname, input_names, inputs, output_name, output_shape, attr):
+            outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=output_shape)]
+
+            nodes = [helper.make_node(output_name, input_names, ["output"], **attr)]
+
+            graph = helper.make_graph(nodes, testname, inputs, outputs)
+
+            model = helper.make_model(graph)
+            return model
+
+        for test in test_cases:
+            test_name, mxnet_op, onnx_name, inputs, attrs, mxnet_specific = test
+            with self.subTest(test_name):
+                names, input_tensors, inputsym = get_input_tensors(inputs)
+                test_op = mxnet_op(*inputsym, **attrs)
+                mxnet_output = forward_pass(test_op, None, None, names, inputs)
+                outputshape = np.shape(mxnet_output)
+
+                if mxnet_specific:
+                    onnxmodelfile = onnx_mxnet.export_model(test_op, {}, [np.shape(ip) for ip in inputs],
+                                                            np.float32,
+                                                            onnx_name + ".onnx")
+                    onnxmodel = load_model(onnxmodelfile)
+                else:
+                    onnxmodel = get_onnx_graph(test_name, names, input_tensors, onnx_name, outputshape, attrs)
+
+                bkd_rep = backend.prepare(onnxmodel, operation='export')
+                output = bkd_rep.run(inputs)
+
+                npt.assert_almost_equal(output[0], mxnet_output)
+
+
+# test_case = ("test_case_name", mxnet op, "ONNX_op_name", [input_list], attribute map, MXNet_specific=True/False)
+test_cases = [
+    ("test_equal", mx.sym.broadcast_equal, "Equal", [get_rnd((1, 3, 4, 5)), get_rnd((1, 5))], {}, False),
+    ("test_greater", mx.sym.broadcast_greater, "Greater", [get_rnd((1, 3, 4, 5)), get_rnd((1, 5))], {}, False),
+    ("test_less", mx.sym.broadcast_lesser, "Less", [get_rnd((1, 3, 4, 5)), get_rnd((1, 5))], {}, False),
+    ("test_and", mx.sym.broadcast_logical_and, "And",
+     [get_rnd((3, 4, 5), dtype=np.bool_), get_rnd((3, 4, 5), dtype=np.bool_)], {}, False),
+    ("test_xor", mx.sym.broadcast_logical_xor, "Xor",
+     [get_rnd((3, 4, 5), dtype=np.bool_), get_rnd((3, 4, 5), dtype=np.bool_)], {}, False),
+    ("test_or", mx.sym.broadcast_logical_or, "Or",
+     [get_rnd((3, 4, 5), dtype=np.bool_), get_rnd((3, 4, 5), dtype=np.bool_)], {}, False),
+    ("test_not", mx.sym.logical_not, "Not", [get_rnd((3, 4, 5), dtype=np.bool_)], {}, False),
+    ("test_square", mx.sym.square, "Pow", [get_rnd((2, 3), dtype=np.int32)], {}, True),
+    ("test_spacetodepth", mx.sym.space_to_depth, "SpaceToDepth", [get_rnd((1, 1, 4, 6))],
+     {'block_size': 2}, False),
+    ("test_softmax", mx.sym.SoftmaxOutput, "Softmax", [get_rnd((1000, 1000)), get_rnd(1000)],
+     {'ignore_label': 0, 'use_ignore': False}, True),
+    ("test_fullyconnected", mx.sym.FullyConnected, "Gemm", [get_rnd((4,3)), get_rnd((4, 3)), get_rnd(4)],
+     {'num_hidden': 4, 'name': 'FC'}, True)
+]
+
+if __name__ == '__main__':
+    unittest.main()