Posted to commits@mxnet.apache.org by zh...@apache.org on 2020/12/16 18:26:57 UTC

[incubator-mxnet] branch v1.x updated: [v1.x] Update onnx export support for FullyConnected and add unit tests (#19679)

This is an automated email from the ASF dual-hosted git repository.

zha0q1 pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.x by this push:
     new 6b65b91  [v1.x] Update onnx export support for FullyConnected and add unit tests (#19679)
6b65b91 is described below

commit 6b65b91b0490bab91722fb1a296545b123bedea7
Author: Joe Evans <gi...@250hacks.net>
AuthorDate: Wed Dec 16 10:24:50 2020 -0800

    [v1.x] Update onnx export support for FullyConnected and add unit tests (#19679)
    
    * Add tests for FullyConnected onnx export and fix export operator so it works properly.
    
    * Remove unused variables.
    
    * Add coverage to onnx tests.
    
    * Condense code.
    
    * Add more test cases.
    
    * Revert "Add coverage to onnx tests."
    
    This reverts commit 86270bbe728d04e4c8fc6620b6aa21ea1efa93e2.
    
    Co-authored-by: Joe Evans <jo...@amazon.com>
---
 .../mxnet/contrib/onnx/mx2onnx/_op_translations.py | 59 +++++++---------------
 tests/python-pytest/onnx/test_operators.py         | 16 ++++++
 2 files changed, 33 insertions(+), 42 deletions(-)
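For readers following the change below: the rewritten converter maps FullyConnected onto a single ONNX Gemm node, Y = alpha*A*B' + beta*C with transB=1 (MXNet stores weights as (num_hidden, in_features)), optionally preceded by a Flatten node when flatten=True and fed a constant zero bias when no_bias=True. The standalone sketch below rebuilds that node sequence with onnx.helper outside the exporter; the tensor names, shapes, and the final checker call are illustrative assumptions mirroring the new unit test, not part of this commit.

    import onnx
    from onnx import TensorProto, helper

    # Hypothetical node name and shapes, chosen to mirror the new unit test.
    name = "fc0"
    batch, in_dim, num_hidden = 5, 325, 10

    nodes = [
        # flatten=True: collapse trailing input dims before the matrix multiply.
        helper.make_node("Flatten", [name + "_data"], [name + "_flatten0_out"]),
        # Gemm computes alpha*A*B' + beta*C; transB=1 because the weight is (num_hidden, in_dim).
        helper.make_node("Gemm",
                         [name + "_flatten0_out", name + "_weight", name + "_bias"],
                         [name], alpha=1.0, beta=1.0, transA=0, transB=1, name=name),
    ]

    graph = helper.make_graph(
        nodes, "fc_sketch",
        inputs=[
            helper.make_tensor_value_info(name + "_data", TensorProto.FLOAT, [batch, in_dim]),
            helper.make_tensor_value_info(name + "_weight", TensorProto.FLOAT, [num_hidden, in_dim]),
            helper.make_tensor_value_info(name + "_bias", TensorProto.FLOAT, [num_hidden]),
        ],
        outputs=[helper.make_tensor_value_info(name, TensorProto.FLOAT, [batch, num_hidden])],
    )
    onnx.checker.check_model(helper.make_model(graph))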

diff --git a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
index 4225028..5e37493 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
+++ b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
@@ -319,55 +319,30 @@ def convert_fully_connected(node, **kwargs):
     """Map MXNet's FullyConnected operator attributes to onnx's Gemm operator
     and return the created node.
     """
+    from onnx.helper import make_node
     name, input_nodes, attrs = get_inputs(node, kwargs)
-
-    initializer = kwargs["initializer"]
-
+    input_type = kwargs['in_type']
+    dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type]
+    flatten = get_boolean_attribute_value(attrs, "flatten")
     no_bias = get_boolean_attribute_value(attrs, "no_bias")
-
-    fcnode = []
-
-    op_name = "flatten_" + str(kwargs["idx"])
-    flatten_node = onnx.helper.make_node(
-        'Flatten',
-        inputs=[input_nodes[0]],
-        outputs=[op_name],
-        name=op_name
-    )
-
-    input_nodes[0] = op_name
-    fcnode.append(flatten_node)
+    nodes = []
+    if flatten:
+        nodes.append(make_node("Flatten", [input_nodes[0]], [name+"_flatten0_out"]))
+        in_nodes = [name+"_flatten0_out", input_nodes[1]]
+    else:
+        in_nodes = [input_nodes[0], input_nodes[1]]
 
     if no_bias:
-        data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]
-        bias_name = "bias" + str(kwargs["idx"])
-        tensor_node = onnx.helper.make_tensor_value_info(bias_name, data_type, (1,))
-        initializer.append(
-            onnx.helper.make_tensor(
-                name=bias_name,
-                data_type=data_type,
-                dims=(1,),
-                vals=[0],
-                raw=False,
-            )
-        )
-        input_nodes.append(bias_name)
-        fcnode.append(tensor_node)
+        create_const_scalar_node(name+"_bias", np.array([0], dtype=dtype), kwargs)
+        in_nodes.append(name+"_bias")
+    else:
+        in_nodes.append(input_nodes[2])
 
-    node = onnx.helper.make_node(
-        "Gemm",
-        input_nodes,  # input (A, B, C) - C can be in place
-        [name],  # output
-        alpha=1.0,
-        beta=1.0,
-        transA=False,
-        transB=True,
-        name=name
+    nodes.append(
+        make_node("Gemm", in_nodes, [name], alpha=1.0, beta=1.0, transA=0, transB=1, name=name)
     )
 
-    fcnode.append(node)
-
-    return fcnode
+    return nodes
 
 
 @mx_op.register("BatchNorm")
diff --git a/tests/python-pytest/onnx/test_operators.py b/tests/python-pytest/onnx/test_operators.py
index c537ee3..4f6baca 100644
--- a/tests/python-pytest/onnx/test_operators.py
+++ b/tests/python-pytest/onnx/test_operators.py
@@ -145,3 +145,19 @@ def test_onnx_export_contrib_interleaved_matmul_selfatt_qk(tmp_path, dtype):
     M2 = def_model('contrib.interleaved_matmul_selfatt_qk', heads=5)
     x2 = mx.nd.random.uniform(0, 1, (7, 5, 4*5*6))
     op_export_test('contrib_interleaved_matmul_selfatt_qk_2', M2, [x2], tmp_path)
+
+
+@pytest.mark.parametrize('dtype', ['float32', 'float64', 'int32', 'int64'])
+@pytest.mark.parametrize('num_hidden', [1, 5, 10, 20])
+@pytest.mark.parametrize('no_bias', [False, True])
+@pytest.mark.parametrize('flatten', [True, False])
+def test_onnx_export_fully_connected(tmp_path, dtype, num_hidden, no_bias, flatten):
+    M = def_model('FullyConnected', num_hidden=num_hidden, no_bias=no_bias, flatten=flatten)
+    x = mx.nd.random.uniform(-0.5, 0.5, (5, 325))
+    weight = mx.nd.random.uniform(0, 1, (num_hidden, 325))
+    args = [x, weight]
+    if not no_bias:
+        args.append(mx.nd.random.uniform(0,1,(num_hidden,)))
+    op_export_test('FullyConnected', M, args, tmp_path)
+
+
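
The new parametrized test can be exercised locally with a standard pytest invocation (not part of the commit; assumes an MXNet v1.x build with the onnx and pytest packages installed):

    pytest tests/python-pytest/onnx/test_operators.py -k test_onnx_export_fully_connected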