Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2020/06/17 22:36:28 UTC

[GitHub] [incubator-tvm] alexwong commented on a change in pull request #5052: [TARGET] ONNX codegen

alexwong commented on a change in pull request #5052:
URL: https://github.com/apache/incubator-tvm/pull/5052#discussion_r441870801



##########
File path: python/tvm/contrib/target/onnx.py
##########
@@ -0,0 +1,899 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines, redefined-builtin
+"""Relay to ONNX codegen """
+
+import os
+import struct
+import copy
+import numpy
+import onnx
+import onnx.utils
+from onnx import numpy_helper, OperatorSetIdProto, defs
+import tvm
+from tvm import relay
+import tvm._ffi
+from tvm.relay.expr_functor import ExprVisitor
+from tvm.relay.ty import TupleType, TensorType
+
+ONNX_OPSET_VERSIONS_SUPPORTED = [11]
+
+
+def tvm_array_to_list(arr):
+    return [x.value for x in arr]
+
+
+def get_onnx_version():
+    return onnx.__version__
+
+
+def infer_type(node):
+    """A method to infer the type of a relay expression."""
+    mod = tvm.IRModule.from_expr(node)
+    mod = relay.transform.InferType()(mod)
+    entry = mod["main"]
+    return entry if isinstance(node, relay.Function) else entry.body
+
+
+def call_node_infer_type(node):
+    """infer the output types of call node"""
+    infer_out = infer_type(node)
+    out_type = infer_out._checked_type_
+    if isinstance(out_type, TensorType):
+        types = [out_type]
+    elif isinstance(out_type, TupleType):
+        types = list(out_type.fields)
+    else:
+        raise RuntimeError("Unsupported output type %s in operator %s"
+                           % (type(out_type), node.op.name))
+
+    return types
+
+
+def add_input(data, name, model_container):
+    dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[data.dtype]
+    tensor_value_info = onnx.helper.make_tensor_value_info(name, dtype, shape=data.shape)
+    model_container.add_inputs([tensor_value_info])
+    data_tensor = numpy_helper.from_array(data, name)
+    model_container.add_initializers([data_tensor])
+
+
+class OpConverter(object):
+    """ Operator converter Base Class.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        """convert Relay attributes to ONNX attributes.
+           The derived classes should implement this method
+           if attributes are required by the operator
+           otherwise by default no attributes are passed
+        """
+        return {}
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        attrs = cls.convert_attributes(node_entry['relay_node'].attrs)
+        onnx_node = onnx.helper.make_node(cls.__name__,
+                                          node_entry['input_names'],
+                                          node_entry['output_names'],
+                                          **attrs)
+        model_container.add_nodes([onnx_node])
+
+
+def rename(op_name):
+    """ This method creates dynamic operator of name op_name with empty attributes
+    """
+    return type(op_name, (OpConverter,), {})
+
+
+class Reshape(OpConverter):
+    """ Operator converter for Reshape.
+    """
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        """Converts Relay operator Reshape to ONNX operator.
+           The Relay operator accepts shape as an attribute whereas the
+           ONNX operator accepts it as an input.
+        """
+
+        shape = numpy.asarray([a.value for a in node_entry['relay_node'].attrs.newshape],
+                              dtype=numpy.int64)
+        input_name = 'shape{}'.format(node_entry['name'])
+        node = onnx.helper.make_node(cls.__name__, [node_entry['input_names'][0], input_name],
+                                     node_entry['output_names'])
+        model_container.add_nodes([node])
+        add_input(shape, input_name, model_container)
+
+
+class Conv(OpConverter):
+    """ Operator converter for Conv.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'group': attrs.get_int("groups"),
+            'pads': attrs.get_int_tuple("padding"),
+            'strides': attrs.get_int_tuple("strides"),
+            'dilations': attrs.get_int_tuple("dilation"),
+            'kernel_shape': attrs.get_int_tuple("kernel_size"),
+        }
+
+
+class MaxPool(OpConverter):
+    """ Operator converter for MaxPool.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'pads': attrs.get_int_tuple("padding"),
+            'strides': attrs.get_int_tuple("strides"),
+            'kernel_shape': attrs.get_int_tuple("pool_size"),
+        }
+
+
+class Transpose(OpConverter):
+    """ Operator converter for Transpose.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {'perm': attrs.get_int_tuple("axes")} if attrs["axes"] else {}
+
+
+class MatMul(OpConverter):
+    """ Operator converter for MatMul.
+    """
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
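+        # Relay's nn.dense computes data * transpose(weight), so transpose
+        # the second input before emitting the ONNX MatMul.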
+        inter_output_name = 'inter{}'.format(node_entry['name'])
+        transpose_node = onnx.helper.make_node(Transpose.__name__,
+                                               [node_entry['input_names'][1]],
+                                               [inter_output_name],
+                                               perm=(1, 0))
+        model_container.add_nodes([transpose_node])
+
+        inputs = [node_entry['input_names'][0], inter_output_name]
+        matmul_node = onnx.helper.make_node(cls.__name__, inputs, node_entry['output_names'])
+        model_container.add_nodes([matmul_node])
+
+
+class Flatten(OpConverter):
+    """ Operator converter for Flatten.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'axis': 1,
+        }
+
+
+class BatchNormalization(OpConverter):
+    """ Operator converter for BatchNormalization.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'epsilon': float(attrs.get_str('epsilon')),
+            'axis': attrs.get_int('axis'),
+        }
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        """Converts Relay operator batch_norm to ONNX operator.
+           Relay operator has property axis to handle data in NHWC format.
+        """
+        attrs = cls.convert_attributes(node_entry['relay_node'].attrs)
+        transpose_out_name = node_entry['input_names'][0]
+        inter_output_names = [node_entry['output_names'][0]]
+        # axis==3 means channel is specified along the 3rd axis
+        if attrs['axis'] == 3:
+            transpose_out_name = 'transpose_{}'.format(node_entry['name'])
+            node_transposed = onnx.helper.make_node(Transpose.__name__,
+                                                    [node_entry['input_names'][0]],
+                                                    [transpose_out_name],
+                                                    perm=[0, 3, 1, 2])
+            model_container.add_nodes([node_transposed])
+            inter_output_names = ['batch_norm_{}'.format(node_entry['name'])]
+
+        input_names = [transpose_out_name] + node_entry['input_names'][1:]
+        batch_norm_node = onnx.helper.make_node(cls.__name__,
+                                                input_names,
+                                                inter_output_names,
+                                                epsilon=attrs['epsilon'])
+        model_container.add_nodes([batch_norm_node])
+
+        if attrs['axis'] == 3:
+            node_transposed = onnx.helper.make_node(Transpose.__name__,
+                                                    inter_output_names,
+                                                    [node_entry['output_names'][0]],
+                                                    perm=[0, 2, 3, 1])
+            model_container.add_nodes([node_transposed])
+
+
+class Dropout(OpConverter):
+    """ Operator converter for Dropout.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'ratio': float(attrs.get_str('rate')),
+        }
+
+
+class AveragePool(MaxPool):
+    """ Operator converter for AveragePool.
+    """
+
+
+class Concat(OpConverter):
+    """ Operator converter for Concat.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'axis': attrs.get_int("axis"),
+        }
+
+
+class BiasAdd(OpConverter):
+    """ Operator converter for BiasAdd.
+    """
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        input_node = node_dict[node_entry['inputs'][0]]
+        assert len(input_node) == 1, "input node_entry can not be a Tuple"
+        input_node = input_node[0]
+        data_ndim = len(input_node['types'][0].shape)
+        axis = node_entry['relay_node'].attrs.get_int("axis")
+        if axis < 0:
+            axis = axis + data_ndim
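+        # append trailing singleton axes to the 1-D bias so it broadcasts
+        # against the data tensor along the given axis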
+        new_axes = data_ndim - axis - 1
+        if new_axes:
+            inter_output_name = 'inter{}'.format(node_entry['name'])
+            unsqueeze_node = onnx.helper.make_node('Unsqueeze',
+                                                   [node_entry['input_names'][1]],
+                                                   [inter_output_name],
+                                                   axes=tuple(range(1, new_axes + 1)))
+            model_container.add_nodes([unsqueeze_node])
+        else:
+            inter_output_name = node_entry['input_names'][1]
+
+        inputs = [node_entry['input_names'][0], inter_output_name]
+        add_node = onnx.helper.make_node('Add', inputs, node_entry['output_names'])
+        model_container.add_nodes([add_node])
+
+
+class ReduceMean(OpConverter):
+    """ Operator converter for ReduceMean.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'axes': attrs.axis,
+            'keepdims': 1 if attrs.get_int("keepdims", 0) else 0
+        }
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        input_node = node_dict[node_entry['inputs'][0]]
+        assert len(input_node) == 1, "input node cannot be a Tuple"
+        input_node = input_node[0]
+        shape = input_node['types'][0].shape
+        axis = node_entry['relay_node'].attrs.axis
+        axis = list(range(len(shape))) if not axis else tvm_array_to_list(axis)
+        exclude = 1 if bool(node_entry['relay_node'].attrs.exclude) else 0
+        keepdims = 1 if bool(node_entry['relay_node'].attrs.keepdims) else 0
+        if exclude:
+            # exclude=True means reduce over all axes *except* the given ones
+            all_axis = list(range(len(shape)))
+            axis = sorted(set(all_axis) - set(axis))
+
+        node = onnx.helper.make_node(cls.__name__,
+                                     node_entry['input_names'],
+                                     node_entry['output_names'],
+                                     axes=axis,
+                                     keepdims=keepdims)
+        model_container.add_nodes([node])
+
+
+class Pad(OpConverter):
+    """ Operator converter for Pad.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        before = []
+        after = []
+        for axis_pads in attrs.pad_width:
+            before.append(axis_pads[0])
+            after.append(axis_pads[1])
+        pads = before + after
+        pads = numpy.asarray(pads, dtype=pads[0].dtype)
+        return {
+            'pads': pads,
+            'mode': attrs.get_str('pad_mode'),
+            'constant_value': attrs.pad_value
+        }
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        """Converts Relay operator Pad to ONNX operator.
+           The Relay operator accepts pads as an attribute whereas the
+           ONNX operator accepts it as an input.
+        """
+        attrs = cls.convert_attributes(node_entry['relay_node'].attrs)
+
+        name = node_entry['name']
+        data = numpy.asarray(attrs['pads'], dtype=attrs['pads'][0].dtype).astype(numpy.int64)
+        input_name = 'pads_{}'.format(name)
+        value = numpy.dtype(node_entry['types'][0].dtype).type(attrs['constant_value'])
+        input_value_name = 'value_{}'.format(name)
+        add_input(data, input_name, model_container)
+        add_input(value, input_value_name, model_container)
+
+        input_names = [node_entry['input_names'][0], input_name, input_value_name]
+        node = onnx.helper.make_node(cls.__name__, input_names, node_entry['output_names'])
+        model_container.add_nodes([node])
+
+
+class Softmax(OpConverter):
+    """ Operator converter for SoftMax.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'axis': attrs.axis,
+        }
+
+
+class Squeeze(OpConverter):
+    """ Operator converter for Squeeze.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'axes': attrs.axis,
+        }
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        input_node = node_dict[node_entry['inputs'][0]]
+        assert len(input_node) == 1, "input node cannot be a Tuple"
+        input_node = input_node[0]
+        shape = input_node['types'][0].shape
+        axis = node_entry['relay_node'].attrs.get_int("axis")
+        if not axis:
+            axis = []
+            for axis_idx, val in enumerate(shape):
+                if val.value == 1:
+                    axis.append(axis_idx)
+        else:
+            axis = node_entry['relay_node'].attrs.get_int_tuple("axis")
+
+        node = onnx.helper.make_node(cls.__name__,
+                                     node_entry['input_names'],
+                                     node_entry['output_names'],
+                                     axes=axis)
+        model_container.add_nodes([node])
+
+
+class Slice(OpConverter):
+    """ Operator converter for Slice.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'starts': attrs.get_int_tuple('begin'),
+            'ends': attrs.get_int_tuple('end'),
+            'steps': attrs.get_int_tuple('strides'),
+            'slice_mode': attrs.get_str('slice_mode')
+        }
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        attrs = cls.convert_attributes(node_entry['relay_node'].attrs)
+
+        name = node_entry['name']
+        input_node = node_dict[node_entry['inputs'][0]]
+        assert len(input_node) == 1, "input node cannot be a Tuple"
+        input_node = input_node[0]
+        shape = input_node['types'][0].shape
+
+        starts = list(attrs['starts'])
+        ends = list(attrs['ends'])
+        steps = list(attrs['steps'])
+        starts += [0] * (len(shape) - len(starts))
+        ends += [shape[i] + 1 for i in range(len(ends), len(shape))]
+        axes = list(range(len(shape)))
+
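+        # in 'size' mode Relay's 'end' holds slice sizes, with a negative
+        # size meaning "to the end of the axis"; convert to absolute ends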
+        if attrs['slice_mode'] == 'size':
+            ends = [starts[i] + (shape[i] + 1 if ends[i] < 0 else ends[i])
+                    for i in range(len(shape))]
+            steps = [1] * len(shape)
+        else:
+            steps += [1] * (len(shape) - len(steps))
+
+        def _add_input(val, input_name):
+            val_arr = numpy.asarray(val).astype(numpy.int64)
+            input_name = '{}_{}'.format(name, input_name)
+            add_input(val_arr, input_name, model_container)
+            return input_name
+
+        input_names = []
+        input_names.append(_add_input(starts, 'starts'))
+        input_names.append(_add_input(ends, 'ends'))
+        input_names.append(_add_input(axes, 'axes'))
+        input_names.append(_add_input(steps, 'steps'))
+
+        input_names = [node_entry['input_names'][0]] + input_names
+
+        slice_node = onnx.helper.make_node(cls.__name__,
+                                           input_names,
+                                           node_entry['output_names'])
+        model_container.add_nodes([slice_node])
+
+
+class Split(OpConverter):
+    """ Operator converter for Split.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        indices_or_sections = attrs['indices_or_sections']
+
+        if isinstance(indices_or_sections, (list, tvm.ir.container.Array)):
+            indices_or_sections = attrs.get_int_tuple('indices_or_sections')
+        if isinstance(indices_or_sections, tvm.ir.PrimExpr):
+            indices_or_sections = indices_or_sections.value
+
+        return {
+            'indices_or_section': indices_or_sections,
+            'axis': attrs.get_int('axis'),
+        }
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        attrs = cls.convert_attributes(node_entry['relay_node'].attrs)
+
+        input_node = node_dict[node_entry['inputs'][0]]
+        assert len(input_node) == 1, "input node cannot be a Tuple"
+        input_node = input_node[0]
+        shape = input_node['types'][0].concrete_shape
+
+        indices_or_sect = attrs["indices_or_section"]
+        axis = attrs["axis"]
+        axis_length = shape[axis]
+
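+        # ONNX Split takes per-output section sizes, while Relay provides
+        # either a section count or split indices; convert accordingly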
+        if isinstance(indices_or_sect, int):
+            split = [axis_length // indices_or_sect] * indices_or_sect
+        else:
+            split = []
+            for i in range(len(indices_or_sect) + 1):
+                if i == 0:
+                    split.append(indices_or_sect[0])
+                elif i == len(indices_or_sect):
+                    split.append(axis_length - indices_or_sect[-1])
+                else:
+                    split.append(indices_or_sect[i] - indices_or_sect[i - 1])
+
+        split_node = onnx.helper.make_node(cls.__name__,
+                                           node_entry['input_names'],
+                                           node_entry['output_names'],
+                                           split=split,
+                                           axis=axis)
+        model_container.add_nodes([split_node])
+
+
+class ConstantOfShapeZeros(OpConverter):
+    """ Operator converter for ConstantOfShape.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'value': 0
+        }
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        attrs = cls.convert_attributes(node_entry['relay_node'].attrs)
+        input_node = node_dict[node_entry['inputs'][0]]
+        assert len(input_node) == 1, "input node cannot be a Tuple"
+        input_node = input_node[0]
+        dtype = input_node['relay_node'].type_annotation.dtype
+        input_shape_name = 'shape_{}'.format(node_entry['name'])
+        shape = [val.value for val in input_node['relay_node'].type_annotation.shape]
+        shape = numpy.asarray(shape).astype(numpy.int64)
+        add_input(shape, input_shape_name, model_container)
+
+        dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(dtype)]
+        tensor_value = onnx.helper.make_tensor("value", dtype,
+                                               [1], [attrs['value']])
+
+        node = onnx.helper.make_node('ConstantOfShape',
+                                     [input_shape_name],
+                                     node_entry['output_names'],
+                                     value=tensor_value)
+        model_container.add_nodes([node])
+
+
+class ConstantOfShapeOnes(ConstantOfShapeZeros):
+    """ Operator converter for ConstantOfShape.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'value': 1
+        }
+
+
+relay_to_onnx_op_mapping = {
+    'reshape': Reshape,
+    'nn.conv2d': Conv,
+    'add': rename('Add'),
+    'nn.relu': rename('Relu'),
+    'transpose': Transpose,
+    'nn.dense': MatMul,
+    'nn.max_pool2d': MaxPool,
+    'nn.batch_flatten': Flatten,
+    'multiply': rename('Mul'),
+    'nn.bias_add': BiasAdd,
+    'nn.batch_norm': BatchNormalization,
+    'nn.global_avg_pool2d': rename('GlobalAveragePool'),
+    'concatenate': Concat,
+    'nn.dropout': Dropout,
+    'nn.avg_pool2d': AveragePool,
+    'divide': rename('Div'),
+    'mean': ReduceMean,
+    'nn.pad': Pad,
+    'nn.softmax': Softmax,
+    'squeeze': Squeeze,
+    'strided_slice': Slice,
+    'greater': rename('Greater'),
+    'less': rename('Less'),
+    'equal': rename('Equal'),
+    'zeros_like': ConstantOfShapeZeros,
+    'ones_like': ConstantOfShapeOnes,
+    'subtract': rename('Sub'),
+    'split': Split
+}
+
+
+class ModelContainer(object):
+    """ A container class to hold  different attributes of ONNX model graph
+    """
+
+    def __init__(self, name, opset_version):
+        self._name = name
+        self._opset_version = opset_version
+        self._inputs = []
+        self._outputs = []
+        self._nodes = []
+        self._initializers = []
+
+    def add_inputs(self, inputs):
+        self._inputs.extend(inputs)
+
+    def add_outputs(self, outputs):
+        self._outputs.extend(outputs)
+
+    def add_nodes(self, nodes):
+        self._nodes.extend(nodes)
+
+    def add_initializers(self, initializers):
+        self._initializers.extend(initializers)
+
+    def _get_opsets(self):
+        opsets = []
+        imp = OperatorSetIdProto()
+        imp.version = self._opset_version
+        opsets.append(imp)
+        return opsets
+
+    def make_model(self):
+        """ Creates the onnx model from the graph """
+        onnx_graph = onnx.helper.make_graph(
+            self._nodes,
+            self._name,
+            self._inputs,
+            self._outputs,
+            self._initializers
+        )
+        kwargs = {}
+        kwargs["opset_imports"] = self._get_opsets()
+        kwargs["producer_name"] = 'TVM Relay'
+        kwargs["producer_version"] = tvm.__version__
+
+        return onnx.helper.make_model(onnx_graph, **kwargs)
+
+
+class RelayToONNXConverter(ExprVisitor):
+    """A helper class to traverse the Relay graph and convert Relay nodes to ONNX model
+
+    Parameters
+    ----------
+    name : str
+       name of the model
+
+    params : dict
+        dict of the parameter names and NDarray values
+
+    opset_version : int
+        target onnx opset version
+
+    """
+
+    def __init__(self, name, params, opset_version):
+        super().__init__()
+        self._name = name
+        self._mc = ModelContainer(name, opset_version)
+        self._params = params
+        self._node_dict = {}
+        self._node_count = 0
+        self.last_node = None
+
+    @classmethod
+    def _get_node_entry(cls, relay_node, name):
+        return {"relay_node": relay_node,
+                "inputs": [relay_node],  # inputs in the form of relay nodes
+                "types": [],  # output types in case of call nodes else self type
+                "name": name,  # name of the node
+                "input_names": [name],  # input names in case of call nodes else self name
+                "output_names": [name],  # output names in case of call nodes else self name
+                "op": None,  # op name in case of call node else None
+                }
+
+    def convert_to_onnx(self, func):
+        """ Traverse Relay graph and generate a ONNX model"""
+
+        self.visit(func)
+        self._add_output(self._node_dict[self.last_node])
+        model = self._mc.make_model()
+        polished_model = onnx.utils.polish_model(model)
+        return polished_model
+
+    def visit(self, expr):
+        self._node_count += 1
+        super().visit(expr)
+
+    def visit_constant(self, const):
+        node_index = self._node_count
+        name = "Constant_" + str(node_index)
+        node_entry = self._get_node_entry(const, name)
+        node_entry["types"] = [const.checked_type]
+
+        self._add_constant_input(node_entry, node_index)
+        self._node_dict[const] = [node_entry]
+
+    def visit_var(self, var):
+        node_index = self._node_count
+        node_entry = self._get_node_entry(var, var.name_hint)
+        node_entry["types"] = [var.type_annotation]
+
+        self._add_input(node_entry, node_index)
+        self._node_dict[var] = [node_entry]
+
+    def visit_tuple(self, tup):
+        self._node_dict[tup] = []
+        for f in tup.fields:
+            self.visit(f)
+            self._node_dict[tup].extend(self._node_dict[f])
+
+        self.last_node = tup
+
+    def visit_tuple_getitem(self, t):
+        self.visit(t.tuple_value)
+        tup_node = self._node_dict[t.tuple_value]
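+        # the tuple value may map to several node entries, or to a single
+        # entry carrying several output names; handle both cases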
+        if len(tup_node) > 1:
+            self._node_dict[t] = [tup_node[t.index]]
+        else:
+            node_entry = copy.deepcopy(tup_node[0])
+            output_names = [node_entry["output_names"][t.index]]
+            node_entry["output_names"] = output_names
+            self._node_dict[t] = [node_entry]
+        self.last_node = t
+
+    def visit_call(self, call):
+        node_index = self._node_count
+        op = call.op
+        name = "{}_{}".format(op, node_index)
+        node_entry = self._get_node_entry(call, name)
+
+        node_entry["op"] = op
+        node_entry["input_names"] = []
+        node_entry["inputs"] = []
+        node_entry["output_names"] = None
+        for input_arg in call.args:
+            self.visit(input_arg)
+            input_names = []
+            for arg_node_entry in self._node_dict[input_arg]:
+                input_names.extend(arg_node_entry["output_names"])
+            node_entry["input_names"].extend(input_names)
+            node_entry["inputs"].extend([input_arg])
+
+        node_entry['types'] = call_node_infer_type(call)
+        node_entry["output_names"] = []
+        for i in range(len(node_entry['types'])):
+            node_entry["output_names"].append(name + str(i))
+        self.last_node = call
+        self._add_node(node_entry, node_index)
+        self._node_dict[call] = [node_entry]
+
+    def _add_node(self, node_entry, idx):
+        """Convert Relay operator node to ONNX operator and add it to container nodes list"""
+        if node_entry['op'].name not in relay_to_onnx_op_mapping:
+            raise NotImplementedError("Currently the operator '{0}' is "
+                                      "not supported.".format(node_entry['op'].name))
+
+        converter = relay_to_onnx_op_mapping[node_entry['op'].name]()
+
+        return converter.convert(node_entry, self._mc, self._node_dict)
+
+    def _add_params(self, node_entry, idx):
+        """Add param value to initializer and name to inputs"""
+        param_name = node_entry['name']
+        assert param_name in self._params, "The parameter {0} is not present " \
+                                           "in the params dict provided.".format(param_name)
+        value = self._params[param_name]
+        numpy_array = value.asnumpy()
+        tensor = numpy_helper.from_array(numpy_array, param_name)
+        self._mc.add_initializers([tensor])
+        dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy_array.dtype]
+        input_info = onnx.helper.make_tensor_value_info(param_name,
+                                                        dtype,
+                                                        shape=numpy_array.shape)
+        self._mc.add_inputs([input_info])
+
+    def _add_constant_input(self, node_entry, idx):
+        """Create named input for constant and add it to container inputs.
+        If input is a parameter then add to param
+        """
+        node = node_entry['relay_node']
+        param_name = node_entry['name']
+        self._params[param_name] = node.data
+        self._add_params(node_entry, idx)
+
+    def _add_input(self, node_entry, idx):
+        """Add input node to container inputs. If input is a parameter then add to param"""
+        if node_entry['name'] in self._params:
+            self._add_params(node_entry, idx)
+        else:
+            type = node_entry['types'][0]

Review comment:
       I think it's probably preferable to rename `type` to something else so it doesn't shadow Python's built-in `type` function. Also, I am testing this with a very specific case, and it seems like this function needs some extra logic to handle TupleType as an input (though there's already some of that elsewhere in the code). I may be able to provide an example (either a script or a Relay graph) to illustrate, if you'd like.
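
       To illustrate, here's a rough, untested sketch of what I mean (the TupleType branch and the per-field input naming are hypothetical, just to show the shape of the logic; I've renamed `type` to `node_type`):

           def _add_input(self, node_entry, idx):
               """Add input node to container inputs. If input is a parameter then add to param"""
               if node_entry['name'] in self._params:
                   self._add_params(node_entry, idx)
               else:
                   # renamed from `type` to avoid shadowing the builtin
                   node_type = node_entry['types'][0]
                   # hypothetical: flatten a TupleType into one value_info per field
                   types = node_type.fields if isinstance(node_type, TupleType) else [node_type]
                   for i, ttype in enumerate(types):
                       input_name = node_entry['name'] + (str(i) if len(types) > 1 else '')
                       dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(ttype.dtype)]
                       value_info = onnx.helper.make_tensor_value_info(
                           input_name, dtype, shape=[dim.value for dim in ttype.shape])
                       self._mc.add_inputs([value_info])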




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org