Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2020/09/01 16:47:58 UTC

[GitHub] [incubator-tvm] comaniac commented on a change in pull request #6366: [TARGET] Add layout_transform, clip and expand_dims in onnx converter

comaniac commented on a change in pull request #6366:
URL: https://github.com/apache/incubator-tvm/pull/6366#discussion_r481289001



##########
File path: tests/python/contrib/test_onnx.py
##########
@@ -448,6 +448,38 @@ def verify_tuple_types(dshape, indices_or_sections, axis=None, dtype = "float32"
     verify_tuple_types((5, 5, 2, 2), [1, 3, 4], axis=0)
     verify_tuple_types((5, 5, 2, 2), [1, 3, 4], axis=1)
 
+def test_layout_transform():
+    def verify_layout_transform(dshape, src_layout, dst_layout, dtype="float32"):
+        x = relay.var("x", relay.ty.TensorType(dshape, dtype))
+        y = relay.layout_transform(x, src_layout, dst_layout)
+        func = relay.Function([x], y)
+        x_data = np.random.uniform(size=dshape).astype(dtype)
+        verify_results(func, [x_data], 'test_layout_transform', rtol=1e-5, atol=1e-5)
+
+    verify_layout_transform((1, 3, 8, 8), 'NCHW', 'NHWC')
+    verify_layout_transform((1, 8, 8, 3), 'NHWC', 'NCHW')

Review comment:
       Out of curiosity, what happens if the source layout and the desired layout are the same?
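
   For reference, a minimal sketch using only the `perm` computation from the diff above suggests that matching layouts produce the identity permutation, so the emitted ONNX `Transpose` would be a no-op:

   ```python
   # Sketch: the perm computation from LayoutTransform.convert_attributes,
   # applied to identical source and destination layouts.
   src_layout = "NCHW"
   dst_layout = "NCHW"
   perm = [src_layout.index(c) for c in dst_layout]
   print(tuple(perm))  # (0, 1, 2, 3) -> an identity Transpose node
   ```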

##########
File path: python/tvm/contrib/target/onnx.py
##########
@@ -510,6 +510,99 @@ def convert(cls, node_entry, model_container, node_dict):
                                            axis=axis)
         model_container.add_nodes([slice_node])
 
+class LayoutTransform(OpConverter):
+    """ Operator converter for Layouttransform
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        src_layout = attrs.get_str("src_layout")
+        dst_layout = attrs.get_str("dst_layout")
+
+        perm = [src_layout.index(c) for c in dst_layout]
+        return {'perm': tuple(perm)}
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        attrs = cls.convert_attributes(node_entry['relay_node'].attrs)
+        onnx_node = onnx.helper.make_node("Transpose",
+                                          node_entry['input_names'],
+                                          node_entry['output_names'],
+                                          **attrs)
+        model_container.add_nodes([onnx_node])
+
+class Clip(OpConverter):
+    """ Operator converter for Clip.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'min': attrs.a_min,
+            'max': attrs.a_max
+        }
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        attrs = cls.convert_attributes(node_entry['relay_node'].attrs)
+
+        name = node_entry['name']
+
+        def _add_input(val, input_name):
+            val_arr = numpy.asarray(val).astype(numpy.float32)
+            input_name = '{}_{}'.format(name, input_name)
+            add_input(val_arr, input_name, model_container)
+            return input_name
+
+        input_names = []
+        input_names.append(_add_input(attrs['min'], 'min'))
+        input_names.append(_add_input(attrs['max'], 'max'))
+
+        input_names = [node_entry['input_names'][0]] + input_names
+
+        node = onnx.helper.make_node(cls.__name__, input_names, node_entry['output_names'])
+        model_container.add_nodes([node])
+
+class Expand(OpConverter):
+    """ Operator converter for Expand_dims.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'axis': attrs.axis,
+            'num_newaxis': attrs.num_newaxis
+        }
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        attrs = cls.convert_attributes(node_entry['relay_node'].attrs)
+
+        name = node_entry['name']
+
+        def _add_input(val, input_name):
+            val_arr = numpy.asarray(val).astype(numpy.int64)
+            input_name = '{}_{}'.format(name, input_name)
+            add_input(val_arr, input_name, model_container)
+            return input_name
+
+        input_node = node_dict[node_entry['inputs'][0]]
+        assert len(input_node) == 1, "input node_entry can not be a Tuple"
+        input_node = input_node[0]
+        data_shape = input_node['types'][0].shape
+        new_shape = list(data_shape)
+
+        #pylint: disable=unused-variable
+        for i in range(attrs['num_newaxis']):

Review comment:
    ```suggestion
        for _ in range(attrs['num_newaxis']):
    ```
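
   For context, pylint's `unused-variable` check is what the `#pylint: disable` comment above suppresses; naming the loop variable `_` avoids the pragma entirely. A tiny illustration (the loop body here is a placeholder, not the PR's actual code):

   ```python
   # Placeholder body only: `_` marks the index as intentionally unused,
   # so no pylint pragma is needed.
   num_newaxis = 2
   new_shape = [1, 3, 8, 8]
   for _ in range(num_newaxis):
       new_shape.append(1)
   print(new_shape)  # [1, 3, 8, 8, 1, 1]
   ```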

##########
File path: python/tvm/contrib/target/onnx.py
##########
@@ -510,6 +510,99 @@ def convert(cls, node_entry, model_container, node_dict):
                                            axis=axis)
         model_container.add_nodes([slice_node])
 
+class LayoutTransform(OpConverter):
+    """ Operator converter for Layouttransform
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        src_layout = attrs.get_str("src_layout")
+        dst_layout = attrs.get_str("dst_layout")
+
+        perm = [src_layout.index(c) for c in dst_layout]
+        return {'perm': tuple(perm)}
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        attrs = cls.convert_attributes(node_entry['relay_node'].attrs)
+        onnx_node = onnx.helper.make_node("Transpose",
+                                          node_entry['input_names'],
+                                          node_entry['output_names'],
+                                          **attrs)
+        model_container.add_nodes([onnx_node])
+
+class Clip(OpConverter):
+    """ Operator converter for Clip.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'min': attrs.a_min,
+            'max': attrs.a_max
+        }
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        attrs = cls.convert_attributes(node_entry['relay_node'].attrs)
+
+        name = node_entry['name']
+
+        def _add_input(val, input_name):
+            val_arr = numpy.asarray(val).astype(numpy.float32)
+            input_name = '{}_{}'.format(name, input_name)
+            add_input(val_arr, input_name, model_container)
+            return input_name
+
+        input_names = []
+        input_names.append(_add_input(attrs['min'], 'min'))
+        input_names.append(_add_input(attrs['max'], 'max'))
+
+        input_names = [node_entry['input_names'][0]] + input_names
+
+        node = onnx.helper.make_node(cls.__name__, input_names, node_entry['output_names'])
+        model_container.add_nodes([node])
+
+class Expand(OpConverter):
+    """ Operator converter for Expand_dims.
+    """
+
+    @classmethod
+    def convert_attributes(cls, attrs):
+        return {
+            'axis': attrs.axis,
+            'num_newaxis': attrs.num_newaxis
+        }
+
+    @classmethod
+    def convert(cls, node_entry, model_container, node_dict):
+        attrs = cls.convert_attributes(node_entry['relay_node'].attrs)
+
+        name = node_entry['name']
+
+        def _add_input(val, input_name):
+            val_arr = numpy.asarray(val).astype(numpy.int64)
+            input_name = '{}_{}'.format(name, input_name)
+            add_input(val_arr, input_name, model_container)
+            return input_name

Review comment:
       While I agree with @leandron that this helper function appears many times across different ops, it seems improper to put `_add_input` into `OpConverter` because 1) it is not required by all subclasses; and 2) we already have a static method `add_input`, and `_add_input` is more of a per-op helper.
   
   I'd suggest enhancing `add_input` to support 1) numpy array data and 2) an input name prefix. In this way, `add_input` itself should be sufficient.
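
   A hypothetical sketch of that enhancement, assuming the container exposes the `add_inputs`/`add_initializers` methods used by the existing helpers in this file; the real `add_input` signature and the chosen parameter names may differ:

   ```python
   from onnx import helper, mapping, numpy_helper
   import numpy

   def add_input(data, name, prefix, model_container):
       """Hypothetical enhanced add_input: coerces `data` to a numpy array and
       prefixes the input name, so per-op `_add_input` wrappers are unnecessary.
       Callers still cast to the required dtype (e.g. float32, int64) beforehand."""
       data = numpy.asarray(data)
       input_name = '{}_{}'.format(prefix, name)
       dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[data.dtype]
       value_info = helper.make_tensor_value_info(input_name, dtype, shape=data.shape)
       model_container.add_inputs([value_info])
       model_container.add_initializers([numpy_helper.from_array(data, input_name)])
       return input_name
   ```

   With something like this, the `Clip` converter could call, for example, `add_input(attrs['min'], 'min', node_entry['name'], model_container)` directly.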
   




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org