Posted to commits@tvm.apache.org by mo...@apache.org on 2020/09/07 19:20:43 UTC
[incubator-tvm] branch master updated: [ONNX] Add Clip importer to handle when min/max are provided as inputs. (#6251)
This is an automated email from the ASF dual-hosted git repository.
moreau pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git
The following commit(s) were added to refs/heads/master by this push:
new 7f7df05 [ONNX] Add Clip importer to handle when min/max are provided as inputs. (#6251)
7f7df05 is described below
commit 7f7df05e16dbef8c743b1ee5cf08f480f017acff
Author: Chris Sullivan <cs...@octoml.ai>
AuthorDate: Mon Sep 7 12:20:25 2020 -0700
[ONNX] Add Clip importer to handle when min/max are provided as inputs. (#6251)
* [ONNX] Add Clip importer to handle when min/max are provided as inputs.
* Use relay.op.minimum/maximum to handle dynamic bounds for Clip (see the
  sketch after these notes).
* Update test to new testing standard
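
A minimal NumPy sketch of that decomposition (illustrative only, not part of
the commit; the name clip_via_min_max is hypothetical): clip(x, lo, hi) is
folded into minimum(maximum(x, lo), hi), applying only the bounds that are
actually provided.

    import numpy as np

    def clip_via_min_max(x, bounds):
        # bounds is [lo, hi]; None marks a bound that was omitted.
        # Fold maximum(x, lo) first, then minimum(result, hi), the same
        # order Clip._impl_v11 uses in the diff below.
        result = x
        for op, bound in zip([np.maximum, np.minimum], bounds):
            if bound is not None:
                result = op(result, bound)
        return result

    x = np.random.uniform(-1, 7, size=(2, 4)).astype('float32')
    assert np.allclose(clip_via_min_max(x, [0.0, 6.0]), np.clip(x, 0.0, 6.0))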
---
python/tvm/relay/frontend/onnx.py | 27 +++++++++++++++++-
tests/python/frontend/onnx/test_forward.py | 46 ++++++++++++++++++++++++------
2 files changed, 63 insertions(+), 10 deletions(-)
diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py
index 952f667..877174c 100644
--- a/python/tvm/relay/frontend/onnx.py
+++ b/python/tvm/relay/frontend/onnx.py
@@ -34,6 +34,7 @@ from ..expr import If, Tuple, TupleGetItem
 from ..expr import RefCreate, RefRead, RefWrite
 from ..expr_functor import ExprFunctor
 from ..adt import Match, Clause
+from ..op.tensor import minimum as _minimum, maximum as _maximum
 from .common import AttrCvt, Renamer
 from .common import get_relay_op, new_var, infer_shape, infer_channels
@@ -1881,6 +1882,30 @@ class RoiAlign(OnnxOpConverter):
         return _vision.roi_align(x, rois, [output_height, output_width],
                                  spatial_scale, sampling_ratio)
+class Clip(OnnxOpConverter):
+    """Operator converter for Clip.
+    """
+    @staticmethod
+    def convert_attributes(inputs, attr, params):
+        convert = AttrCvt('clip', transforms={'min': 'a_min', 'max': 'a_max'})
+        return convert(inputs, attr, params)
+
+    @classmethod
+    def _impl_v1(cls, inputs, attr, params):
+        return Clip.convert_attributes(inputs, attr, params)
+
+    @classmethod
+    def _impl_v11(cls, inputs, attr, params):
+        if 'min' in attr and 'max' in attr:
+            return Clip.convert_attributes(inputs, attr, params)
+
+        assert len(inputs) <= 3, "Clip-11 takes up to 3 inputs: input, min, max"
+        result = inputs[0]
+        for i, op in enumerate([_maximum, _minimum]):
+            if i < len(inputs) - 1:
+                result = op(result, inputs[i+1])
+        return result
+
 # compatible operators that do NOT require any conversion.
 _identity_list = []
@@ -1962,7 +1987,7 @@ def _get_convert_map(opset):
         'Min': Minimum.get_converter(opset),
         'Sum': Sum.get_converter(opset),
         'Mean': Mean.get_converter(opset),
-        'Clip': AttrCvt('clip', transforms={'min': 'a_min', 'max': 'a_max'}),
+        'Clip': Clip.get_converter(opset),
         # softmax default axis is different in onnx
         'Softmax': Softmax.get_converter(opset),
         'LogSoftmax': AttrCvt('log_softmax', {'axis': ('axis', 1)}),
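
Swapping the bare AttrCvt entry for Clip.get_converter(opset) is what routes
opset 11+ models to _impl_v11 while older models keep the attribute-based
path. A simplified sketch of that version dispatch, under the assumption that
OnnxOpConverter.get_converter resolves to the newest _impl_v<N> whose N does
not exceed the model's opset; get_converter_sketch and FakeClip are
hypothetical names, not TVM's actual code:

    def get_converter_sketch(cls, opset):
        # Collect the versions N for which cls defines _impl_v<N>.
        versions = sorted(int(name.replace('_impl_v', ''))
                          for name in dir(cls) if name.startswith('_impl_v'))
        usable = [v for v in versions if v <= opset]
        if not usable:
            raise NotImplementedError(
                'opset %d is not supported for %s' % (opset, cls.__name__))
        # Newest implementation whose version does not exceed the opset.
        return getattr(cls, '_impl_v%d' % usable[-1])

    class FakeClip:
        @classmethod
        def _impl_v1(cls, *args):
            return 'v1'

        @classmethod
        def _impl_v11(cls, *args):
            return 'v11'

    assert get_converter_sketch(FakeClip, 10)() == 'v1'   # pre-11: attributes
    assert get_converter_sketch(FakeClip, 11)() == 'v11'  # 11+: inputs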
diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py
index 5921c0d..394c745 100644
--- a/tests/python/frontend/onnx/test_forward.py
+++ b/tests/python/frontend/onnx/test_forward.py
@@ -121,6 +121,14 @@ def verify_onnx_forward_impl(graph_file, data_shape, out_shape):
         tvm_out = get_tvm_output(model, x, target, ctx, out_shape, dtype)
         tvm.testing.assert_allclose(c2_out, tvm_out, rtol=1e-5, atol=1e-5)
+def make_constant_node(name, data_type, dims, vals):
+    return helper.make_node('Constant',
+                            inputs=[],
+                            outputs=[name],
+                            value=helper.make_tensor(name=name,
+                                                     data_type=data_type,
+                                                     dims=dims,
+                                                     vals=vals))

 @tvm.testing.uses_gpu
 def test_reshape():
@@ -636,6 +644,34 @@ def test_clip():
 @tvm.testing.uses_gpu
+def test_clip_min_max_as_inputs():
+    input_shape = (2, 4, 5, 6)
+    nodes = [
+        make_constant_node('min', onnx.TensorProto.FLOAT, (), [0.]),
+        make_constant_node('max', onnx.TensorProto.FLOAT, (), [6.]),
+    ]
+    input_names = ['in', 'min', 'max']
+    nodes.append(helper.make_node(
+        'Clip',
+        inputs=input_names,
+        outputs=['out']))
+    graph = helper.make_graph(nodes,
+                              "clip_test",
+                              inputs=[helper.make_tensor_value_info("in",
+                                          TensorProto.FLOAT, list(input_shape))],
+                              outputs=[helper.make_tensor_value_info("out",
+                                           TensorProto.FLOAT, list(input_shape))])
+    model = helper.make_model(graph, producer_name='clip_test')
+
+    indata = np.random.uniform(-1, 7, size=input_shape).astype('float32')
+    onnx_out = get_onnxruntime_output(model, indata, 'float32')
+    for target, ctx in tvm.testing.enabled_targets():
+        tvm_out = get_tvm_output(
+            model, indata, target, ctx, input_shape, 'float32')
+        tvm.testing.assert_allclose(onnx_out, tvm_out)
+
+
+@tvm.testing.uses_gpu
 def test_round():
     _test_onnx_op_elementwise((2, 4, 5, 6), np.round, {}, 'float32', 'Round', {})
@@ -3009,15 +3045,6 @@ def test_gru():
 @tvm.testing.uses_gpu
 def test_resize():
-    def make_constant_node(name, data_type, dims, vals):
-        return helper.make_node('Constant',
-                                inputs=[],
-                                outputs=[name],
-                                value=helper.make_tensor(name=name,
-                                                         data_type=data_type,
-                                                         dims=dims,
-                                                         vals=vals))
-
     def verify(ishape, oshape, scales, mode, coord_trans):
         nodes = [
             make_constant_node('roi', onnx.TensorProto.FLOAT, (0,), []),
@@ -3211,6 +3238,7 @@ if __name__ == '__main__':
     test_isinf()
     test_isnan()
     test_clip()
+    test_clip_min_max_as_inputs()
     test_onehot()
     test_matmul()
     test_batch_matmul()
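
To exercise just the new test locally (assuming a TVM build with the ONNX
frontend plus the onnx and onnxruntime Python packages installed, since the
test compares against get_onnxruntime_output):

    pytest tests/python/frontend/onnx/test_forward.py::test_clip_min_max_as_inputs

Running the file directly instead executes the full list above via its
__main__ block.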