Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/06/12 20:38:34 UTC

[GitHub] sandeep-krishnamurthy closed pull request #11140: [MXNET-344] [ONNX-MXNet] Add new Operator Translations for ONNX import module

sandeep-krishnamurthy closed pull request #11140: [MXNET-344] [ONNX-MXNet] Add new Operator Translations for ONNX import module
URL: https://github.com/apache/incubator-mxnet/pull/11140

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

diff --git a/python/mxnet/contrib/onnx/_import/import_helper.py b/python/mxnet/contrib/onnx/_import/import_helper.py
index c8d45216729..3dfff3ed681 100644
--- a/python/mxnet/contrib/onnx/_import/import_helper.py
+++ b/python/mxnet/contrib/onnx/_import/import_helper.py
@@ -32,6 +32,10 @@
 from .op_translations import reduce_max, reduce_mean, reduce_min, reduce_sum
 from .op_translations import reduce_prod, avg_pooling, max_pooling
 from .op_translations import argmax, argmin, maximum, minimum
+from .op_translations import clip, reduce_log_sum, reduce_log_sum_exp
+from .op_translations import reduce_sum_square, reduce_l2, max_roi_pooling, instance_norm
+from .op_translations import log_softmax, softsign, lesser, greater, equal
+from .op_translations import logical_and, logical_or, logical_xor, logical_not
 
 # convert_map maps ONNX operator names to converter functions (callables)
 # defined in the op_translations module.
@@ -102,6 +106,22 @@
     # Sorting and Searching
     'ArgMax'            : argmax,
     'ArgMin'            : argmin,
-    'Max'               : maximum, #elemwise maximum
-    'Min'               : minimum #elemwise minimum
+    'Max'               : maximum,
+    'Min'               : minimum,
+    'Clip'              : clip,
+    'ReduceLogSum'      : reduce_log_sum,
+    'ReduceLogSumExp'   : reduce_log_sum_exp,
+    'ReduceSumSquare'   : reduce_sum_square,
+    'ReduceL2'          : reduce_l2,
+    'MaxRoiPool'        : max_roi_pooling,
+    'InstanceNormalization' : instance_norm,
+    'LogSoftmax'        : log_softmax,
+    'Softsign'          : softsign,
+    'Less'              : lesser,
+    'Greater'           : greater,
+    'Equal'             : equal,
+    'And'               : logical_and,
+    'Xor'               : logical_xor,
+    'Not'               : logical_not,
+    'Or'                : logical_or
 }
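
For reference, convert_map is consumed by the import module's operator
dispatch: each converter returns either the name of an MXNet operator or an
already-built symbol, together with translated attributes and inputs. A
minimal sketch of that dispatch, assuming a hypothetical _convert_operator
helper (the real import code may differ, and proto_obj is elided as None):

    import mxnet.symbol as symbol

    def _convert_operator(op_name, attrs, inputs, convert_map):
        """Look up the converter registered for an ONNX op and apply it."""
        if op_name not in convert_map:
            raise NotImplementedError("Operator %s not implemented." % op_name)
        op_or_name, new_attrs, new_inputs = convert_map[op_name](attrs, inputs, None)
        if isinstance(op_or_name, str):
            # Most converters (e.g. 'Greater' -> 'broadcast_greater') return an
            # MXNet operator name, resolved here on the symbol namespace.
            return getattr(symbol, op_or_name)(*new_inputs, **new_attrs)
        # Converters such as reduce_log_sum build and return a symbol directly.
        return op_or_name
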
diff --git a/python/mxnet/contrib/onnx/_import/op_translations.py b/python/mxnet/contrib/onnx/_import/op_translations.py
index e02cb0c2b62..0fad0080bef 100644
--- a/python/mxnet/contrib/onnx/_import/op_translations.py
+++ b/python/mxnet/contrib/onnx/_import/op_translations.py
@@ -18,6 +18,7 @@
 # coding: utf-8
 """ Module for translating ONNX operators into Mxnet operatoes"""
 # pylint: disable=unused-argument,protected-access
+import numpy as np
 from . import translation_utils
 from .... import symbol
 
@@ -80,6 +81,22 @@ def divide(attrs, inputs, proto_obj):
         return op_value, new_attr, inputs
     return 'broadcast_div', new_attr, inputs
 
+def logical_and(attrs, inputs, proto_obj):
+    """Logical and of two input arrays."""
+    return 'broadcast_logical_and', attrs, inputs
+
+def logical_or(attrs, inputs, proto_obj):
+    """Logical or of two input arrays."""
+    return 'broadcast_logical_or', attrs, inputs
+
+def logical_xor(attrs, inputs, proto_obj):
+    """Logical xor of two input arrays."""
+    return 'broadcast_logical_xor', attrs, inputs
+
+def logical_not(attrs, inputs, proto_obj):
+    """Logical not of two input arrays."""
+    return 'logical_not', attrs, inputs
+
 def absolute(attrs, inputs, proto_obj):
     """Returns element-wise absolute value of the input."""
     return 'abs', attrs, inputs
@@ -97,7 +114,6 @@ def argmax(attrs, inputs, proto_obj):
     """Returns indices of the maximum values along an axis"""
     return 'argmax', attrs, inputs
 
-
 def argmin(attrs, inputs, proto_obj):
     """Returns indices of the minimum values along an axis."""
     return 'argmin', attrs, inputs
@@ -130,6 +146,18 @@ def minimum(attrs, inputs, proto_obj):
         mxnet_op = inputs[0]
     return mxnet_op, attrs, inputs
 
+def lesser(attrs, inputs, proto_obj):
+    """Logical Lesser operator with broadcasting."""
+    return 'broadcast_lesser', attrs, inputs
+
+def greater(attrs, inputs, proto_obj):
+    """Logical Greater operator with broadcasting."""
+    return 'broadcast_greater', attrs, inputs
+
+def equal(attrs, inputs, proto_obj):
+    """Logical Equal operator with broadcasting."""
+    return 'broadcast_equal', attrs, inputs
+
 #Hyperbolic functions
 def tanh(attrs, inputs, proto_obj):
     """Returns the hyperbolic tangent of the input array."""
@@ -151,6 +179,10 @@ def concat(attrs, inputs, proto_obj):
     return 'concat', new_attrs, inputs
 
 # Basic neural network functions
+def softsign(attrs, inputs, proto_obj):
+    """Computes softsign of x element-wise."""
+    return 'softsign', attrs, inputs
+
 def sigmoid(attrs, inputs, proto_obj):
     """Computes elementwise sigmoid of the input array"""
     return 'sigmoid', attrs, inputs
@@ -183,6 +215,11 @@ def batch_norm(attrs, inputs, proto_obj):
     new_attrs['fix_gamma'] = not attrs.get('is_test', 1)
     return 'BatchNorm', new_attrs, inputs
 
+def instance_norm(attrs, inputs, proto_obj):
+    """Instance Normalization."""
+    new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon' : 'eps'})
+    return 'InstanceNorm', new_attrs, inputs
+
 def leaky_relu(attrs, inputs, proto_obj):
     """Leaky Relu function"""
     if 'alpha' in attrs:
@@ -211,6 +248,16 @@ def softmax(attrs, inputs, proto_obj):
         attrs = translation_utils._add_extra_attributes(attrs, {'axis': 1})
     return 'softmax', attrs, inputs
 
+def log_softmax(attrs, inputs, proto_obj):
+    """Computes the log softmax of the input. This is equivalent to
+    computing softmax followed by log."""
+    return 'log_softmax', attrs, inputs
+
+def softplus(attrs, inputs, proto_obj):
+    """Applies the sofplus activation function element-wise to the input."""
+    new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type' : 'softrelu'})
+    return 'Activation', new_attrs, inputs
+
 def conv(attrs, inputs, proto_obj):
     """Compute N-D convolution on (N+2)-D input."""
     new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
@@ -389,15 +436,9 @@ def transpose(attrs, inputs, proto_obj):
 
 def squeeze(attrs, inputs, proto_obj):
     """Remove single-dimensional entries from the shape of a tensor."""
-    # MXNet doesnt have a squeeze operator.
-    # Using "split" to perform similar operation.
     new_attrs = translation_utils._fix_attribute_names(attrs,
                                                        {'axes' : 'axis'})
-    axes = new_attrs.get('axis')
-    mxnet_op = symbol.split(inputs[0], axis=axes[0], num_outputs=1, squeeze_axis=1)
-    for i in axes[1:]:
-        mxnet_op = symbol.split(mxnet_op, axis=i-1, num_outputs=1, squeeze_axis=1)
-    return mxnet_op, new_attrs, inputs
+    return 'squeeze', new_attrs, inputs
 
 def unsqueeze(attrs, inputs, cls):
     """Inserts a new axis of size 1 into the array shape"""
@@ -417,6 +458,16 @@ def flatten(attrs, inputs, proto_obj):
     new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
     return 'Flatten', new_attrs, inputs
 
+def clip(attrs, inputs, proto_obj):
+    """Clips (limits) the values in an array."""
+    new_attrs = translation_utils._fix_attribute_names(attrs, {'min' : 'a_min',
+                                                               'max' : 'a_max'})
+    if 'a_max' not in new_attrs:
+        new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_max' : np.inf})
+    if 'a_min' not in new_attrs:
+        new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_min' : -np.inf})
+    return 'clip', new_attrs, inputs
+
 #Powers
 def reciprocal(attrs, inputs, proto_obj):
     """Returns the reciprocal of the argument, element-wise."""
@@ -454,20 +505,49 @@ def reduce_mean(attrs, inputs, proto_obj):
     return 'mean', new_attrs, inputs
 
 def reduce_min(attrs, inputs, proto_obj):
-    """Reduce the array along a given axis by mean value"""
+    """Reduce the array along a given axis by minimum value"""
     new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
     return 'min', new_attrs, inputs
 
 def reduce_sum(attrs, inputs, proto_obj):
-    """Reduce the array along a given axis by mean value"""
+    """Reduce the array along a given axis by sum value"""
     new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
     return 'sum', new_attrs, inputs
 
 def reduce_prod(attrs, inputs, proto_obj):
-    """Reduce the array along a given axis by mean value"""
+    """Reduce the array along a given axis by product value"""
     new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
     return 'prod', new_attrs, inputs
 
+def reduce_log_sum(attrs, inputs, proto_obj):
+    """Reduce the array along a given axis by log sum value"""
+    keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
+    sum_op = symbol.sum(inputs[0], axis=attrs.get('axes'),
+                        keepdims=keep_dims)
+    log_sym = symbol.log(sum_op)
+    return log_sym, attrs, inputs
+
+def reduce_log_sum_exp(attrs, inputs, proto_obj):
+    """Reduce the array along a given axis by log sum exp value"""
+    keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
+    exp_op = symbol.exp(inputs[0])
+    sum_op = symbol.sum(exp_op, axis=attrs.get('axes'),
+                        keepdims=keep_dims)
+    log_sym = symbol.log(sum_op)
+    return log_sym, attrs, inputs
+
+def reduce_sum_square(attrs, inputs, proto_obj):
+    """Reduce the array along a given axis by sum square value"""
+    square_op = symbol.square(inputs[0])
+    sum_op = symbol.sum(square_op, axis=attrs.get('axes'),
+                        keepdims=attrs.get('keepdims'))
+    return sum_op, attrs, inputs
+
+def reduce_l2(attrs, inputs, proto_obj):
+    """Reduce input tensor by l2 normalization."""
+    new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
+    return 'norm', new_attrs, inputs
+
 def avg_pooling(attrs, inputs, proto_obj):
     """ Average pooling"""
     new_attrs = translation_utils._fix_attribute_names(attrs,
@@ -497,3 +577,11 @@ def max_pooling(attrs, inputs, proto_obj):
     new_op = translation_utils._fix_pooling('max', inputs, new_attrs)
 
     return new_op, new_attrs, inputs
+
+def max_roi_pooling(attrs, inputs, proto_obj):
+    """Max ROI Pooling."""
+    new_attrs = translation_utils._fix_attribute_names(attrs,
+                                                       {'pooled_shape': 'pooled_size',
+                                                        'spatial_scale': 'spatial_scale'
+                                                       })
+    return 'ROIPooling', new_attrs, inputs
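
The composed reductions above (reduce_log_sum, reduce_log_sum_exp,
reduce_sum_square) build a small graph of existing symbols rather than
mapping to a single MXNet operator. An illustrative ndarray check of the
log-sum-exp composition (shapes and axes below are arbitrary values chosen
for illustration, not taken from the PR):

    import numpy as np
    import mxnet as mx

    x = np.random.rand(2, 3, 4).astype('float32')
    axes, keepdims = (1, 2), True

    # Reference result in numpy: log(sum(exp(x))) over the given axes.
    expected = np.log(np.exp(x).sum(axis=axes, keepdims=keepdims))

    # Same composition via mxnet.ndarray, mirroring the symbol calls above.
    data = mx.nd.array(x)
    result = mx.nd.log(mx.nd.sum(mx.nd.exp(data), axis=axes, keepdims=keepdims))
    np.testing.assert_almost_equal(result.asnumpy(), expected, decimal=5)
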
diff --git a/tests/python-pytest/onnx/import/onnx_import_test.py b/tests/python-pytest/onnx/import/onnx_import_test.py
index 741ae1febb1..573dd74a471 100644
--- a/tests/python-pytest/onnx/import/onnx_import_test.py
+++ b/tests/python-pytest/onnx/import/onnx_import_test.py
@@ -77,6 +77,78 @@ def test_broadcast():
     output = bkd_rep.run([input1, input2])
     npt.assert_almost_equal(output[0], numpy_op)
 
+@with_seed()
+def test_greater():
+    """Test for logical greater in onnx operators."""
+    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
+    input2 = np.random.rand(1, 5).astype("float32")
+    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
+              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]
+
+    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]
+
+    nodes = [helper.make_node("Greater", ["input1", "input2"], ["output"])]
+
+    graph = helper.make_graph(nodes,
+                              "greater_test",
+                              inputs,
+                              outputs)
+
+    greater_model = helper.make_model(graph)
+    
+    bkd_rep = mxnet_backend.prepare(greater_model)
+    numpy_op = np.greater(input1, input2).astype(np.float32)
+    output = bkd_rep.run([input1, input2])
+    npt.assert_almost_equal(output[0], numpy_op)
+
+@with_seed()
+def test_lesser():
+    """Test for logical greater in onnx operators."""
+    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
+    input2 = np.random.rand(1, 5).astype("float32")
+    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
+              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]
+
+    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]
+
+    nodes = [helper.make_node("Less", ["input1", "input2"], ["output"])]
+
+    graph = helper.make_graph(nodes,
+                              "lesser_test",
+                              inputs,
+                              outputs)
+
+    lesser_model = helper.make_model(graph)
+
+    bkd_rep = mxnet_backend.prepare(lesser_model)
+    numpy_op = np.less(input1, input2).astype(np.float32)
+    output = bkd_rep.run([input1, input2])
+    npt.assert_almost_equal(output[0], numpy_op)
+    
+@with_seed()
+def test_equal():
+    """Test for logical greater in onnx operators."""
+    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
+    input2 = np.random.rand(1, 5).astype("float32")
+    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
+              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]
+
+    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]
+
+    nodes = [helper.make_node("Equal", ["input1", "input2"], ["output"])]
+
+    graph = helper.make_graph(nodes,
+                              "equal_test",
+                              inputs,
+                              outputs)
+
+    equal_model = helper.make_model(graph)
+
+    bkd_rep = mxnet_backend.prepare(equal_model)
+    numpy_op = np.equal(input1, input2).astype(np.float32)
+    output = bkd_rep.run([input1, input2])
+    npt.assert_almost_equal(output[0], numpy_op)
+
 def test_super_resolution_example():
     """Test the super resolution example in the example/onnx folder"""
     sys.path.insert(0, os.path.join(CURR_PATH, '../../../../example/onnx/'))
diff --git a/tests/python-pytest/onnx/import/test_cases.py b/tests/python-pytest/onnx/import/test_cases.py
index 8e6dc443bba..1a4d8c4fe37 100644
--- a/tests/python-pytest/onnx/import/test_cases.py
+++ b/tests/python-pytest/onnx/import/test_cases.py
@@ -58,6 +58,16 @@
     'test_argmax',
     'test_argmin',
     'test_min',
+    'test_logical_and',
+    'test_logical_xor',
+    'test_logical_not',
+    'test_logical_or',
+    'test_clip',
+    'test_softsign',
+    'test_reduce_l2',
+    'test_reduce_log_sum',
+    'test_reduce_log_sum_exp',
+    'test_reduce_sum_square',
     #pytorch operator tests
     'test_operator_exp',
     'test_operator_maxpool',

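With these translations registered, ONNX models using the newly supported
operators can be loaded through the contrib import API, for example (the
model file name below is a placeholder):

    from mxnet.contrib import onnx as onnx_mxnet

    # 'model.onnx' is hypothetical; any model using the newly supported ops
    # (Clip, ReduceL2, InstanceNormalization, MaxRoiPool, ...) should import.
    sym, arg_params, aux_params = onnx_mxnet.import_model('model.onnx')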

 
