Posted to commits@mxnet.apache.org by zh...@apache.org on 2021/02/06 21:05:43 UTC

[incubator-mxnet] branch v1.x updated: enable 3d convolution (#19855)

This is an automated email from the ASF dual-hosted git repository.

zha0q1 pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.x by this push:
     new 7fba865  enable 3d convolution (#19855)
7fba865 is described below

commit 7fba8653d4e78d77556ae9dcefb8bca7a75a2307
Author: Zhaoqi Zhu <zh...@gmail.com>
AuthorDate: Sat Feb 6 13:03:40 2021 -0800

    enable 3d convolution (#19855)
---
 .../mxnet/contrib/onnx/mx2onnx/_op_translations.py |  19 ++-
 tests/python-pytest/onnx/test_operators.py         | 173 +++++++++++++++++++--
 2 files changed, 168 insertions(+), 24 deletions(-)
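
For context, what this commit enables end to end: a Gluon network built on 3-D
Convolution/Pooling in NCDHW layout can now go through the mx2onnx exporter. A
minimal sketch of the round trip that the new tests automate (file names are
illustrative, and the exact keyword names of export_model vary slightly across
1.x releases):

    import mxnet as mx
    import numpy as np
    from mxnet.contrib import onnx as onnx_mxnet

    class Conv3D(mx.gluon.HybridBlock):
        def hybrid_forward(self, F, x, w):
            # 3-D convolution in NCDHW layout, the case this commit enables
            return F.Convolution(x, w, kernel=(3, 3, 3), num_filter=4,
                                 num_group=1, no_bias=True, layout='NCDHW')

    model = Conv3D()
    model.initialize()
    model.hybridize()
    x = mx.nd.random.uniform(shape=(1, 3, 16, 16, 16))
    w = mx.nd.random.uniform(shape=(4, 3, 3, 3, 3))
    model(x, w)                       # one forward pass so export has a cached graph
    model.export('conv3d', epoch=0)   # writes conv3d-symbol.json / conv3d-0000.params
    onnx_mxnet.export_model('conv3d-symbol.json', 'conv3d-0000.params',
                            [x.shape, w.shape], np.float32, 'conv3d.onnx')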

diff --git a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
index 3240077..f472e18 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
+++ b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
@@ -238,15 +238,16 @@ def convert_convolution(node, **kwargs):
     name, input_nodes, attrs = get_inputs(node, kwargs)
 
     kernel = convert_string_to_list(attrs.get('kernel', '()'))
-    stride = convert_string_to_list(attrs.get('stride', '(1, 1)'))
-    dilate = convert_string_to_list(attrs.get('dilate', '(1, 1)'))
-    pad = convert_string_to_list(attrs.get('pad', '(0, 0)'))
+    stride = convert_string_to_list(attrs.get('stride', '()'))
+    dilate = convert_string_to_list(attrs.get('dilate', '()'))
+    pad = convert_string_to_list(attrs.get('pad', '()'))
     num_group = int(attrs.get('num_group', 1))
     no_bias = attrs.get('no_bias', 'False')
     layout = attrs.get('layout', 'NCHW')
 
-    if layout != 'NCHW':
-        raise NotImplementedError('Pooling currently does not support layout!=\'NCHW\'')
+    if layout not in ['NCHW', 'NCDHW']:
+        raise NotImplementedError('Convolution currently does not support layout not in '
+                                  '[\'NCHW\', \'NCDHW\']')
 
     if no_bias == 'True':
         assert len(input_nodes) == 2, 'Convolution takes 2 inputs if no_bias==True'
@@ -707,7 +708,7 @@ def convert_pooling(node, **kwargs):
     global_pool = attrs.get('global_pool', 'False')
     _ = attrs.get('cudnn_off', 'False')
     pooling_convention = attrs.get('pooling_convention', 'valid')
-    stride = convert_string_to_list(attrs.get('stride', '(1, 1)'))
+    stride = convert_string_to_list(attrs.get('stride', '()'))
     pad = convert_string_to_list(attrs.get('pad', '()'))
     p_value = int(attrs.get('p_value', '0'))
     count_include_pad = attrs.get('count_include_pad', 'True')
@@ -721,8 +722,10 @@ def convert_pooling(node, **kwargs):
     if pool_type == 'lp' and global_pool == 'False' and pooling_convention != 'valid':
         raise NotImplementedError('Pooling currently does not support '
                                   'pooling_convention!=\'valid\' when pool_type==\'lp\' and global_pool==False')
-    if layout != 'NCHW':
-        raise NotImplementedError('Pooling currently does not support layout!=\'NCHW\'')
+
+    if layout not in ['NCHW', 'NCDHW']:
+        raise NotImplementedError('Pooling currently does not support layout not in '
+                                  '[\'NCHW\', \'NCDHW\']')
 
     kwargs_ = {}
     if kernel:
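
In the hunks above, the 2-D specific defaults '(1, 1)' and '(0, 0)' for stride,
dilate, and pad become empty tuples, so a missing attribute no longer silently
assumes two spatial dimensions, and the layout checks widen from NCHW-only to
NCHW/NCDHW. With empty defaults, per-dimension values can be derived from the
kernel rank downstream. A sketch of that idea (derive_defaults is an
illustrative helper, not part of this patch):

    def derive_defaults(kernel, stride, dilate, pad):
        # infer the spatial rank from the kernel: 2 for NCHW, 3 for NCDHW
        ndim = len(kernel)
        stride = stride if stride else [1] * ndim
        dilate = dilate if dilate else [1] * ndim
        pad = pad if pad else [0] * ndim
        return stride, dilate, pad

    # e.g. a 3-D kernel with stride/dilate/pad left unset:
    assert derive_defaults((3, 3, 3), [], [], []) == ([1, 1, 1], [1, 1, 1], [0, 0, 0])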
diff --git a/tests/python-pytest/onnx/test_operators.py b/tests/python-pytest/onnx/test_operators.py
index a35f9b6..7dc5b5c 100644
--- a/tests/python-pytest/onnx/test_operators.py
+++ b/tests/python-pytest/onnx/test_operators.py
@@ -39,7 +39,7 @@ def def_model(op_name, dummy_input=False, **params):
                 return func(*inputs, **params)
     return Model
 
-def op_export_test(model_name, Model, inputs, tmp_path, dummy_input=False, onnx_map=None):
+def op_export_test(model_name, Model, inputs, tmp_path, dummy_input=False, onnx_map=None, mx_map=None):
     def export_to_onnx(model, model_name, inputs):
         model_path = '{}/{}'.format(tmp_path, model_name)
         model.export(model_path, epoch=0)
@@ -62,18 +62,20 @@ def op_export_test(model_name, Model, inputs, tmp_path, dummy_input=False, onnx_
     model = Model()
     model.initialize(ctx=mx.cpu(0))
     model.hybridize()
-    pred_nat = model(*inputs)
+    pred_mx = model(*inputs)
     onnx_file = export_to_onnx(model, model_name, inputs)
     pred_onx = onnx_rt(onnx_file, inputs)
     if dummy_input:
-        pred_nat = pred_nat[0]
-    if isinstance(pred_nat, list):
-        for i in range(len(pred_nat)):
+        pred_mx = pred_mx[0]
+    if isinstance(pred_mx, list):
+        for i in range(len(pred_mx)):
             pred_onx_i = onnx_map(pred_onx[i]) if onnx_map else pred_onx[i]
-            assert_almost_equal(pred_nat[i], pred_onx_i, equal_nan=True)
+            pred_mx_i = mx_map(pred_mx[i]) if mx_map else pred_mx[i]
+            assert_almost_equal(pred_onx_i, pred_mx_i, equal_nan=True)
     else:
         pred_onx = onnx_map(pred_onx[0]) if onnx_map else pred_onx[0]
-        assert_almost_equal(pred_nat, pred_onx, equal_nan=True)
+        pred_mx = mx_map(pred_mx) if mx_map else pred_mx
+        assert_almost_equal(pred_onx, pred_mx, equal_nan=True)
 
 
 def test_onnx_export_abs(tmp_path):
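
The test-harness changes above rename pred_nat to pred_mx and add an mx_map
hook mirroring onnx_map: each is an optional callable applied to the
corresponding output before assert_almost_equal, so a test can normalize both
sides of the comparison. The new 3-D average-pooling test uses it like this
(excerpted from this patch):

    # onnxruntime outputs are numpy arrays, while mxnet outputs are NDArrays,
    # so the mxnet-side map converts to numpy before applying np.nan_to_num
    def mx_nan_to_num(a):
        return np.nan_to_num(a.asnumpy())

    op_export_test('pooling_avg_3d', M, [x], tmp_path,
                   onnx_map=np.nan_to_num, mx_map=mx_nan_to_num)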
@@ -785,7 +787,7 @@ def test_onnx_export_pooling_avg(tmp_path, dtype, shape, count_include_pad, pool
     if pooling_convention == 'full' and count_include_pad == True:
         return
     # onnxruntime requires that pad is smaller than kernel
-    if pad and pad[0] >= kernel[0] and pad[1] >= kernel[1]:
+    if pad and (pad[0] >= kernel[0] or pad[1] >= kernel[1]):
         return
     x = mx.random.uniform(0, 1, shape, dtype=dtype)
     kwargs = {}
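
The one-line guard fix above (repeated in the analogous hunks below) matters
because onnxruntime rejects a pad that reaches the kernel size in any
dimension; the old condition used 'and', so it only skipped cases where every
dimension violated the constraint, and e.g. kernel=(3, 3) with pad=(3, 1)
slipped through. A dimension-agnostic form of the same skip condition, for
illustration only:

    def pad_too_large(pad, kernel):
        # True when any spatial dimension has pad >= kernel
        return pad is not None and any(p >= k for p, k in zip(pad, kernel))

    assert pad_too_large((3, 1), (3, 3))            # 2-D: first dim violates
    assert not pad_too_large((1, 2, 2), (3, 3, 3))  # 3-D: all dims are fine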
@@ -796,13 +798,47 @@ def test_onnx_export_pooling_avg(tmp_path, dtype, shape, count_include_pad, pool
     if pad:
         kwargs['pad'] = pad
     M = def_model('Pooling', count_include_pad=count_include_pad, pool_type='avg',
-                  pooling_convention=pooling_convention, **kwargs)
+                  pooling_convention=pooling_convention, layout='NCHW', **kwargs)
     # Note here we use np.nan_to_num to map the onnx output because onnxruntime AveragePool will
     # output NaN in some edge cases where mxnet outputs 0
     op_export_test('pooling_avg', M, [x], tmp_path, onnx_map=np.nan_to_num)
 
 
 @pytest.mark.parametrize('dtype', ['float32'])
+@pytest.mark.parametrize('shape', [(1, 3, 16, 16, 16), (1, 1, 10, 18, 18)])
+@pytest.mark.parametrize('count_include_pad', [True, False])
+@pytest.mark.parametrize('pooling_convention', ['full', 'valid'])
+@pytest.mark.parametrize('kernel', [(1, 1, 1), (3, 3, 3), (1, 7, 7)])
+@pytest.mark.parametrize('stride', [None, (1, 1, 1), (1, 2, 3)])
+@pytest.mark.parametrize('pad', [None, (0, 1, 1), (1, 2, 3)])
+def test_onnx_export_pooling_avg_3d(tmp_path, dtype, shape, count_include_pad, pooling_convention,
+                                    kernel, stride, pad):
+    # mxnet and onnxruntime have different implementations of count_include_pad on the left
+    # column and bottom row
+    if pooling_convention == 'full' and count_include_pad == True:
+        return
+    # onnxruntime requires that pad is smaller than kernel
+    if pad and (pad[0] >= kernel[0] or pad[1] >= kernel[1] or pad[2] >= kernel[2]):
+        return
+    x = mx.random.uniform(0, 1, shape, dtype=dtype)
+    kwargs = {}
+    if kernel:
+        kwargs['kernel'] = kernel
+    if stride:
+        kwargs['stride'] = stride
+    if pad:
+        kwargs['pad'] = pad
+    M = def_model('Pooling', count_include_pad=count_include_pad, pool_type='avg',
+                  pooling_convention=pooling_convention, layout='NCDHW', **kwargs)
+    # Note here we use np.nan_to_num to map both outputs, because onnxruntime AveragePool
+    # will output NaN in some edge cases where mxnet outputs 0
+    def mx_nan_to_num(a):
+        return np.nan_to_num(a.asnumpy())
+    op_export_test('pooling_avg_3d', M, [x], tmp_path, onnx_map=np.nan_to_num, mx_map=mx_nan_to_num)
+
+
+@pytest.mark.parametrize('dtype', ['float32'])
 @pytest.mark.parametrize('shape', [(1, 3, 64, 64), (2, 1, 60, 60)])
 @pytest.mark.parametrize('pooling_convention', ['full', 'valid'])
 @pytest.mark.parametrize('kernel', [(3, 3), (4, 5), (14, 14)])
@@ -810,7 +846,7 @@ def test_onnx_export_pooling_avg(tmp_path, dtype, shape, count_include_pad, pool
 @pytest.mark.parametrize('pad', [None, (1, 1), (3, 4), (4, 5)])
 def test_onnx_export_pooling_max(tmp_path, dtype, shape, pooling_convention, kernel, stride, pad):
     # onnxruntime requires that pad is smaller than kernel
-    if pad and pad[0] >= kernel[0] and pad[1] >= kernel[1]:
+    if pad and (pad[0] >= kernel[0] or pad[1] >= kernel[1]):
         return
     x = mx.random.uniform(0, 1, shape, dtype=dtype)
     kwargs = {}
@@ -820,11 +856,35 @@ def test_onnx_export_pooling_max(tmp_path, dtype, shape, pooling_convention, ker
         kwargs['stride'] = stride
     if pad:
         kwargs['pad'] = pad
-    M = def_model('Pooling', pool_type='max', pooling_convention=pooling_convention, **kwargs)
+    M = def_model('Pooling', pool_type='max', pooling_convention=pooling_convention,
+                  layout='NCHW', **kwargs)
     op_export_test('pooling_max', M, [x], tmp_path)
 
 
 @pytest.mark.parametrize('dtype', ['float32'])
+@pytest.mark.parametrize('shape', [(1, 3, 16, 16, 16), (1, 1, 10, 18, 18)])
+@pytest.mark.parametrize('pooling_convention', ['full', 'valid'])
+@pytest.mark.parametrize('kernel', [(1, 1, 1), (3, 3, 3), (1, 7, 7)])
+@pytest.mark.parametrize('stride', [None, (1, 1, 1), (1, 2, 3)])
+@pytest.mark.parametrize('pad', [None, (0, 1, 1), (1, 2, 3)])
+def test_onnx_export_pooling_max_3d(tmp_path, dtype, shape, pooling_convention, kernel, stride, pad):
+    # onnxruntime requires that pad is smaller than kernel
+    if pad and (pad[0] >= kernel[0] or pad[1] >= kernel[1] or pad[2] >= kernel[2]):
+        return
+    x = mx.random.uniform(0, 1, shape, dtype=dtype)
+    kwargs = {}
+    if kernel:
+        kwargs['kernel'] = kernel
+    if stride:
+        kwargs['stride'] = stride
+    if pad:
+        kwargs['pad'] = pad
+    M = def_model('Pooling', pool_type='max', pooling_convention=pooling_convention,
+                  layout='NCDHW', **kwargs)
+    op_export_test('pooling_max_3d', M, [x], tmp_path)
+
+
+@pytest.mark.parametrize('dtype', ['float32'])
 @pytest.mark.parametrize('shape', [(1, 3, 64, 64), (2, 1, 60, 60)])
 @pytest.mark.parametrize('p_value', [1, 2])
 @pytest.mark.parametrize('kernel', [(3, 3), (4, 5), (14, 14)])
@@ -832,7 +892,7 @@ def test_onnx_export_pooling_max(tmp_path, dtype, shape, pooling_convention, ker
 @pytest.mark.parametrize('pad', [None, (1, 1), (3, 4), (4, 5)])
 def test_onnx_export_pooling_lp(tmp_path, dtype, shape, p_value, kernel, stride, pad):
     # onnxruntime requires that pad is smaller than kernel
-    if pad and pad[0] >= kernel[0] and pad[1] >= kernel[1]:
+    if pad and (pad[0] >= kernel[0] or pad[1] >= kernel[1]):
         return
     x = mx.random.uniform(0, 1, shape, dtype=dtype)
     kwargs = {}
@@ -843,11 +903,34 @@ def test_onnx_export_pooling_lp(tmp_path, dtype, shape, p_value, kernel, stride,
     if pad:
         kwargs['pad'] = pad
     M = def_model('Pooling', pool_type='lp', pooling_convention='valid',
-                  p_value=p_value, **kwargs)
+                  p_value=p_value, layout='NCHW', **kwargs)
     op_export_test('pooling_lp', M, [x], tmp_path)
 
 
 @pytest.mark.parametrize('dtype', ['float32'])
+@pytest.mark.parametrize('shape', [(1, 3, 16, 16, 16), (1, 1, 10, 18, 18)])
+@pytest.mark.parametrize('p_value', [1, 2])
+@pytest.mark.parametrize('kernel', [(1, 1, 1), (3, 3, 3), (1, 7, 7)])
+@pytest.mark.parametrize('stride', [None, (1, 1, 1), (1, 2, 3)])
+@pytest.mark.parametrize('pad', [None, (0, 1, 1), (1, 2, 3)])
+def test_onnx_export_pooling_lp_3d(tmp_path, dtype, shape, p_value, kernel, stride, pad):
+    # onnxruntime requires that pad is smaller than kernel
+    if pad and (pad[0] >= kernel[0] or pad[1] >= kernel[1] or pad[2] >= kernel[2]):
+        return
+    x = mx.random.uniform(0, 1, shape, dtype=dtype)
+    kwargs = {}
+    if kernel:
+        kwargs['kernel'] = kernel
+    if stride:
+        kwargs['stride'] = stride
+    if pad:
+        kwargs['pad'] = pad
+    M = def_model('Pooling', pool_type='lp', pooling_convention='valid',
+                  p_value=p_value, layout='NCDHW', **kwargs)
+    op_export_test('pooling_lp_3d', M, [x], tmp_path)
+
+
+@pytest.mark.parametrize('dtype', ['float32'])
 @pytest.mark.parametrize('shape', [(1, 3, 64, 64), (2, 1, 60, 60)])
 @pytest.mark.parametrize('pool_type', ['avg', 'max', 'lp'])
 @pytest.mark.parametrize('p_value', [1, 2])
@@ -856,7 +939,7 @@ def test_onnx_export_pooling_lp(tmp_path, dtype, shape, p_value, kernel, stride,
 @pytest.mark.parametrize('pad', [None, (3, 4)])
 def test_onnx_export_pooling_global(tmp_path, dtype, shape, pool_type, p_value, kernel, stride, pad):
     # onnxruntime requires that pad is smaller than kernel
-    if pad and pad[0] >= kernel[0] and pad[1] >= kernel[1]:
+    if pad and (pad[0] >= kernel[0] or pad[1] >= kernel[1]):
         return
     x = mx.random.uniform(0, 1, shape, dtype=dtype)
     kwargs = {}
@@ -868,10 +951,35 @@ def test_onnx_export_pooling_global(tmp_path, dtype, shape, pool_type, p_value,
         kwargs['pad'] = pad
     # kernel, stride, and pad should have no effect on the results
     M = def_model('Pooling', global_pool=True, pool_type=pool_type, pooling_convention='valid',
-                  p_value=p_value, **kwargs)
+                  p_value=p_value, layout='NCHW', **kwargs)
     op_export_test('pooling_global', M, [x], tmp_path)
 
 
+@pytest.mark.parametrize('dtype', ['float32'])
+@pytest.mark.parametrize('shape', [(1, 3, 16, 16, 16), (1, 1, 10, 18, 18)])
+@pytest.mark.parametrize('pool_type', ['avg', 'max', 'lp'])
+@pytest.mark.parametrize('p_value', [1, 2])
+@pytest.mark.parametrize('kernel', [(1, 1, 1), (3, 3, 3)])
+@pytest.mark.parametrize('stride', [None, (1, 1, 1)])
+@pytest.mark.parametrize('pad', [None, (0, 1, 1)])
+def test_onnx_export_pooling_global_3d(tmp_path, dtype, shape, pool_type, p_value, kernel, stride, pad):
+    # onnxruntime requires that pad is smaller than kernel
+    if pad and (pad[0] >= kernel[0] or pad[1] >= kernel[1] or pad[2] >= kernel[2]):
+        return
+    x = mx.random.uniform(0, 1, shape, dtype=dtype)
+    kwargs = {}
+    if kernel:
+        kwargs['kernel'] = kernel
+    if stride:
+        kwargs['stride'] = stride
+    if pad:
+        kwargs['pad'] = pad
+    # kernel, stride, and pad should have no effect on the results
+    M = def_model('Pooling', global_pool=True, pool_type=pool_type, pooling_convention='valid',
+                  p_value=p_value, layout='NCDHW', **kwargs)
+    op_export_test('pooling_global_3d', M, [x], tmp_path)
+
+
 @pytest.mark.parametrize('dtype', ['float16', 'float32'])
 def test_onnx_export_log2(tmp_path, dtype):
     x = mx.random.normal(0, 10, (2, 3, 4, 5)).astype(dtype)
@@ -929,7 +1037,40 @@ def test_onnx_export_convolution(tmp_path, dtype, shape, num_filter, num_group,
     if dilate:
         kwargs['dilate'] = dilate
     M = def_model('Convolution', num_filter=num_filter, num_group=num_group,  no_bias=no_bias,
-                  **kwargs)
+                  layout='NCHW', **kwargs)
+    inputs = [x, w] if no_bias else [x, w, b]
+    op_export_test('convolution', M, inputs, tmp_path)
+
+
+@pytest.mark.parametrize('dtype', ['float32'])
+@pytest.mark.parametrize('shape', [(1, 4, 16, 16, 16), (1, 3, 10, 18, 18)])
+@pytest.mark.parametrize('num_filter', [2, 4, 32])
+@pytest.mark.parametrize('num_group', [1, 2])
+@pytest.mark.parametrize('no_bias', [True, False])
+@pytest.mark.parametrize('kernel', [(3, 3, 3), (1, 1, 1), (1, 7, 7)])
+@pytest.mark.parametrize('stride', [None, (1, 1, 1), (1, 2, 3)])
+@pytest.mark.parametrize('pad', [None, (0, 1, 1), (1, 2, 3)])
+@pytest.mark.parametrize('dilate', [None, (2, 2, 2)])
+def test_onnx_export_convolution_3d(tmp_path, dtype, shape, num_filter, num_group, no_bias,
+                                    kernel, stride, pad, dilate):
+    if shape[1] % num_group:
+        return
+    x = mx.random.uniform(0, 1, shape, dtype=dtype)
+    w_shape = (num_filter,) + (shape[1] // num_group,) + kernel
+    w = mx.random.uniform(0, 1, w_shape, dtype=dtype)
+    b_shape = (num_filter,)
+    b = mx.random.uniform(0, 1, b_shape, dtype=dtype)
+    kwargs = {}
+    if kernel:
+        kwargs['kernel'] = kernel
+    if stride:
+        kwargs['stride'] = stride
+    if pad:
+        kwargs['pad'] = pad
+    if dilate:
+        kwargs['dilate'] = dilate
+    M = def_model('Convolution', num_filter=num_filter, num_group=num_group,  no_bias=no_bias,
+                  layout='NCDHW', **kwargs)
     inputs = [x, w] if no_bias else [x, w, b]
     op_export_test('convolution', M, inputs, tmp_path)
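
One detail worth calling out in the new 3-D convolution test: the weight shape
for grouped convolution follows (num_filter, in_channels // num_group, kD, kH,
kW), which is exactly what w_shape computes above. A quick check of that
arithmetic:

    # grouped 3-D convolution weight shape, as computed in the test above
    num_filter, num_group, shape, kernel = 4, 2, (1, 4, 16, 16, 16), (3, 3, 3)
    w_shape = (num_filter,) + (shape[1] // num_group,) + kernel
    assert w_shape == (4, 2, 3, 3, 3)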