Posted to commits@mxnet.apache.org by sk...@apache.org on 2021/02/10 06:43:50 UTC

[incubator-mxnet] branch v1.x updated: [v1.x] Attempt to fix v1.x CI issues. (#19872)

This is an automated email from the ASF dual-hosted git repository.

skm pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.x by this push:
     new bf9e62a  [v1.x] Attempt to fix v1.x CI issues. (#19872)
bf9e62a is described below

commit bf9e62a60ebca0ad22dcb7ffae080abdbd1db380
Author: Joe Evans <jo...@gmail.com>
AuthorDate: Tue Feb 9 22:40:44 2021 -0800

    [v1.x] Attempt to fix v1.x CI issues. (#19872)
    
    * Attempt to fix v1.x CI issues.
    
    * Re-pin scipy.
    
    * Add numpy with pinned version so other package installs don't overwrite our required version.
    
    * Use python3 (from /usr/local/bin) for tensorrt gpu tests, so it can find all required python modules.
    
    * Fix onnx tests; need to pass scalar value (not np.array) to create_const_scalar_node.
    
    * Fix pylint
    
    * Set values using np.dtype(dtype) instead of using float32 and then casting to the desired type.
    
    * Skip 2 tests that are flaky, reported in issue https://github.com/apache/incubator-mxnet/issues/19877.
    
    Co-authored-by: Joe Evans <jo...@amazon.com>
---
 ci/docker/install/requirements                     |  2 +-
 ci/docker/install/ubuntu_onnx.sh                   |  2 +-
 ci/docker/install/ubuntu_python.sh                 |  4 ++--
 ci/docker/runtime_functions.sh                     |  2 +-
 .../mxnet/contrib/onnx/mx2onnx/_op_translations.py | 24 ++++++++++------------
 tests/python/unittest/test_gluon_data.py           |  2 ++
 6 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/ci/docker/install/requirements b/ci/docker/install/requirements
index a5ff7f5..ce94be7 100644
--- a/ci/docker/install/requirements
+++ b/ci/docker/install/requirements
@@ -31,4 +31,4 @@ pylint==2.3.1  # pylint and astroid need to be aligned
 astroid==2.3.3  # pylint and astroid need to be aligned
 requests<2.19.0,>=2.18.4
 scipy==1.2.1
-setuptools<50
+setuptools
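
Here setuptools is no longer capped below 50, while scipy stays pinned at 1.2.1. A minimal post-build sanity check, sketched under the assumption that the requirements file has already been installed (this snippet is not part of the CI scripts):

    # Hedged sketch: verify the requirements pins after the image builds.
    # Not part of the CI scripts; assumes scipy and setuptools are installed.
    import pkg_resources

    assert pkg_resources.get_distribution("scipy").version == "1.2.1"
    # setuptools is now unpinned, so just report whatever version resolved
    print("setuptools", pkg_resources.get_distribution("setuptools").version)
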
diff --git a/ci/docker/install/ubuntu_onnx.sh b/ci/docker/install/ubuntu_onnx.sh
index ecb9f43..096a339 100755
--- a/ci/docker/install/ubuntu_onnx.sh
+++ b/ci/docker/install/ubuntu_onnx.sh
@@ -30,4 +30,4 @@ echo "Installing libprotobuf-dev and protobuf-compiler ..."
 apt-get update || true
 apt-get install -y libprotobuf-dev protobuf-compiler
 
-pip3 install pytest pytest-cov pytest-xdist protobuf==3.5.2 onnx==1.7.0 Pillow==5.0.0 tabulate==0.7.5 onnxruntime==1.6.0 gluonnlp gluoncv
+pip3 install pytest pytest-cov pytest-xdist protobuf==3.5.2 onnx==1.7.0 Pillow==5.0.0 tabulate==0.7.5 onnxruntime==1.6.0 'numpy>1.16.0,<1.19.0' gluonnlp gluoncv
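
Pinning numpy inline here matters because gluonnlp and gluoncv are installed in the same command and would otherwise be free to pull in a newer numpy. A quick range check, sketched as a standalone snippet rather than part of ubuntu_onnx.sh:

    # Hedged sketch: confirm the installed numpy stayed inside the pinned range.
    import numpy
    from distutils.version import LooseVersion

    v = LooseVersion(numpy.__version__)
    assert LooseVersion("1.16.0") < v < LooseVersion("1.19.0"), numpy.__version__
    print("numpy", numpy.__version__, "is within (1.16.0, 1.19.0)")
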
diff --git a/ci/docker/install/ubuntu_python.sh b/ci/docker/install/ubuntu_python.sh
index d31a18d..ea93067 100755
--- a/ci/docker/install/ubuntu_python.sh
+++ b/ci/docker/install/ubuntu_python.sh
@@ -26,9 +26,9 @@ apt-get update || true
 apt-get install -y software-properties-common
 add-apt-repository -y ppa:deadsnakes/ppa
 apt-get update || true
-apt-get install -y python3.6-dev virtualenv wget
+apt-get install -y python3.7-dev python3.7-distutils virtualenv wget
 # setup symlink in /usr/local/bin to override python3 version
-ln -sf /usr/bin/python3.6 /usr/local/bin/python3
+ln -sf /usr/bin/python3.7 /usr/local/bin/python3
 
 # the version of pip shipped with ubuntu may be too old; install a recent version here
 wget -nv https://bootstrap.pypa.io/get-pip.py
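
Because /usr/local/bin precedes /usr/bin on the default PATH, the updated symlink makes a bare `python3` resolve to 3.7. A minimal self-check from inside the container, sketched here as a standalone snippet run with `python3`:

    # Hedged sketch: confirm the /usr/local/bin/python3 override is in effect.
    import sys

    print(sys.executable)  # expected: the /usr/local/bin symlink or its target
    assert sys.version_info[:2] == (3, 7), sys.version_info
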
diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh
index 4d3730a..c389cf9 100755
--- a/ci/docker/runtime_functions.sh
+++ b/ci/docker/runtime_functions.sh
@@ -1077,7 +1077,7 @@ unittest_ubuntu_tensorrt_gpu() {
     export DMLC_LOG_STACK_TRACE_DEPTH=10
     pip3 install --extra-index-url https://developer.download.nvidia.com/compute/redist nvidia-dali-cuda100==0.24
     wget -nc http://data.mxnet.io/data/val_256_q90.rec
-    python3.6 tests/python/tensorrt/rec2idx.py val_256_q90.rec val_256_q90.idx
+    python3 tests/python/tensorrt/rec2idx.py val_256_q90.rec val_256_q90.idx
     nosetests-3.4 $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_trt_gpu.xml --verbose --nocapture tests/python/tensorrt/
     rm val_256_q90*
 }
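
The failure mode behind this change: pip3 installs packages into the Python 3.7 site-packages, so a hard-coded python3.6 interpreter cannot import them. A sketch of the kind of import check that now succeeds under `python3` (the module list is illustrative, not the exact TensorRT test dependencies):

    # Hedged sketch: pip3-installed modules are only visible to the
    # interpreter they were installed for; under python3.6 these imports fail.
    import importlib
    import sys

    print(sys.executable, sys.version_info[:2])
    for mod in ("numpy", "nose"):  # illustrative module list
        importlib.import_module(mod)
    print("test dependencies import cleanly")
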
diff --git a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
index c804126..778678e 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
+++ b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
@@ -47,6 +47,7 @@
 # coding: utf-8
 # pylint: disable=too-many-locals,no-else-return,too-many-lines
 # pylint: disable=anomalous-backslash-in-string,eval-used
+# pylint: disable=too-many-function-args
 """
 Conversion Functions for common layers.
 Add new functions here with a decorator.
@@ -162,7 +163,7 @@ def create_const_scalar_node(input_name, value, kwargs):
     initializer = kwargs["initializer"]
     input_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype]
     value_node = make_tensor_value_info(input_name, input_type, ())
-    tensor_node = make_tensor(input_name, input_type, (), (value,))
+    tensor_node = make_tensor(input_name, input_type, (), ([value]))
     initializer.append(tensor_node)
     return value_node
 
@@ -362,7 +363,7 @@ def convert_fully_connected(node, **kwargs):
     in_nodes = [name+'_data_flattened', input_nodes[1]]
 
     if no_bias:
-        nodes.append(create_const_scalar_node(name+'_bias', np.array([0], dtype=dtype), kwargs))
+        nodes.append(create_const_scalar_node(name+'_bias', np.int32(0).astype(dtype), kwargs))
         in_nodes.append(name+'_bias')
     else:
         in_nodes.append(input_nodes[2])
@@ -2430,7 +2431,7 @@ def convert_layer_norm(node, **kwargs):
         create_tensor([], name+"_void", kwargs["initializer"]),
         create_const_scalar_node(name+'_0_s', np.int64(0), kwargs),
         create_const_scalar_node(name+'_1_s', np.int64(1), kwargs),
-        create_const_scalar_node(name+"_2_s", np.array(2, dtype=dtype), kwargs),
+        create_const_scalar_node(name+"_2_s", np.int64(2).astype(dtype), kwargs),
         create_const_scalar_node(name+"_eps", np.float32(eps), kwargs),
         make_node("ReduceMean", [input_nodes[0]], [name+"_rm0_out"], axes=[axes]),
         make_node("Sub", [input_nodes[0], name+"_rm0_out"], [name+"_sub0_out"]),
@@ -2829,9 +2830,9 @@ def convert_arange_like(node, **kwargs):
         raise NotImplementedError("arange_like operator with repeat != 1 not yet implemented.")
 
     nodes = [
-        create_const_scalar_node(name+"_start", np.array([start], dtype=dtype), kwargs),
-        create_const_scalar_node(name+"_step", np.array([step], dtype=dtype), kwargs),
-        create_const_scalar_node(name+"_half_step", np.array([float(step)*0.5], dtype=dtype), kwargs),
+        create_const_scalar_node(name+"_start", np.dtype(dtype).type(start), kwargs),
+        create_const_scalar_node(name+"_step", np.dtype(dtype).type(step), kwargs),
+        create_const_scalar_node(name+"_half_step", np.dtype(dtype).type(float(step)*0.5), kwargs),
         create_tensor([], name+'_void', kwargs["initializer"])
     ]
     if axis == 'None':
@@ -2947,9 +2948,9 @@ def convert_arange(node, **kwargs):
         raise NotImplementedError("arange operator with repeat != 1 not yet implemented.")
 
     nodes = [
-        create_const_scalar_node(name+"_start", np.array([start], dtype=dtype), kwargs),
-        create_const_scalar_node(name+"_stop", np.array([stop], dtype=dtype), kwargs),
-        create_const_scalar_node(name+"_step", np.array([step], dtype=dtype), kwargs),
+        create_const_scalar_node(name+"_start", np.dtype(dtype).type(start), kwargs),
+        create_const_scalar_node(name+"_stop", np.dtype(dtype).type(stop), kwargs),
+        create_const_scalar_node(name+"_step", np.dtype(dtype).type(step), kwargs),
         make_node("Range", [name+"_start", name+"_stop", name+"_step"], [name], name=name)
     ]
 
@@ -2977,7 +2978,7 @@ def convert_reverse(node, **kwargs):
         create_tensor([axis], name+'_axis', kwargs['initializer']),
         create_tensor([axis+1], name+'_axis_p1', kwargs['initializer']),
         create_tensor([], name+'_void', kwargs['initializer']),
-        create_const_scalar_node(name+'_m1_s', np.array([-1], dtype='int64'), kwargs),
+        create_const_scalar_node(name+'_m1_s', np.int64(-1), kwargs),
         make_node('Shape', [input_nodes[0]], [name+'_shape']),
         make_node('Shape', [name+'_shape'], [name+'_dim']),
         make_node('Sub', [name+'_10', name+'_dim'], [name+'_sub']),
@@ -3188,7 +3189,6 @@ def convert_greater_scalar(node, **kwargs):
     else:
         if dtype == 'float16':
             # when using float16, we must convert it to np.uint16 view first
-            # pylint: disable=too-many-function-args
             scalar = np.float16(scalar).view(np.uint16)
 
     tensor_value = make_tensor(name+"_scalar", input_type, [1], [scalar])
@@ -3217,7 +3217,6 @@ def convert_lesser_scalar(node, **kwargs):
     else:
         if dtype == 'float16':
             # when using float16, we must convert it to np.uint16 view first
-            # pylint: disable=too-many-function-args
             scalar = np.float16(scalar).view(np.uint16)
 
     tensor_value = make_tensor(name+"_scalar", input_type, [1], [scalar])
@@ -3245,7 +3244,6 @@ def convert_equal_scalar(node, **kwargs):
     else:
         if dtype == 'float16':
             # when using float16, we must convert it to np.uint16 view first
-            # pylint: disable=too-many-function-args
             scalar = np.float16(scalar).view(np.uint16)
 
     tensor_value = make_tensor(name+"_scalar", input_type, [1], [scalar])
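
The recurring fix in this file swaps np.array([x], dtype=dtype) for a true NumPy scalar: create_const_scalar_node builds a rank-0 tensor, so it expects a value with a directly readable .dtype, and make_tensor receives it wrapped in a one-element list. The scalar construction in isolation (standalone numpy, outside the converter):

    # Hedged sketch of the np.dtype(dtype).type(...) pattern; numpy only.
    import numpy as np

    dtype = "float32"                  # dtype string as carried in the node attrs
    start = np.dtype(dtype).type(5)    # a typed numpy scalar, not an ndarray
    print(type(start), start.dtype)    # <class 'numpy.float32'> float32
    assert start.ndim == 0             # rank-0, matching the () shape in make_tensor
    # onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[start.dtype] now resolves cleanly,
    # whereas np.array([5], dtype=dtype) is a rank-1 array, not a scalar.
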
diff --git a/tests/python/unittest/test_gluon_data.py b/tests/python/unittest/test_gluon_data.py
index a2b8164..8c202b2 100644
--- a/tests/python/unittest/test_gluon_data.py
+++ b/tests/python/unittest/test_gluon_data.py
@@ -251,6 +251,7 @@ def _batchify(data):
         nd.array(y_lens, ctx=context.Context('cpu_shared', 0)))
 
 @with_seed()
+@unittest.skip("skipping flaky test - see https://github.com/apache/incubator-mxnet/issues/19877")
 def test_multi_worker_forked_data_loader():
     data = _Dummy(False)
     loader = DataLoader(data, batch_size=40, batchify_fn=_batchify, num_workers=2)
@@ -265,6 +266,7 @@ def test_multi_worker_forked_data_loader():
             pass
 
 @with_seed()
+@unittest.skip("skipping flaky test - see https://github.com/apache/incubator-mxnet/issues/19877")
 def test_multi_worker_dataloader_release_pool():
     # will trigger too many open file if pool is not released properly
     if os.name == 'nt':
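
The two flaky DataLoader tests are disabled with unittest.skip rather than deleted, so they surface as skips in the test report and can be re-enabled once issue #19877 is fixed. The decorator pattern in isolation (placeholder class name and test body, not the actual test code):

    # Hedged sketch of the skip pattern; the class and body are placeholders.
    import unittest

    class DataLoaderFlakyTests(unittest.TestCase):
        @unittest.skip("flaky - see https://github.com/apache/incubator-mxnet/issues/19877")
        def test_multi_worker_forked_data_loader(self):
            self.fail("never runs while the skip decorator is present")

    if __name__ == "__main__":
        unittest.main()
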