Posted to commits@mxnet.apache.org by la...@apache.org on 2020/04/28 15:35:11 UTC

[incubator-mxnet] branch master updated: Fix for out of bound access in QuantizedElemwiseMulOpShape (#18185)

This is an automated email from the ASF dual-hosted git repository.

lausen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 47cd0ba  Fix for out of bound access in QuantizedElemwiseMulOpShape (#18185)
47cd0ba is described below

commit 47cd0ba72efce13d383396387fe3e1303b83ae89
Author: bgawrych <59...@users.noreply.github.com>
AuthorDate: Tue Apr 28 17:34:09 2020 +0200

    Fix for out of bound access in QuantizedElemwiseMulOpShape (#18185)
    
    * remove vector clear call before accessing memory
    * enable test for MKLDNN
---
 src/operator/quantization/quantized_elemwise_mul.cc | 1 -
 tests/python/quantization/test_quantization.py      | 4 ----
 2 files changed, 5 deletions(-)

diff --git a/src/operator/quantization/quantized_elemwise_mul.cc b/src/operator/quantization/quantized_elemwise_mul.cc
index 522642e..7d1798f 100644
--- a/src/operator/quantization/quantized_elemwise_mul.cc
+++ b/src/operator/quantization/quantized_elemwise_mul.cc
@@ -58,7 +58,6 @@ inline bool QuantizedElemwiseMulOpShape(const nnvm::NodeAttrs& attrs,
   SHAPE_ASSIGN_CHECK(*in_attrs, quantized_elemwise_mul::kRhsMin, mxnet::TShape(1, 1));
   SHAPE_ASSIGN_CHECK(*in_attrs, quantized_elemwise_mul::kRhsMax, mxnet::TShape(1, 1));
 
-  out_attrs->clear();
   SHAPE_ASSIGN_CHECK(*out_attrs, quantized_elemwise_mul::kOut, lshape);
   if (!params.enable_float_output) {
     SHAPE_ASSIGN_CHECK(*out_attrs, quantized_elemwise_mul::kOutMin, mxnet::TShape(1, 1));
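The removed call is the source of the out-of-bound access: SHAPE_ASSIGN_CHECK assigns into *out_attrs by index, so clearing the vector first leaves no element at quantized_elemwise_mul::kOut (or the min/max slots) to write into. A minimal standalone sketch of the same access pattern, using a plain std::vector<int> in place of the shape vector and a hypothetical kOut constant rather than the actual MXNet macro:

    #include <cassert>
    #include <vector>

    int main() {
      // Shape inference receives out_attrs already sized to the number of
      // outputs; named indices such as kOut refer to slots inside it.
      const std::size_t kOut = 0;
      std::vector<int> out_attrs(3, -1);  // pre-sized by the caller

      // Buggy pattern removed by the patch: after clear() the vector is
      // empty, so an indexed write to slot kOut touches memory past the end
      // of the vector (undefined behavior).
      //   out_attrs.clear();
      //   out_attrs[kOut] = 42;

      // Pattern the patch keeps: assign into the slot that already exists.
      out_attrs[kOut] = 42;
      assert(out_attrs.at(kOut) == 42);
      return 0;
    }

This sketch only illustrates the indexing hazard; the real code writes mxnet::TShape values through the SHAPE_ASSIGN_CHECK macro into the pre-allocated output slots, as shown in the hunk above.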
diff --git a/tests/python/quantization/test_quantization.py b/tests/python/quantization/test_quantization.py
index 2572cab..d217975 100644
--- a/tests/python/quantization/test_quantization.py
+++ b/tests/python/quantization/test_quantization.py
@@ -353,10 +353,6 @@ def test_quantized_elemwise_mul():
         if is_test_for_native_cpu():
             print('skipped testing quantized_elemwise_mul for native cpu since it is not supported yet')
             return
-        if is_test_for_mkldnn():
-            print('skipped testing quantized_elemwise_mul for mkldnn due to '
-                  'https://github.com/apache/incubator-mxnet/issues/18034')
-            return
         elif qtype != 'int8':
             print('skipped testing quantized_elemwise_mul for not supported data type')
             return