You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2022/01/27 13:49:10 UTC

[GitHub] [incubator-mxnet] bartekkuncer commented on a change in pull request #20816: [FEATURE] Fuse dequantize with convolution

bartekkuncer commented on a change in pull request #20816:
URL: https://github.com/apache/incubator-mxnet/pull/20816#discussion_r793620084



##########
File path: src/operator/subgraph/dnnl/dnnl_post_quantize_property.h
##########
@@ -209,7 +209,7 @@ class SgDNNLPostQuantizeProperty : public SubgraphProperty {
 
     // When only fused quantized operator and requantize, set min/max_cablib_range,
     // When fused quantized operator + requantize + dequantize, set dequantize flag to true.
-    if (dequantize_node != nullptr) {
+    if ((dequantize_node != nullptr && (no_enable_float_output.count(fuse_node->op()) == 0))) {

Review comment:
      Doubled brackets — the outer pair of parentheses around the condition is redundant.
   ```suggestion
       if (dequantize_node != nullptr && (no_enable_float_output.count(fuse_node->op()) == 0)) {
   ```

##########
File path: tests/python/dnnl/subgraphs/test_conv_subgraph.py
##########
@@ -115,6 +115,45 @@ def forward(self, x):
   check_fusion(net, data_shape, attr)
 
 
+@mx.util.use_np
+@pytest.mark.parametrize('data_shape', DATA_SHAPE)
+@pytest.mark.parametrize('no_bias', [True, False])
+@pytest.mark.parametrize('out_type', ['int8', 'auto'])

Review comment:
      Why is uint8 not included among the tested `out_type` values?

##########
File path: tests/python/dnnl/subgraphs/test_conv_subgraph.py
##########
@@ -115,6 +115,45 @@ def forward(self, x):
   check_fusion(net, data_shape, attr)
 
 
+@mx.util.use_np
+@pytest.mark.parametrize('data_shape', DATA_SHAPE)
+@pytest.mark.parametrize('no_bias', [True, False])
+@pytest.mark.parametrize('out_type', ['int8', 'auto'])
+def test_pos_conv_add3(no_bias, data_shape, out_type):
+  # conv + add fusion case 3
+  class ConvAdd(nn.HybridBlock):
+    def __init__(self, use_bias, **kwargs):
+        super(ConvAdd, self).__init__(**kwargs)
+        self.conv0 = nn.Conv2D(channels=data_shape[1], kernel_size=(1, 1), strides=1, use_bias=use_bias)
+
+    def forward(self, x):
+      out = x + self.conv0(x)
+      return out
+
+  net = ConvAdd(use_bias=True)
+  check_quantize(net, data_shape, out_type)
+
+
+@mx.util.use_np
+@pytest.mark.parametrize('data_shape', DATA_SHAPE)
+@pytest.mark.parametrize('no_bias', [True, False])
+@pytest.mark.parametrize('out_type', ['int8', 'auto'])

Review comment:
      Same as above — why is uint8 not included among the tested `out_type` values?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org