Posted to commits@mxnet.apache.org by bg...@apache.org on 2022/05/30 11:37:03 UTC
[incubator-mxnet] branch master updated: [master] Fix issue with fc_eltwise fusing (#20958)
This is an automated email from the ASF dual-hosted git repository.
bgawrych pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new ab30c56bce [master] Fix issue with fc_eltwise fusing (#20958)
ab30c56bce is described below
commit ab30c56bce902988a58c1578298060835ca5c1f9
Author: PiotrWolinski - Intel <pi...@intel.com>
AuthorDate: Mon May 30 13:36:45 2022 +0200
[master] Fix issue with fc_eltwise fusing (#20958)
* Fixed issue with fc_eltwise
* Disabled exp quantization and improved function readability
* Update src/operator/nn/dnnl/dnnl_act.cc
Co-authored-by: Andrzej Kotłowski <An...@intel.com>
* Fixed linting
* Enabled exp for test_fc_eltwise
Co-authored-by: Andrzej Kotłowski <An...@intel.com>
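For context, a minimal sketch of the FC + elementwise pattern this commit lets the DNNL backend fuse and quantize. It mirrors the FCEltwise block used by test_fc_subgraph.py; the class name, shapes, and sigma here are illustrative, not taken from the patch:

# Sketch (hypothetical shapes) of an FC followed by a unary eltwise op,
# the pattern covered by the fc_eltwise fusion pass.
import mxnet as mx
from mxnet.gluon import nn

mx.npx.set_np()

class FCExp(nn.HybridBlock):
    def __init__(self):
        super().__init__()
        self.fc = nn.Dense(16, flatten=True)

    def forward(self, x):
        # 'exp' is one of the unary ops the selector can now fuse after FC
        return mx.np.exp(self.fc(x))

net = FCExp()
net.initialize(init=mx.init.Normal(0.3))  # small sigma keeps exp() outputs bounded
out = net(mx.np.random.uniform(-1.0, 1.0, (4, 8)))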
---
src/operator/nn/dnnl/dnnl_act.cc | 6 +++---
src/operator/subgraph/dnnl/dnnl_fc_property.h | 16 +++++-----------
tests/python/dnnl/subgraphs/subgraph_common.py | 7 +++++--
tests/python/dnnl/subgraphs/test_fc_subgraph.py | 4 +---
4 files changed, 14 insertions(+), 19 deletions(-)
diff --git a/src/operator/nn/dnnl/dnnl_act.cc b/src/operator/nn/dnnl/dnnl_act.cc
index 4b51e450e4..f3ee79a3f6 100644
--- a/src/operator/nn/dnnl/dnnl_act.cc
+++ b/src/operator/nn/dnnl/dnnl_act.cc
@@ -70,9 +70,9 @@ bool SupportDNNLLeakyRelu(const LeakyReLUParam& param, const NDArray& input) {
}
bool SupportQuantizedDNNLAct(const ActivationParam& param) {
- // TODO(zhennan): Add more activation type when dnnl supports.
- // Remove this when it's identity to SupportDNNLAct.
- return param.act_type == activation::kReLU;
+ // Although this is currently identical to SupportDNNLAct, it is kept as a
+ // separate function so that new activation types are easier to handle.
+ return SupportDNNLAct(param);
}
dnnl::algorithm GetDNNLActAlgo(const ActivationParam& param) {
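A hedged Python rendering of what the change above accomplishes; the activation set below is an assumption for illustration, not copied from SupportDNNLAct:

# Both predicates now share one source of truth: the quantized check simply
# delegates to the general one.
SUPPORTED_ACTS = {'relu', 'sigmoid', 'log_sigmoid', 'softrelu', 'tanh', 'mish'}

def support_dnnl_act(act_type):
    return act_type in SUPPORTED_ACTS

def support_quantized_dnnl_act(act_type):
    # Identical today; kept separate so quantization-specific restrictions
    # can be reintroduced later without touching the general path.
    return support_dnnl_act(act_type)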
diff --git a/src/operator/subgraph/dnnl/dnnl_fc_property.h b/src/operator/subgraph/dnnl/dnnl_fc_property.h
index 64fd507e74..902444354f 100644
--- a/src/operator/subgraph/dnnl/dnnl_fc_property.h
+++ b/src/operator/subgraph/dnnl/dnnl_fc_property.h
@@ -103,22 +103,16 @@ class SgDNNLFCSelector : public SubgraphSelector {
}
if (new_node.op() == Op::Get("LeakyReLU")) {
const LeakyReLUParam& param = nnvm::get<LeakyReLUParam>(new_node.attrs.parsed);
- if (param.act_type == leakyrelu::kLeakyReLU || param.act_type == leakyrelu::kELU ||
- param.act_type == leakyrelu::kGELU) {
+ if (SupportDNNLLeakyRelu(param)) {
matched_list_.push_back(&new_node);
status_ = kSuccess;
return true;
}
}
- if (!quantized_ &&
- (new_node.op() == Op::Get("square") || new_node.op() == Op::Get("_npi_square") ||
- new_node.op() == Op::Get("sqrt") || new_node.op() == Op::Get("_npi_sqrt") ||
- new_node.op() == Op::Get("exp") || new_node.op() == Op::Get("_npi_exp"))) {
- matched_list_.push_back(&new_node);
- status_ = kSuccess;
- return true;
- }
- if (new_node.op() == Op::Get("abs") || new_node.op() == Op::Get("_npi_absolute")) {
+ if (new_node.op() == Op::Get("square") || new_node.op() == Op::Get("_npi_square") ||
+ new_node.op() == Op::Get("sqrt") || new_node.op() == Op::Get("_npi_sqrt") ||
+ new_node.op() == Op::Get("abs") || new_node.op() == Op::Get("_npi_absolute") ||
+ new_node.op() == Op::Get("exp") || new_node.op() == Op::Get("_npi_exp")) {
matched_list_.push_back(&new_node);
status_ = kSuccess;
return true;
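A sketch of the merged selector branch in Python (op names are taken from the diff above): the four unary eltwise ops and their numpy aliases are now matched by a single membership test, in both the quantized and non-quantized passes:

# One branch replaces the previous two; the !quantized_ guard is gone.
FUSABLE_UNARY_OPS = {
    'square', '_npi_square',
    'sqrt',   '_npi_sqrt',
    'abs',    '_npi_absolute',
    'exp',    '_npi_exp',
}

def matches_unary_eltwise(op_name):
    return op_name in FUSABLE_UNARY_OPS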
diff --git a/tests/python/dnnl/subgraphs/subgraph_common.py b/tests/python/dnnl/subgraphs/subgraph_common.py
index 009d9cc785..e3a102a634 100644
--- a/tests/python/dnnl/subgraphs/subgraph_common.py
+++ b/tests/python/dnnl/subgraphs/subgraph_common.py
@@ -140,7 +140,10 @@ def check_quantize(net_original, data_shapes, out_type, name='conv',
if name in config:
name = config[name][OP_NAME]
- net_original.initialize(init=mx.init.Normal(0.5), force_reinit=True)
+ sigma = 0.3 if hasattr(net_original, 'alg') and net_original.alg == 'exp' else 0.5
+
+ net_original.initialize(init=mx.init.Normal(sigma), force_reinit=True)
+
min_value = -1 if out_type != 'uint8' else 0
one_shape = isinstance(data_shapes, tuple)
if one_shape:
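Why 'exp' gets the smaller sigma: with sigma = 0.5 the FC pre-activations are wider, and exp() inflates their dynamic range, which degrades int8 quantization accuracy in check_quantize. A standalone numpy illustration with made-up sizes:

# Smaller weight sigma -> tighter exp() output range (illustrative numbers).
import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(-1.0, 1.0, (4, 8))
for sigma in (0.5, 0.3):
    w = rng.normal(0.0, sigma, (16, 8))
    print(sigma, np.exp(x @ w.T).max())  # max shrinks with the smaller sigma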
@@ -226,7 +229,7 @@ def check_fusion(net_original, data_shapes, attrs_dict, check_fp32_fusion=True,
low=data_min, high=data_max))
net_original(*data)
net_fusion = copy.copy(net_original)
- sym, params = net_original.export(None)
+ sym, _ = net_original.export(None)
if check_fp32_fusion:
if ''.join(sym.get_internals().list_outputs()).find('sqrt') != -1:
diff --git a/tests/python/dnnl/subgraphs/test_fc_subgraph.py b/tests/python/dnnl/subgraphs/test_fc_subgraph.py
index 25e3f80aad..5028750c74 100644
--- a/tests/python/dnnl/subgraphs/test_fc_subgraph.py
+++ b/tests/python/dnnl/subgraphs/test_fc_subgraph.py
@@ -137,11 +137,9 @@ def test_fc_eltwise(data_shape, use_bias, flatten, alg):
out = mx.np.clip(fc_out, 0, 1.0)
return out
- not_quant_fuze = ['sigmoid', 'log_sigmoid', 'softrelu', 'tanh', 'mish', 'square', 'square_root',
- 'exp']
attrs = {'fc': {'with_eltwise': 'true'}}
net = FCEltwise(use_bias, flatten, alg)
- check_fusion(net, data_shape, attrs, check_quantization=flatten and not alg in not_quant_fuze)
+ check_fusion(net, data_shape, attrs, check_quantization=flatten)
@mx.util.use_np
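A usage sketch of the updated test path (the FCEltwise signature is assumed from the test above, and the data shape is illustrative): with the not_quant_fuze list removed, 'exp' now goes through the quantization check whenever flatten is true.

# Exercise fusion and quantization for the FC + exp pattern.
net = FCEltwise(use_bias=True, flatten=True, alg='exp')
check_fusion(net, (4, 8), {'fc': {'with_eltwise': 'true'}},
             check_quantization=True)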