You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by ak...@apache.org on 2021/10/08 20:01:41 UTC
[incubator-mxnet] branch master updated: change nd -> np in
imagenet_gen_qsym_onednn.py (#20399)
This is an automated email from the ASF dual-hosted git repository.
akarbown pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new 36cb619 change nd -> np in imagenet_gen_qsym_onednn.py (#20399)
36cb619 is described below
commit 36cb619824a23c2d805191f726c59c8d0e2c8dfa
Author: Sylwester Fraczek <sy...@intel.com>
AuthorDate: Fri Oct 8 22:00:04 2021 +0200
change nd -> np in imagenet_gen_qsym_onednn.py (#20399)
also a few tiny formatting fixes
---
example/quantization/imagenet_gen_qsym_onednn.py | 10 ++++++----
python/mxnet/contrib/quantization.py | 9 ++++++---
2 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/example/quantization/imagenet_gen_qsym_onednn.py b/example/quantization/imagenet_gen_qsym_onednn.py
index d0a8bd1..c8e6709 100644
--- a/example/quantization/imagenet_gen_qsym_onednn.py
+++ b/example/quantization/imagenet_gen_qsym_onednn.py
@@ -39,6 +39,7 @@ def download_calib_dataset(dataset_url, calib_dataset, logger=None):
logger.info('Downloading calibration dataset from %s to %s' % (dataset_url, calib_dataset))
mx.test_utils.download(dataset_url, calib_dataset)
+
def get_from_gluon(model_name, classes=1000, logger=None):
dir_path = os.path.dirname(os.path.realpath(__file__))
model_path = os.path.join(dir_path, 'model')
@@ -48,12 +49,14 @@ def get_from_gluon(model_name, classes=1000, logger=None):
prefix = os.path.join(model_path, model_name)
return net, prefix
+
def regex_find_excluded_symbols(patterns_dict, model_name):
for key, value in patterns_dict.items():
if re.search(key, model_name) is not None:
return value
return None
+
def get_exclude_symbols(model_name, exclude_first_conv):
"""Grouped supported models at the time of commit:
- alexnet
@@ -95,6 +98,7 @@ def get_exclude_symbols(model_name, exclude_first_conv):
excluded_sym_names += excluded_first_conv_sym_names
return excluded_sym_names
+
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate a calibrated quantized model from a FP32 model with Intel oneDNN support')
parser.add_argument('--model', type=str, default='resnet50_v1',
@@ -116,7 +120,7 @@ if __name__ == '__main__':
help='number of batches for calibration')
parser.add_argument('--exclude-first-conv', action='store_true', default=False,
help='excluding quantizing the first conv layer since the'
- ' input data may have negative value which doesn\'t support at moment' )
+ ' input data may have negative value which doesn\'t support at moment')
parser.add_argument('--shuffle-dataset', action='store_true',
help='shuffle the calibration dataset')
parser.add_argument('--calib-mode', type=str, default='entropy',
@@ -170,8 +174,7 @@ if __name__ == '__main__':
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.join(dir_path, 'model')
if not os.path.exists(dir_path):
- os.mkdir(dir_path) # without try catch block as we expect to finish
- # script if it fail
+ os.mkdir(dir_path) # without try catch block as we expect to finish script if it fail
# download model
if not args.no_pretrained:
@@ -191,7 +194,6 @@ if __name__ == '__main__':
epoch = args.epoch
net = gluon.SymbolBlock.imports("{}-symbol.json".format(prefix), ['data'], "{}-0000.params".format(prefix))
-
# get batch size
batch_size = args.batch_size
if logger:
diff --git a/python/mxnet/contrib/quantization.py b/python/mxnet/contrib/quantization.py
index 0cefee7..b7ff517 100644
--- a/python/mxnet/contrib/quantization.py
+++ b/python/mxnet/contrib/quantization.py
@@ -32,6 +32,7 @@ from ..io import DataDesc
from ..context import cpu, Context
from ..util import is_np_array
+
def _quantize_params(qsym, params, min_max_dict):
"""Given a quantized symbol and a dict of params that have not been quantized,
generate quantized params. Currently only supports quantizing the arg_params
@@ -86,6 +87,7 @@ def _quantize_params(qsym, params, min_max_dict):
quantized_params[name] = array_cls.array([min_max_dict[output][1]])
return quantized_params
+
def _quantize_symbol(sym, ctx, excluded_symbols=None, excluded_operators=None,
offline_params=None, quantized_dtype='int8', quantize_mode='smart',
quantize_granularity='tensor-wise'):
@@ -291,6 +293,7 @@ class _LayerHistogramCollector(CalibrationCollector):
logger.debug(f"layer={name}, min_val={min_val}, max_val={max_val}, th={th}, divergence={divergence}")
return th_dict
+
class _LayerOutputMinMaxCollector(CalibrationCollector):
"""Saves layer output min and max values in a dict with layer names as keys.
The collected min and max values will be directly used as thresholds for quantization.
@@ -319,11 +322,12 @@ class _LayerOutputMinMaxCollector(CalibrationCollector):
self.logger.debug("Collecting layer %s min_range=%f, max_range=%f"
% (name, min_range, max_range))
+
def _calibrate_quantized_sym(qsym, min_max_dict):
"""Given a dictionary containing the thresholds for quantizing the layers,
set the thresholds into the quantized symbol as the params of requantize operators.
"""
- if min_max_dict is None or len(min_max_dict) == 0:
+ if min_max_dict is None or len(min_max_dict) == 0:
return qsym
num_layer_outputs = len(min_max_dict)
layer_output_names = []
@@ -363,8 +367,6 @@ def _collect_layer_statistics(sym_block, data, collector, num_inputs, num_calib_
return num_batches
-
-
def _generate_list_of_data_desc(data_shapes, data_types):
""""Convert list ot tuples to list of DataDesc."""
if isinstance(data_shapes, list):
@@ -527,6 +529,7 @@ def quantize_model(sym, arg_params, aux_params, data_names=('data',),
return qsym, qarg_params, aux_params
+
def quantize_model_mkldnn(sym, arg_params, aux_params, data_names=('data',),
ctx=cpu(), excluded_sym_names=None, excluded_op_names=None,
calib_mode='entropy', calib_data=None, num_calib_batches=None,