Posted to commits@mxnet.apache.org by sk...@apache.org on 2018/08/20 16:47:54 UTC
[incubator-mxnet] branch master updated: Python fixes for PyLint test "consider-using-in" (#12214)
This is an automated email from the ASF dual-hosted git repository.
skm pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new aec7d2c Python fixes for PyLint test "consider-using-in" (#12214)
aec7d2c is described below
commit aec7d2c0a7288d785279eea0148a5c6b88a277cf
Author: cclauss <cc...@bluewin.ch>
AuthorDate: Mon Aug 20 18:47:46 2018 +0200
Python fixes for PyLint test "consider-using-in" (#12214)
* PyLint fixes for consider-using-in
* Add missing 'in'
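
For context: pylint's "consider-using-in" check (message id R1714) flags chains of
equality comparisons against a single variable and suggests a membership test
instead. The two forms are equivalent, since `x in (a, b)` tests `x == a` and then
`x == b`, element by element. A minimal sketch of the pattern (names hypothetical,
not from this diff):

    # Flagged by pylint R1714:
    if color == 'red' or color == 'blue':
        paint()
    # Preferred:
    if color in ('red', 'blue'):
        paint()

To reproduce the warnings locally, something along the lines of
`pylint --disable=all --enable=consider-using-in python/mxnet` should work.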
---
 python/mxnet/autograd.py                              |  2 +-
 python/mxnet/contrib/onnx/mx2onnx/_op_translations.py |  2 +-
 python/mxnet/contrib/quantization.py                  |  2 +-
 python/mxnet/gluon/nn/conv_layers.py                  | 12 ++++--------
 python/mxnet/gluon/parameter.py                       |  4 ++--
 python/mxnet/gluon/rnn/rnn_layer.py                   |  2 +-
 python/mxnet/ndarray/sparse.py                        |  8 ++++----
 python/mxnet/operator.py                              |  2 +-
 python/mxnet/test_utils.py                            | 12 ++++++------
 python/mxnet/visualization.py                         |  4 ++--
 tools/caffe_converter/convert_model.py                | 10 ++++------
 11 files changed, 27 insertions(+), 33 deletions(-)
diff --git a/python/mxnet/autograd.py b/python/mxnet/autograd.py
index e5ddaf6..b3acee2 100644
--- a/python/mxnet/autograd.py
+++ b/python/mxnet/autograd.py
@@ -465,7 +465,7 @@ class Function(object):
"autograd.Function.backward must return NDArrays, not %s"%type(ret)
if req == 0: # null
return True
- elif req == 1 or req == 2: # write or inplace
+ elif req in (1, 2): # write or inplace
igrad[:] = ret
elif req == 'add':
igrad[:] += ret
diff --git a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
index b2c9367..af7fedb 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
+++ b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
@@ -122,7 +122,7 @@ def convert_string_to_list(string_val):
val = val.replace("L", "")
val = val.replace("[", "")
val = val.replace("]", "")
- if val != "" and val != "None":
+ if val not in ("", "None"):
result_list.append(int(val))
return result_list
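
The hunk above only shows the tail of convert_string_to_list. A minimal
self-contained sketch of the helper: the comma-splitting and strip at the top are
an assumption about the code outside this hunk, not part of the diff:

    def convert_string_to_list(string_val):
        """Parse a string such as "[1L, 2L, None]" into [1, 2]."""
        result_list = []
        for val in string_val.split(','):
            val = val.strip()
            val = val.replace("L", "")
            val = val.replace("[", "")
            val = val.replace("]", "")
            # Membership test replaces: if val != "" and val != "None":
            if val not in ("", "None"):
                result_list.append(int(val))
        return result_list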
diff --git a/python/mxnet/contrib/quantization.py b/python/mxnet/contrib/quantization.py
index 62be40f..8df9239 100644
--- a/python/mxnet/contrib/quantization.py
+++ b/python/mxnet/contrib/quantization.py
@@ -489,7 +489,7 @@ def quantize_model(sym, arg_params, aux_params,
excluded_syms.append(nodes[idx])
logger.info('Quantizing symbol')
- if quantized_dtype != 'int8' and quantized_dtype != 'uint8':
+ if quantized_dtype not in ('int8', 'uint8'):
raise ValueError('unknown quantized_dtype %s received,'
' expected `int8` or `uint8`' % quantized_dtype)
qsym = _quantize_symbol(sym, excluded_symbols=excluded_syms,
diff --git a/python/mxnet/gluon/nn/conv_layers.py b/python/mxnet/gluon/nn/conv_layers.py
index e1f9b9f..96ecc21 100644
--- a/python/mxnet/gluon/nn/conv_layers.py
+++ b/python/mxnet/gluon/nn/conv_layers.py
@@ -309,8 +309,7 @@ class Conv2D(_Conv):
dilation=(1, 1), groups=1, layout='NCHW',
activation=None, use_bias=True, weight_initializer=None,
bias_initializer='zeros', in_channels=0, **kwargs):
- assert layout == 'NCHW' or layout == 'NHWC', \
- "Only supports 'NCHW' and 'NHWC' layout for now"
+ assert layout in ('NCHW', 'NHWC'), "Only supports 'NCHW' and 'NHWC' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)*2
assert len(kernel_size) == 2, "kernel_size must be a number or a list of 2 ints"
@@ -391,8 +390,7 @@ class Conv3D(_Conv):
dilation=(1, 1, 1), groups=1, layout='NCDHW', activation=None,
use_bias=True, weight_initializer=None, bias_initializer='zeros',
in_channels=0, **kwargs):
- assert layout == 'NCDHW' or layout == 'NDHWC', \
- "Only supports 'NCDHW' and 'NDHWC' layout for now"
+ assert layout in ('NCDHW', 'NDHWC'), "Only supports 'NCDHW' and 'NDHWC' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)*3
assert len(kernel_size) == 3, "kernel_size must be a number or a list of 3 ints"
@@ -564,8 +562,7 @@ class Conv2DTranspose(_Conv):
output_padding=(0, 0), dilation=(1, 1), groups=1, layout='NCHW',
activation=None, use_bias=True, weight_initializer=None,
bias_initializer='zeros', in_channels=0, **kwargs):
- assert layout == 'NCHW' or layout == 'NHWC', \
- "Only supports 'NCHW' and 'NHWC' layout for now"
+ assert layout in ('NCHW', 'NHWC'), "Only supports 'NCHW' and 'NHWC' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)*2
if isinstance(output_padding, numeric_types):
@@ -657,8 +654,7 @@ class Conv3DTranspose(_Conv):
output_padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, layout='NCDHW',
activation=None, use_bias=True, weight_initializer=None,
bias_initializer='zeros', in_channels=0, **kwargs):
- assert layout == 'NCDHW' or layout == 'NDHWC', \
- "Only supports 'NCDHW' and 'NDHWC' layout for now"
+ assert layout in ('NCDHW', 'NDHWC'), "Only supports 'NCDHW' and 'NDHWC' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)*3
if isinstance(output_padding, numeric_types):
diff --git a/python/mxnet/gluon/parameter.py b/python/mxnet/gluon/parameter.py
index 1f6b86c..24c86f4 100644
--- a/python/mxnet/gluon/parameter.py
+++ b/python/mxnet/gluon/parameter.py
@@ -165,7 +165,7 @@ class Parameter(object):
return
assert len(self._shape) == len(new_shape) and \
- all(j == 0 or i == j for i, j in zip(new_shape, self._shape)), \
+ all(j in (0, i) for i, j in zip(new_shape, self._shape)), \
"Expected shape %s is incompatible with given shape %s."%(
str(new_shape), str(self._shape))
@@ -231,7 +231,7 @@ class Parameter(object):
"""(Re)initializes by loading from data."""
if self.shape:
for self_dim, data_dim in zip(self.shape, data.shape):
- assert self_dim == 0 or self_dim == data_dim, \
+ assert self_dim in (0, data_dim), \
"Failed loading Parameter '%s' from saved params: " \
"shape incompatible expected %s vs saved %s"%(
self.name, str(self.shape), str(data.shape))
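
Both parameter.py hunks encode the same rule: a stored dimension of 0 acts as a
wildcard, otherwise the dimensions must match exactly. A standalone sketch of that
rule (function name hypothetical):

    def shapes_compatible(new_shape, stored_shape):
        # A stored dimension of 0 means "unknown"; anything matches it.
        return len(new_shape) == len(stored_shape) and \
            all(j in (0, i) for i, j in zip(new_shape, stored_shape))

    assert shapes_compatible((32, 128), (0, 128))       # wildcard dim
    assert not shapes_compatible((32, 128), (64, 128))  # mismatch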
diff --git a/python/mxnet/gluon/rnn/rnn_layer.py b/python/mxnet/gluon/rnn/rnn_layer.py
index d2c6ac9..daf8ecb 100644
--- a/python/mxnet/gluon/rnn/rnn_layer.py
+++ b/python/mxnet/gluon/rnn/rnn_layer.py
@@ -37,7 +37,7 @@ class _RNNLayer(HybridBlock):
i2h_bias_initializer, h2h_bias_initializer,
mode, **kwargs):
super(_RNNLayer, self).__init__(**kwargs)
- assert layout == 'TNC' or layout == 'NTC', \
+ assert layout in ('TNC', 'NTC'), \
"Invalid layout %s; must be one of ['TNC' or 'NTC']"%layout
self._hidden_size = hidden_size
self._num_layers = num_layers
diff --git a/python/mxnet/ndarray/sparse.py b/python/mxnet/ndarray/sparse.py
index 9c02b8e..88f5eae 100644
--- a/python/mxnet/ndarray/sparse.py
+++ b/python/mxnet/ndarray/sparse.py
@@ -527,7 +527,7 @@ class CSRNDArray(BaseSparseNDArray):
return super(CSRNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
- if stype == 'default' or stype == 'csr':
+ if stype in ('default', 'csr'):
return super(CSRNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
@@ -774,7 +774,7 @@ class RowSparseNDArray(BaseSparseNDArray):
return super(RowSparseNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
- if stype == 'default' or stype == 'row_sparse':
+ if stype in ('default', 'row_sparse'):
return super(RowSparseNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
@@ -1531,7 +1531,7 @@ def zeros(stype, shape, ctx=None, dtype=None, **kwargs):
if ctx is None:
ctx = current_context()
dtype = mx_real_t if dtype is None else dtype
- if stype == 'row_sparse' or stype == 'csr':
+ if stype in ('row_sparse', 'csr'):
aux_types = _STORAGE_AUX_TYPES[stype]
else:
raise ValueError("unknown storage type" + stype)
@@ -1566,7 +1566,7 @@ def empty(stype, shape, ctx=None, dtype=None):
if dtype is None:
dtype = mx_real_t
assert(stype is not None)
- if stype == 'csr' or stype == 'row_sparse':
+ if stype in ('csr', 'row_sparse'):
return zeros(stype, shape, ctx=ctx, dtype=dtype)
else:
raise Exception("unknown stype : " + str(stype))
diff --git a/python/mxnet/operator.py b/python/mxnet/operator.py
index 1da6628..e8fa571 100644
--- a/python/mxnet/operator.py
+++ b/python/mxnet/operator.py
@@ -464,7 +464,7 @@ class CustomOp(object):
"""Helper function for assigning into dst depending on requirements."""
if req == 'null':
return
- elif req == 'write' or req == 'inplace':
+ elif req in ('write', 'inplace'):
dst[:] = src
elif req == 'add':
dst[:] += src
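
The assign helper dispatches on the gradient request: 'null' skips the write,
'write' and 'inplace' both overwrite the destination, and 'add' accumulates. A
self-contained numpy sketch of the same dispatch (illustrative only, not the
MXNet implementation):

    import numpy as np

    def assign(dst, req, src):
        """Assign src into dst according to the request type."""
        if req == 'null':
            return               # consumer does not need the value
        elif req in ('write', 'inplace'):
            dst[:] = src         # both overwrite dst
        elif req == 'add':
            dst[:] += src        # accumulate into dst

    grad = np.zeros(3)
    assign(grad, 'add', np.ones(3))
    assign(grad, 'add', np.ones(3))
    print(grad)  # [2. 2. 2.]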
diff --git a/python/mxnet/test_utils.py b/python/mxnet/test_utils.py
index 69d916e..63b75cf 100644
--- a/python/mxnet/test_utils.py
+++ b/python/mxnet/test_utils.py
@@ -639,7 +639,7 @@ def _parse_location(sym, location, ctx, dtype=default_dtype()):
ValueError: Symbol arguments and keys of the given location do not match.
"""
assert isinstance(location, (dict, list, tuple))
- assert dtype == np.float16 or dtype == np.float32 or dtype == np.float64
+ assert dtype in (np.float16, np.float32, np.float64)
if isinstance(location, dict):
if set(location.keys()) != set(sym.list_arguments()):
raise ValueError("Symbol arguments and keys of the given location do not match."
@@ -698,7 +698,7 @@ def _parse_aux_states(sym, aux_states, ctx, dtype=default_dtype()):
>>> _parse_aux_states(fc2, {'batchnorm0_moving_var': mean_states}, None)
ValueError: Symbol aux_states names and given aux_states do not match.
"""
- assert dtype == np.float16 or dtype == np.float32 or dtype == np.float64
+ assert dtype in (np.float16, np.float32, np.float64)
if aux_states is not None:
if isinstance(aux_states, dict):
if set(aux_states.keys()) != set(sym.list_auxiliary_states()):
@@ -745,7 +745,7 @@ def numeric_grad(executor, location, aux_states=None, eps=1e-4,
def as_stype(var, stype, dtype):
return mx.nd.cast_storage(mx.nd.array(var, dtype=dtype), stype=stype)
- assert dtype == np.float16 or dtype == np.float32 or dtype == np.float64
+ assert dtype in (np.float16, np.float32, np.float64)
approx_grads = {k: np.zeros(v.shape, dtype=dtype)
for k, v in location.items()}
for k, v in location.items():
@@ -827,7 +827,7 @@ def check_numeric_gradient(sym, location, aux_states=None, numeric_eps=1e-3, rto
---------
..[1] https://github.com/Theano/Theano/blob/master/theano/gradient.py
"""
- assert dtype == np.float16 or dtype == np.float32 or dtype == np.float64
+ assert dtype in (np.float16, np.float32, np.float64)
if ctx is None:
ctx = default_context()
@@ -970,7 +970,7 @@ def check_symbolic_forward(sym, location, expected, rtol=1E-4, atol=None,
>>> ret_expected = np.array([[19, 22], [43, 50]])
>>> check_symbolic_forward(sym_dot, [mat1, mat2], [ret_expected])
"""
- assert dtype == np.float16 or dtype == np.float32 or dtype == np.float64
+ assert dtype in (np.float16, np.float32, np.float64)
if ctx is None:
ctx = default_context()
@@ -1055,7 +1055,7 @@ def check_symbolic_backward(sym, location, out_grads, expected, rtol=1e-5, atol=
>>> grad_expected = ograd.copy().asnumpy()
>>> check_symbolic_backward(sym_add, [mat1, mat2], [ograd], [grad_expected, grad_expected])
"""
- assert dtype == np.float16 or dtype == np.float32 or dtype == np.float64
+ assert dtype in (np.float16, np.float32, np.float64)
if ctx is None:
ctx = default_context()
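
One subtlety in the test_utils.py changes: the membership test behaves correctly
whether dtype arrives as a numpy scalar type or as a np.dtype instance, because
`in` compares with == and np.dtype defines equality against the corresponding
scalar type. A quick check:

    import numpy as np

    # Both forms pass: np.dtype('float32') == np.float32 is True.
    for dtype in (np.float32, np.dtype('float32')):
        assert dtype in (np.float16, np.float32, np.float64)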
diff --git a/python/mxnet/visualization.py b/python/mxnet/visualization.py
index fc6db1d..8294620 100644
--- a/python/mxnet/visualization.py
+++ b/python/mxnet/visualization.py
@@ -309,7 +309,7 @@ def plot_network(symbol, title="plot", save_format='pdf', shape=None, node_attrs
attr["fillcolor"] = cm[1]
elif op == "BatchNorm":
attr["fillcolor"] = cm[3]
- elif op == "Activation" or op == "LeakyReLU":
+ elif op in ('Activation', 'LeakyReLU'):
label = r"%s\n%s" % (op, node["attrs"]["act_type"])
attr["fillcolor"] = cm[2]
elif op == "Pooling":
@@ -318,7 +318,7 @@ def plot_network(symbol, title="plot", save_format='pdf', shape=None, node_attrs
"x".join(_str2tuple(node["attrs"]["stride"]))
if "stride" in node["attrs"] else "1")
attr["fillcolor"] = cm[4]
- elif op == "Concat" or op == "Flatten" or op == "Reshape":
+ elif op in ("Concat", "Flatten", "Reshape"):
attr["fillcolor"] = cm[5]
elif op == "Softmax":
attr["fillcolor"] = cm[6]
diff --git a/tools/caffe_converter/convert_model.py b/tools/caffe_converter/convert_model.py
index d5c069b..5c2a11e 100644
--- a/tools/caffe_converter/convert_model.py
+++ b/tools/caffe_converter/convert_model.py
@@ -77,9 +77,8 @@ def convert_model(prototxt_fname, caffemodel_fname, output_prefix=None):
layers_proto = caffe_parser.get_layers(caffe_parser.read_prototxt(prototxt_fname))
for layer_name, layer_type, layer_blobs in layer_iter:
- if layer_type == 'Convolution' or layer_type == 'InnerProduct' \
- or layer_type == 4 or layer_type == 14 or layer_type == 'PReLU' \
- or layer_type == 'Deconvolution' or layer_type == 39:
+ if layer_type in ('Convolution', 'InnerProduct', 4, 14, 'PReLU', 'Deconvolution',
+ 39):
if layer_type == 'PReLU':
assert (len(layer_blobs) == 1)
weight_name = layer_name + '_gamma'
@@ -99,7 +98,7 @@ def convert_model(prototxt_fname, caffemodel_fname, output_prefix=None):
wmat = np.array(layer_blobs[0].data).reshape(wmat_dim)
channels = wmat_dim[1]
- if channels == 3 or channels == 4: # RGB or RGBA
+ if channels in (3, 4): # RGB or RGBA
if first_conv:
# Swapping BGR of caffe into RGB in mxnet
wmat[:, [0, 2], :, :] = wmat[:, [2, 0], :, :]
@@ -133,8 +132,7 @@ def convert_model(prototxt_fname, caffemodel_fname, output_prefix=None):
arg_params[weight_name] = mx.nd.zeros(wmat.shape)
arg_params[weight_name][:] = wmat
-
- if first_conv and (layer_type == 'Convolution' or layer_type == 4):
+ if first_conv and layer_type in ('Convolution', 4):
first_conv = False
elif layer_type == 'Scale':
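
Worth noting in the convert_model.py change: the tuple deliberately mixes strings
and integers, since layer_type can be either a layer name or a legacy numeric
layer type. Python handles the mix safely here, as == between values of different
types simply returns False. An illustration (function name hypothetical; the
numeric codes are the ones already present in the diff):

    def is_weight_layer(layer_type):
        # layer_type may be a string or a legacy numeric layer type.
        return layer_type in ('Convolution', 'InnerProduct', 4, 14,
                              'PReLU', 'Deconvolution', 39)

    assert is_weight_layer('Convolution')
    assert is_weight_layer(4)
    assert not is_weight_layer('Scale')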