Fix pylint 2.2.2 gripes. #2642

Merged
merged 9 commits on Feb 21, 2019
2 changes: 1 addition & 1 deletion nnvm/python/nnvm/_base.py
@@ -31,7 +31,7 @@

class NNVMError(Exception):
"""Error that will be throwed by all nnvm functions"""
pass


def _load_lib():
"""Load libary by searching possible path."""
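The `_base.py` hunk above drops a `pass` that follows a docstring, the pattern pylint 2.x reports as `unnecessary-pass` (W0107); the `compile_engine.py` hunk further down makes the same change twice. A minimal sketch of the before and after, using a hypothetical `DemoError` class rather than the NNVM one:

```python
# Minimal sketch of the unnecessary-pass (W0107) cleanup; the DemoError*
# classes are hypothetical, not part of NNVM.

class DemoErrorBefore(Exception):
    """A docstring is already a complete class body."""
    pass  # pylint 2.x flags this statement as unnecessary-pass (W0107)


class DemoErrorAfter(Exception):
    """A docstring is already a complete class body."""


if __name__ == "__main__":
    # Both classes behave identically; only the redundant statement is gone.
    print(issubclass(DemoErrorBefore, Exception), issubclass(DemoErrorAfter, Exception))
```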
3 changes: 1 addition & 2 deletions nnvm/python/nnvm/attribute.py
@@ -42,8 +42,7 @@ def get(self, attr):
if attr:
ret.update(attr)
return ret
else:
return attr
return attr

def __enter__(self):
# pylint: disable=protected-access
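The `attribute.py` change is the first of many `no-else-return` (R1705) fixes in this PR: when the `if` branch ends in a `return`, the `else:` wrapper around the final `return` is redundant. A small sketch of the rewrite, using a hypothetical helper shaped like `AttrScope.get` rather than the NNVM method itself:

```python
# Hypothetical helper illustrating the no-else-return (R1705) rewrite;
# it mirrors the shape of AttrScope.get but is not the NNVM code.

def merged_attrs(current, extra):
    """Return `current` updated with `extra`, or `extra` when it is empty."""
    if extra:
        current.update(extra)
        return current
    return extra  # previously nested under an unnecessary `else:`


if __name__ == "__main__":
    print(merged_attrs({"lr": 0.1}, {"momentum": 0.9}))  # {'lr': 0.1, 'momentum': 0.9}
    print(merged_attrs({"lr": 0.1}, {}))                 # {}
```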
2 changes: 0 additions & 2 deletions nnvm/python/nnvm/compiler/compile_engine.py
@@ -23,13 +23,11 @@ def graph(self):
@tvm.register_node
class GraphCacheEntry(tvm.node.NodeBase):
"""CacheEntry of compilation into a TVM Function"""
pass


@tvm.register_node
class GraphFunc(tvm.node.NodeBase):
"""Compiled result of a graph into a TVM Function"""
pass


class Engine(object):
12 changes: 5 additions & 7 deletions nnvm/python/nnvm/frontend/caffe2.py
@@ -73,9 +73,8 @@ def get_converter(cls):

if hasattr(cls, '_impl'):
return getattr(cls, '_impl')
else:
raise NotImplementedError('{} not implemented'.format(
cls.__name__))
raise NotImplementedError('{} not implemented'.format(
cls.__name__))


_caffe2_internal_args = {
@@ -175,11 +174,10 @@ def _get_axis_from_order_str(order):
order = order if isinstance(order, str) else order.decode('UTF-8')
if order == 'NCHW':
return 1
elif order == 'NHWC':
if order == 'NHWC':
return 3
else:
raise RuntimeError(
"Unsupported storage order: {} in caffe2".format(order))
raise RuntimeError(
"Unsupported storage order: {} in caffe2".format(order))

return AttrCvt(
op_name='concatenate',
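The `caffe2.py` hunks pair the same idea with `no-else-raise` (R1720): once every successful branch returns, the trailing `raise` needs no `elif`/`else` scaffolding. A standalone sketch of the flattened storage-order lookup, mirroring the diff above rather than importing the frontend:

```python
# Standalone sketch of the branch-then-raise pattern after the R1720 fix;
# the values mirror _get_axis_from_order_str in the diff above.

def axis_from_order(order):
    """Map a caffe2 storage-order string to the concatenation axis."""
    if order == 'NCHW':
        return 1
    if order == 'NHWC':
        return 3
    raise RuntimeError("Unsupported storage order: {} in caffe2".format(order))


if __name__ == "__main__":
    print(axis_from_order('NCHW'), axis_from_order('NHWC'))  # 1 3
```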
37 changes: 17 additions & 20 deletions nnvm/python/nnvm/frontend/coreml.py
@@ -98,33 +98,33 @@ def ActivationParams(op, insym, symtab):
par = getattr(op, whichActivation)
if whichActivation == 'linear':
return _sym.__add_scalar__(_sym.__mul_scalar__(insym, scalar=par.alpha), scalar=par.beta)
elif whichActivation == 'ReLU':
if whichActivation == 'ReLU':
return _sym.relu(insym)
elif whichActivation == 'leakyReLU':
if whichActivation == 'leakyReLU':
return _sym.leaky_relu(insym, alpha=par.alpha)
elif whichActivation == 'thresholdedReLU':
if whichActivation == 'thresholdedReLU':
alpha_tensor = _sym.full_like(insym, fill_value=float(par.alpha))
return _sym.elemwise_mul(insym, _sym.greater(insym, alpha_tensor))
elif whichActivation == 'PReLU':
if whichActivation == 'PReLU':
return _sym.prelu(insym, alpha=par.alpha)
elif whichActivation == 'tanh':
if whichActivation == 'tanh':
return _sym.tanh(insym)
elif whichActivation == 'scaledTanh':
if whichActivation == 'scaledTanh':
return _sym.__mul_scalar__(_sym.tanh(_sym.__mul_scalar__(
insym, scalar=par.beta)), scalar=par.alpha)
elif whichActivation == 'sigmoid':
if whichActivation == 'sigmoid':
return _sym.sigmoid(insym)
elif whichActivation == 'sigmoidHard':
if whichActivation == 'sigmoidHard':
transformX = (par.alpha * insym) + par.beta
return _sym.clip(transformX, a_min=0, a_max=1)
elif whichActivation == 'ELU':
if whichActivation == 'ELU':
return _sym.__mul_scalar__(_sym.__add_scalar__(
_sym.exp(insym), scalar=-1), scalar=par.alpha)
elif whichActivation == 'softsign':
if whichActivation == 'softsign':
return insym / (1 + (_sym.relu(insym) + _sym.relu(_sym.negative(insym))))
elif whichActivation == 'softplus':
if whichActivation == 'softplus':
return _sym.log(_sym.__add_scalar__(_sym.exp(insym), scalar=1))
elif whichActivation == 'parametricSoftplus':
if whichActivation == 'parametricSoftplus':
alpha = list(par.alpha.floatValue)
beta = list(par.alpha.floatValue)
if len(alpha) == 1:
@@ -136,8 +136,7 @@ def ActivationParams(op, insym, symtab):
betasym = symtab.new_const(beta)
return _sym.broadcast_mul(_sym.log(_sym.broadcast_add(
_sym.exp(insym), betasym)), alphasym)
else:
raise NotImplementedError('%s not implemented' % whichActivation)
raise NotImplementedError('%s not implemented' % whichActivation)

def ScaleLayerParams(op, insym, symtab):
"""Scale layer params."""
@@ -157,10 +156,9 @@ def PoolingLayerParams(op, insym, symtab):
if op.globalPooling:
if op.type == 0:
return _sym.global_max_pool2d(insym)
elif op.type == 1:
if op.type == 1:
return _sym.global_avg_pool2d(insym)
else:
raise NotImplementedError("Only max and average pooling implemented")
raise NotImplementedError("Only max and average pooling implemented")

else:
params = {'pool_size':list(op.kernelSize),
@@ -190,10 +188,9 @@ def PoolingLayerParams(op, insym, symtab):

if op.type == 0:
return _sym.max_pool2d(insym, **params)
elif op.type == 1:
if op.type == 1:
return _sym.avg_pool2d(insym, **params)
else:
raise NotImplementedError("Only max and average pooling implemented")
raise NotImplementedError("Only max and average pooling implemented")

def SoftmaxLayerParams(op, insym, symtab):
return _sym.softmax(_sym.flatten(insym))
2 changes: 0 additions & 2 deletions nnvm/python/nnvm/frontend/darknet.py
@@ -921,8 +921,6 @@ def _make_outlist(self, sym, op_name, layer, layer_num):
if layer_num != self.net.n-1:
self._outs.insert(0, sym)

return

def from_darknet(self):
"""To convert the darknet symbol to nnvm symbols."""
for i in range(self.net.n):
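The `darknet.py` hunk removes a bare `return` at the end of `_make_outlist`, which pylint reports as `useless-return` (R1711): a function that falls off its last statement already returns `None`, so a value-less `return` there adds nothing. A tiny sketch with a hypothetical helper:

```python
# Hypothetical helper showing the useless-return (R1711) cleanup.

def record_output_before(outs, sym):
    """Prepend sym to outs."""
    outs.insert(0, sym)
    return  # flagged: the function ends here anyway and returns None


def record_output_after(outs, sym):
    """Prepend sym to outs."""
    outs.insert(0, sym)


if __name__ == "__main__":
    collected = []
    record_output_before(collected, "conv0_output")
    record_output_after(collected, "conv1_output")
    print(collected)  # ['conv1_output', 'conv0_output']
```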
78 changes: 37 additions & 41 deletions nnvm/python/nnvm/frontend/keras.py
@@ -47,35 +47,34 @@ def _convert_activation(insym, keras_layer, _):
beta = keras_layer.beta if hasattr(keras_layer, "beta") else 0
return _sym.__add_scalar__(_sym.__mul_scalar__(insym, \
scalar=alpha), scalar=beta)
elif act_type == 'softmax':
if act_type == 'softmax':
return _sym.softmax(insym, axis=1)
elif act_type == 'sigmoid':
if act_type == 'sigmoid':
return _sym.sigmoid(insym)
elif act_type == 'tanh':
if act_type == 'tanh':
return _sym.tanh(insym)
elif act_type == 'relu':
if act_type == 'relu':
return _sym.relu(insym)
elif act_type == 'softplus':
if act_type == 'softplus':
return _sym.log(_sym.__add_scalar__(_sym.exp(insym), scalar=1))
elif act_type == 'elu':
if act_type == 'elu':
alpha = keras_layer.alpha if hasattr(keras_layer, "alpha") else 1
return _get_elu(insym, alpha)
elif act_type == 'selu':
if act_type == 'selu':
# Alpha, Gamma values, obtained from https://arxiv.org/abs/1706.02515
alpha = keras_layer.alpha if hasattr(keras_layer, "alpha") \
else 1.6732632423543772848170429916717
gamma = keras_layer.gamma if hasattr(keras_layer, "gamma") \
else 1.0507009873554804934193349852946
return gamma * _get_elu(insym, alpha)
elif act_type == 'relu6':
if act_type == 'relu6':
return _sym.clip(insym, a_min=0, a_max=6)
elif act_type == 'softsign':
if act_type == 'softsign':
return insym / (1 + (_sym.relu(insym) + _sym.relu(_sym.negative(insym))))
elif act_type == 'hard_sigmoid':
if act_type == 'hard_sigmoid':
transformX = (0.2 * insym) + 0.5
return _sym.clip(transformX, a_min=0, a_max=1)
else:
raise TypeError("Unsupported activation type : {}".format(act_type))
raise TypeError("Unsupported activation type : {}".format(act_type))


def _convert_advanced_activation(insym, keras_layer, symtab):
@@ -84,25 +83,24 @@ def _convert_advanced_activation(insym, keras_layer, symtab):
if keras_layer.max_value:
return _sym.clip(insym, a_min=0, a_max=keras_layer.max_value)
return _sym.relu(insym)
elif act_type == 'LeakyReLU':
if act_type == 'LeakyReLU':
return _sym.leaky_relu(insym, alpha=keras_layer.alpha)
elif act_type == 'ELU':
if act_type == 'ELU':
alpha = keras_layer.alpha if hasattr(keras_layer, "alpha") else 1
return _get_elu(insym, alpha)
elif act_type == 'PReLU':
if act_type == 'PReLU':
assert hasattr(keras_layer, "alpha"), \
"alpha required for PReLU."
_check_data_format(keras_layer)
size = len(keras_layer.alpha.shape)
return -symtab.new_const(keras_layer.get_weights()[0] \
.transpose(np.roll(range(size), 1))) \
* _sym.relu(-insym) + _sym.relu(insym)
elif act_type == 'ThresholdedReLU':
if act_type == 'ThresholdedReLU':
theta = keras_layer.theta if hasattr(keras_layer, "theta") else 1.0
theta_tensor = _sym.full_like(insym[0], fill_value=float(theta))
return _sym.elemwise_mul(insym[0], _sym.greater(insym[0], theta_tensor, out_type="float32"))
else:
raise TypeError("Unsupported advanced activation type : {}".format(act_type))
raise TypeError("Unsupported advanced activation type : {}".format(act_type))


def _convert_merge(insym, keras_layer, _):
@@ -280,31 +278,29 @@ def _convert_pooling(insym, keras_layer, symtab):
# global pool in keras = global pool + flatten in nnvm
if pool_type == 'GlobalMaxPooling2D':
return _convert_flatten(_sym.global_max_pool2d(insym), keras_layer, symtab)
elif pool_type == 'GlobalAveragePooling2D':
if pool_type == 'GlobalAveragePooling2D':
return _convert_flatten(_sym.global_avg_pool2d(insym), keras_layer, symtab)
pool_h, pool_w = keras_layer.pool_size
stride_h, stride_w = keras_layer.strides
params = {'pool_size': [pool_h, pool_w],
'strides': [stride_h, stride_w],
'padding': [0, 0]}
if keras_layer.padding == 'valid':
pass
elif keras_layer.padding == 'same':
in_h = keras_layer.input_shape[1]
in_w = keras_layer.input_shape[2]
pad_t, pad_b = _get_pad_pair(in_h, pool_h, stride_h)
pad_l, pad_r = _get_pad_pair(in_w, pool_w, stride_w)
params['padding'] = [pad_t, pad_l, pad_b, pad_r]
else:
pool_h, pool_w = keras_layer.pool_size
stride_h, stride_w = keras_layer.strides
params = {'pool_size': [pool_h, pool_w],
'strides': [stride_h, stride_w],
'padding': [0, 0]}
if keras_layer.padding == 'valid':
pass
elif keras_layer.padding == 'same':
in_h = keras_layer.input_shape[1]
in_w = keras_layer.input_shape[2]
pad_t, pad_b = _get_pad_pair(in_h, pool_h, stride_h)
pad_l, pad_r = _get_pad_pair(in_w, pool_w, stride_w)
params['padding'] = [pad_t, pad_l, pad_b, pad_r]
else:
raise TypeError("Unsupported padding type : {}".format(keras_layer.padding))
if pool_type == 'MaxPooling2D':
return _sym.max_pool2d(insym, **params)
elif pool_type == 'AveragePooling2D':
# TODO: in keras, padded zeros are not calculated
return _sym.avg_pool2d(insym, **params)
else:
raise TypeError("Unsupported pooling type : {}".format(keras_layer))
raise TypeError("Unsupported padding type : {}".format(keras_layer.padding))
if pool_type == 'MaxPooling2D':
return _sym.max_pool2d(insym, **params)
if pool_type == 'AveragePooling2D':
# TODO: in keras, padded zeros are not calculated
return _sym.avg_pool2d(insym, **params)
raise TypeError("Unsupported pooling type : {}".format(keras_layer))


def _convert_upsample(insym, keras_layer, _):
2 changes: 1 addition & 1 deletion nnvm/python/nnvm/frontend/mxnet.py
@@ -424,7 +424,7 @@ def _topo_sort(symbol):
if childs is None:
dep_cnts[name] = 0
else:
dep_cnts[name] = len(set([c.attr('name') for c in childs]))
dep_cnts[name] = len({c.attr('name') for c in childs})
for child in childs:
child_name = child.attr('name')
if child_name not in deps:
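The one-line `mxnet.py` change swaps `len(set([...]))` for a set comprehension, pylint's `consider-using-set-comprehension` (R1718); the comprehension avoids building a throwaway list before the set. A quick illustration with dummy strings standing in for `c.attr('name')` on real symbols:

```python
# Dummy child names stand in for c.attr('name') on real NNVM symbols.
child_names = ["conv0", "bn0", "conv0", "relu0"]

# Before: an intermediate list is built, then converted to a set.
dep_cnt_before = len(set([name for name in child_names]))

# After: a set comprehension, as pylint's R1718 suggests.
dep_cnt_after = len({name for name in child_names})

assert dep_cnt_before == dep_cnt_after == 3
print(dep_cnt_after)  # 3 unique child names
```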
3 changes: 1 addition & 2 deletions nnvm/python/nnvm/frontend/onnx_caffe2_utils.py
@@ -9,8 +9,7 @@ def _impl(attr):
kernel = attr['kernel_shape']
if len(kernel) == 2:
return prefix + '2d' + surfix
else:
raise NotImplementedError("Only 2d kernel supported.")
raise NotImplementedError("Only 2d kernel supported.")

return _impl

8 changes: 3 additions & 5 deletions nnvm/python/nnvm/frontend/tensorflow.py
@@ -68,8 +68,7 @@ def _impl(attr):
kernel = attr['kernel_shape']
if len(kernel) == 2:
return prefix + '2d' + surfix
else:
raise NotImplementedError("Only 2d kernel supported.")
raise NotImplementedError("Only 2d kernel supported.")
return _impl

def _dimension_constraint():
@@ -433,8 +432,7 @@ def _impl(inputs, attr, params):
op_name="reshape",
extras={'shape':tuple(params_new[0].asnumpy().flatten())},
ignores=['Tshape'])(inputs, attr)
else:
raise RuntimeError("Reshape with dynamic shape input not supported yet.")
raise RuntimeError("Reshape with dynamic shape input not supported yet.")
return _impl

def _bias_add():
@@ -1394,7 +1392,7 @@ def _parse_param(self, key, value, name):
self._nodes[name] = _sym.Variable(name=name,
shape=self._params[name].shape)
else:
if key != 'dtype' and key != '_output_shapes' and key != '_class':
if key not in ('dtype', '_output_shapes', '_class'):
raise NotImplementedError \
("Other attributes for a Const(param) Node {} ? .".format(key))

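Besides the `no-else-raise` fixes, `tensorflow.py` rewrites a chain of `!=` comparisons into a single membership test, pylint's `consider-using-in` (R1714). A minimal sketch with a hypothetical attribute key:

```python
# Hypothetical key check mirroring the _parse_param attribute filter.
key = "value"

# Before: three comparisons chained with `and`.
unexpected_before = key != 'dtype' and key != '_output_shapes' and key != '_class'

# After: one membership test over a tuple (R1714).
unexpected_after = key not in ('dtype', '_output_shapes', '_class')

assert unexpected_before == unexpected_after
print(unexpected_after)  # True: 'value' is not one of the ignored keys
```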
2 changes: 2 additions & 0 deletions nnvm/python/nnvm/frontend/util/tensorflow_parser.py
@@ -115,6 +115,8 @@ def _load_ckpt(self):
"""TODO: Load checkpoint model."""
raise RuntimeError("InputConfiguration: Loading tf checkpoint model is "
"not supported yet.")
# pylint: disable=unreachable
return 0

def parse(self):
"""Parse tensorflow models: checkpoints, saved models, and single pb
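`tensorflow_parser.py` is the one place in this diff where lines are added rather than removed: `_load_ckpt` raises unconditionally, and the hunk appends a `return 0` under a local `# pylint: disable=unreachable` comment, keeping the W0101 suppression scoped to that single spot. A sketch of the scoped-suppression pattern with a hypothetical loader:

```python
# Hypothetical loader showing a locally scoped pylint suppression after an
# unconditional raise, mirroring the tensorflow_parser.py hunk above.

def load_checkpoint(path):
    """Checkpoint loading is not supported yet."""
    raise RuntimeError("InputConfiguration: loading the tf checkpoint model "
                       "{} is not supported yet.".format(path))
    # pylint: disable=unreachable
    return 0  # never executed; the comment keeps pylint's W0101 quiet


if __name__ == "__main__":
    try:
        load_checkpoint("model.ckpt")
    except RuntimeError as err:
        print(err)
```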