use latest error handling conventions
markrogersjr committed Mar 28, 2019
1 parent 908ff9a commit 4273ce9
Showing 18 changed files with 347 additions and 236 deletions.
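
The convention applied across these frontends replaces the old tvm.error_handling helper functions (raise_operator_unimplemented, raise_attribute_invalid, and friends) with direct raises of the exception classes in tvm.error, each carrying a full sentence naming the operator and the frontend. A minimal sketch of the before/after pattern, assuming tvm is importable; the convert_map lookup and the op_name argument below are illustrative, not taken from the diff:

import tvm

def convert_operator(op_name, inputs, attrs, convert_map):
    # Old convention (removed by this commit):
    #     raise_operator_unimplemented(op_name)
    # New convention: raise the tvm.error class directly with a descriptive message.
    if op_name not in convert_map:
        raise tvm.error.OpNotImplemented(
            'Operator {} is not supported in frontend Caffe2.'.format(op_name))
    return convert_map[op_name](inputs, attrs)
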
7 changes: 0 additions & 7 deletions nnvm/python/nnvm/frontend/__init__.py
@@ -7,10 +7,3 @@
from .darknet import from_darknet
from .tensorflow import from_tensorflow
from .caffe2 import from_caffe2
from .common import raise_not_supported, get_nnvm_op, required_attr, \
warn_not_used, parse_tshape, parse_bool_str
from tvm.error_handling import raise_attribute_required, \
raise_attribute_invalid, \
raise_operator_unimplemented, \
raise_attribute_unimplemented, \
warn_not_used
10 changes: 6 additions & 4 deletions nnvm/python/nnvm/frontend/caffe2.py
@@ -3,7 +3,7 @@
from __future__ import absolute_import as _abs
import tvm
from nnvm import symbol as _sym
from nnvm.frontend.common import get_nnvm_op, Renamer, AttrConverter as AttrCvt
from .common import get_nnvm_op
from .onnx_caffe2_utils import dimension_picker, dimension_constraint, infer_channels, revert_caffe2_pad
from . import onnx

@@ -73,7 +73,8 @@ def get_converter(cls):

if hasattr(cls, '_impl'):
return getattr(cls, '_impl')
raise_operator_unimplemented(cls.__name__)
raise tvm.error.OpNotImplemented(
'Operator {} is not implemented in frontend Caffe2.'.format(cls.__name__))


_caffe2_internal_args = {
@@ -175,7 +176,7 @@ def _get_axis_from_order_str(order):
return 1
if order == 'NHWC':
return 3
raise_attribute_invalid(order, 'storage order', 'concat')
raise tvm.error.OpAttributeInvalid('Value {} in attribute {} of operator {} is not valid.'.format(order, 'order', 'Concat'))

return AttrCvt(
op_name='concatenate',
@@ -425,7 +426,8 @@ def _convert_operator(self,
# Add a sanitizing step to convert all byte strings in args to strings
sym = convert_map[op_type](inputs, args, self._params)
else:
raise_operator_unimplemented(op_type)
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend Caffe2.'.format(op_type))
return sym


6 changes: 4 additions & 2 deletions nnvm/python/nnvm/frontend/common.py
@@ -7,13 +7,15 @@
def get_nnvm_op(op_name):
op = getattr(_sym, op_name)
if not op:
raise_operator_unimplemented(op_name)
raise OpNotImplemented(
'Operator {} is not supported.'.format(op))
return op

def required_attr(attr, key, op_name):
assert isinstance(attr, dict)
if key not in attr:
raise_attribute_required(key, op_name)
raise OpAttributeRequired(
'Required attribute {} not found in operator {}'.format(key, op_name))
return attr[key]

def parse_tshape(tshape):
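
The attribute checks in common.py and the frontends below follow the same pattern: raise_attribute_required, raise_attribute_invalid, and raise_attribute_unimplemented give way to direct raises of tvm.error.OpAttributeRequired, tvm.error.OpAttributeInvalid, and tvm.error.OpAttributeUnimplemented. A small sketch of the first two, with the attribute and operator names chosen for illustration only:

import tvm

def get_layout(attrs, op_name='Conv2D'):
    # Missing attribute: OpAttributeRequired.
    if 'layout' not in attrs:
        raise tvm.error.OpAttributeRequired(
            'Required attribute layout not found in operator {}'.format(op_name))
    layout = attrs['layout']
    # Present but unsupported value: OpAttributeInvalid.
    if layout not in ('NCHW', 'NHWC'):
        raise tvm.error.OpAttributeInvalid(
            'Value {} in attribute "layout" of operator {} is not valid.'.format(
                layout, op_name))
    return layout
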
35 changes: 21 additions & 14 deletions nnvm/python/nnvm/frontend/coreml.py
@@ -2,11 +2,10 @@
"""CoreML frontend."""
from __future__ import absolute_import as _abs
import numpy as np

import tvm
from .common import SymbolTable
from .. import symbol as _sym
from .._base import string_types
from .common import SymbolTable

__all__ = ['from_coreml']

@@ -83,7 +82,8 @@ def BatchnormLayerParams(op, insym, symtab):
"""Get layer of batchnorm parameter"""
# this changes the symbol
if op.instanceNormalization:
raise_operator_unimplemented('instance normalization')
msg = 'Operator "instance normalization" is not supported in frontend CoreML.'
raise tvm.error.OpNotImplemented(msg)
else:
params = {'gamma':symtab.new_const(list(op.gamma.floatValue)),
'beta':symtab.new_const(list(op.beta.floatValue)),
@@ -136,7 +136,8 @@ def ActivationParams(op, insym, symtab):
betasym = symtab.new_const(beta)
return _sym.broadcast_mul(_sym.log(_sym.broadcast_add(
_sym.exp(insym), betasym)), alphasym)
raise_operator_unimplemented(whichActivation)
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend CoreML.'.format(whichActivation))

def ScaleLayerParams(op, insym, symtab):
"""Scale layer params."""
@@ -158,7 +159,8 @@ def PoolingLayerParams(op, insym, symtab):
return _sym.global_max_pool2d(insym)
if op.type == 1:
return _sym.global_avg_pool2d(insym)
raise_operator_unimplemented('pooling (not max or average)')
raise tvm.error.OpNotImplemented(
'Operator pooling (not max or average) is not supported in frontend CoreML.')

else:
params = {'pool_size':list(op.kernelSize),
@@ -178,8 +180,8 @@ def PoolingLayerParams(op, insym, symtab):
params['padding'] = padding
params['ceil_mode'] = True
else:
raise_attribute_invalid(op.WhichOneof('PoolingPaddingType'),
'PoolingPaddingType', 'pooling')
msg = 'Value {} in attribute PoolingPaddingType of operator Pooling is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(op.WhichOneof('PoolingPaddingType')))

# consume padding layer
if symtab.in_padding:
@@ -191,7 +193,8 @@ def PoolingLayerParams(op, insym, symtab):
return _sym.max_pool2d(insym, **params)
if op.type == 1:
return _sym.avg_pool2d(insym, **params)
raise_operator_unimplemented('pooling (not max or average)')
msg = 'Operator pooling (not max or average) is not supported in frontend CoreML.'
raise tvm.error.OpNotImplemented(msg)

def SoftmaxLayerParams(op, insym, symtab):
return _sym.softmax(_sym.flatten(insym))
@@ -230,7 +233,8 @@ def ConcatLayerParams(op, insyms, symtab):
if not isinstance(insyms, list):
insyms = [insyms]
if op.sequenceConcat:
raise_operator_unimplemented('sequence concat')
raise tvm.error.OpNotImplemented(
'Operator Sequence Concat is not supported in frontend CoreML.')
ret = _sym.concatenate(*insyms, axis=1)
return ret

@@ -244,14 +248,16 @@ def PaddingLayerParams(op, insym, symtab):
if op.WhichOneof('PaddingType') == 'constant':
constant = op.constant
if constant.value != 0:
raise_attribute_invalid(constant.value, 'padding value', 'padding')
msg = 'Value {} in attribute "padding value" of operator Padding is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(constant.value))
padding = [b.startEdgeSize for b in op.paddingAmounts.borderAmounts]
padding2 = [b.endEdgeSize for b in op.paddingAmounts.borderAmounts]
for i, j in zip(padding, padding2):
assert i == j
symtab.set_padding(padding)
else:
raise_operator_unimplemented('non-constant padding')
raise tvm.error.OpNotImplemented(
'Operator "non-constant padding" is not supported in frontend CoreML.')
return insym

def PermuteLayerParams(op, insym, symtab):
@@ -260,8 +266,8 @@ def PermuteLayerParams(op, insym, symtab):

def UpsampleLayerParams(op, insym, symtab):
if op.scalingFactor[0] != op.scalingFactor[1]:
raise_attribute_invalid(op.scalingFactor, 'scaling factors',
'upsample')
raise tvm.error.OpAttributeInvalid(
'Height and width scaling factors of Upsample operator must be equal.')
interpolationMode = 'NEAREST_NEIGHBOR' if op.mode == 0 else 'BILINEAR'
return _sym.upsampling(insym, scale=op.scalingFactor[0], method=interpolationMode)

@@ -342,7 +348,8 @@ def coreml_op_to_nnvm(op, inname, outname, symtab):
"""
classname = type(op).__name__
if classname not in _convert_map:
raise_operator_unimplemented(classname)
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend CoreML.'.format(classname))
if isinstance(inname, string_types):
insym = symtab.get_var(inname)
else:
50 changes: 33 additions & 17 deletions nnvm/python/nnvm/frontend/darknet.py
@@ -6,6 +6,7 @@
import numpy as np
import tvm
from .. import symbol as _sym
from .common import get_nnvm_op, required_attr, parse_tshape, parse_bool_str

class LAYERTYPE(object):
"""Darknet LAYERTYPE Class constant."""
@@ -61,7 +62,8 @@ def _darknet_maxpooling(inputs, attrs):
"""Process the max pool 2d operation."""
kernel = parse_tshape(required_attr(attrs, 'kernel', 'maxpool'))
if len(kernel) != 1:
raise_attribute_unimplemented('non-2d kernel', 'pool_2d')
raise tvm.error.OpAttributeUnimplemented(
'Non-2D kernels for Max Pooling are not supported in frontend Darknet.')

op_name, new_attrs = 'max_pool2d', {}
strides = int(attrs.get('stride', (1, 1)))
@@ -79,7 +81,8 @@ def _darknet_avgpooling(inputs, attrs):
"""Process the average pool 2d operation."""
kernel = parse_tshape(required_attr(attrs, 'kernel', 'avgpool'))
if len(kernel) != 1:
raise_attribute_unimplemented('non-2d kernel', 'pool_2d')
raise tvm.error.OpAttributeUnimplemented(
'Non-2D kernels for Average Pooling are not supported in frontend Darknet.')

op_name, new_attrs = 'avg_pool2d', {}
strides = int(attrs.get('stride', (1, 1)))
@@ -103,10 +106,12 @@ def _darknet_conv2d(inputs, attrs):
"""Process the convolution 2d operation."""
kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d'))
if len(kernel) != 1:
raise_attribute_unimplemented('non 2d kernel', 'conv2d')
raise tvm.error.OpAttributeUnimplemented('Non-2D kernels for Conv2D are unsupported '
'in frontend Darknet.')
layout = attrs.get('layout', 'NCHW')
if layout not in ['NCHW', 'NHWC']:
raise_attribute_invalid(layout, 'layout', 'conv2d')
raise tvm.error.OpAttributeInvalid(
'Value {} in attribute "layout" of operator Conv2D is not valid.'.format(layout))
strides = int(attrs.get('stride', (1, 1)))
pads = int(attrs.get('pad', (0, 0)))

@@ -142,13 +147,16 @@ def _darknet_conv2d(inputs, attrs):
def _darknet_conv2d_transpose(inputs, attrs):
"""Process the convolution 2d transpose operation."""
if 'target_shape' in attrs:
raise_attribute_unimplemented('target_shape', 'conv2d_transpose')
raise tvm.error.OpAttributeUnimplemented(
'Attribute "target_shape" is not supported in operator Conv2D-transpose.')
kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d_transpose'))
if len(kernel) != 2:
raise_attribute_unimplemented('non-2d kernel', 'conv2d_transpose')
raise tvm.error.OpAttributeUnimplemented(
'Non-2D kernels are not supported in operator Conv2D-transpose.')
layout = attrs.get('layout', 'NCHW')
if layout not in ['NCHW', 'NHWC']:
raise_attribute_invalid(layout, 'layout', 'conv2d_transpose')
msg = 'Value {} in attribute "layout" of operator Conv2D-transpose is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(layout))
op_name, new_attrs = 'conv2d_transpose', {}
new_attrs['channels'] = required_attr(attrs, 'num_filter', 'conv2d_transpose')
new_attrs['kernel_size'] = kernel
@@ -222,7 +230,8 @@ def _darknet_dropout(inputs, attrs):
def _darknet_reshape(inputs, attrs):
"""Process the reshape operation."""
if parse_bool_str(attrs, 'reverse'):
raise_attribute_unimplemented('reverse', 'reshape')
raise tvm.error.OpAttributeUnimplemented(
'Attribute "reverse" is not supported in operator Reshape.')
op_name, new_attrs = 'reshape', {}
new_attrs['shape'] = required_attr(attrs, 'shape', 'reshape')
return get_nnvm_op(op_name)(*inputs, **new_attrs), None
@@ -324,7 +333,8 @@ def _darknet_activations(inputs, attrs):
elif ACTIVATION.ELU == act:
act_type = 'elu'
else:
raise_operator_unimplemented('act: ' + act)
raise tvm.error.OpNotImplemented(
'Operator act: {} is not supported in framework Darknet.'.format(act))

if act_type in ['relu', 'tanh']:
op_name, new_attrs = act_type, {}
@@ -339,7 +349,8 @@ def _darknet_activations(inputs, attrs):
op_name, new_attrs = act_type, {}
sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
else:
raise_operator_unimplemented('act_type: ' + act_type)
raise tvm.error.OpNotImplemented(
'Operator act: {} is not supported in framework Darknet.'.format(act))
return sym, None

def _darknet_op_not_support(inputs, attrs):
@@ -402,7 +413,8 @@ def _darknet_convert_symbol(op_name, inputs, attrs):
if op_name in _DARKNET_CONVERT_MAP:
sym, out_name = _DARKNET_CONVERT_MAP[op_name](inputs, attrs)
else:
raise_operator_unimplemented(op_name)
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend Darknet.'.format(op_name))
if out_name is None:
out_name = sym.list_output_names()[0].replace('_output', '')
return out_name, sym
@@ -448,9 +460,10 @@ def _get_convolution_weights(self, layer, opname):
if layer.nweights == 0:
return

if (layer.n * layer.c * layer.size * layer.size) != layer.nweights:
raise_attribute_invalid(layer.n * layer.c * layer.size * layer.size,
'layer weights size', 'conv2d')
if layer.n * layer.c * layer.size * layer.size != layer.nweights:
msg = 'nweights ({}) != n * c * h * w ({}) in operator {}'
msg = msg.format(layer.nweights, layer.n * layer.c * layer.size ** 2, opname)
raise tvm.error.OpAttributeInvalid(msg)

shape = (layer.n, layer.c, layer.size, layer.size)
weights = self._read_memory_buffer(shape, layer.weights)
@@ -630,7 +643,8 @@ def _get_darknet_attrs(self, layer, layer_num):
pass

else:
raise_operator_unimplemented(layer.type)
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend Darknet.'.format(layer.type))

return attr

@@ -763,7 +777,8 @@ def _handle_darknet_rnn_layers(self, layer_num, sym):

elif LAYERTYPE.LSTM == layer.type:
if layer.steps > 1:
raise_attribute_invalid(layer.steps, 'number of steps', 'RNN')
raise tvm.error.OpAttributeInvalid(
'Number of steps {} of RNN is not valid.'.format(layer.steps))

op_name_add = 'elemwise_add'
op_name_mul = 'elemwise_mul'
@@ -829,7 +844,8 @@ def _handle_darknet_rnn_layers(self, layer_num, sym):

elif LAYERTYPE.GRU == layer.type:
if layer.steps > 1:
raise_attribute_invalid(layer.steps, 'number of steps', 'RNN')
raise tvm.error.OpAttributeInvalid(
'Number of steps {} is not valid in RNN.'.format(layer.steps))

op_name_add = 'elemwise_add'
op_name_mul = 'elemwise_mul'
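
The Darknet changes above also lean on the third class in the family, tvm.error.OpAttributeUnimplemented, for attributes that are legal in the source framework but not yet handled by the frontend. A hedged sketch of that distinction, with the kernel check below simplified for illustration rather than copied from the diff:

import tvm

def check_kernel(attrs):
    kernel = attrs.get('kernel', (1, 1))
    # A recognised attribute whose value the frontend simply cannot handle yet:
    # OpAttributeUnimplemented rather than OpAttributeInvalid.
    if len(kernel) != 2:
        raise tvm.error.OpAttributeUnimplemented(
            'Non-2D kernels are not supported in frontend Darknet.')
    return kernel
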