[Frontend] Support asymmetric padding of convolution
FrozenGene committed Apr 22, 2020
1 parent 3e3ccce commit d7b6fd6
Showing 3 changed files with 5 additions and 34 deletions.
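
The pattern is identical in all three frontends: rather than inserting an explicit _op.nn.pad in front of the convolution whenever the padding is asymmetric, each frontend now passes the four-element tuple (pad_top, pad_left, pad_bottom, pad_right) straight to the conv2d padding attribute. A minimal sketch of the two forms in Relay (not part of the commit; shapes and pad amounts are illustrative):

from tvm import relay

data = relay.var("data", shape=(1, 3, 224, 224), dtype="float32")
weight = relay.var("weight", shape=(32, 3, 3, 3), dtype="float32")

# Before: pad explicitly, then convolve with zero padding.
padded = relay.nn.pad(data, pad_width=((0, 0), (0, 0), (0, 1), (0, 1)))
conv_old = relay.nn.conv2d(padded, weight, strides=(2, 2), padding=(0, 0))

# After: hand the asymmetric (top, left, bottom, right) padding to conv2d itself.
conv_new = relay.nn.conv2d(data, weight, strides=(2, 2), padding=(0, 0, 1, 1))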
11 changes: 2 additions & 9 deletions python/tvm/relay/frontend/coreml.py
@@ -77,22 +77,15 @@ def _ConvolutionLayerParams(op, inexpr, etab):
             pad_b = valid.paddingAmounts.borderAmounts[0].endEdgeSize
             pad_r = valid.paddingAmounts.borderAmounts[1].endEdgeSize
             if not all(v == 0 for v in (pad_t, pad_l, pad_b, pad_r)):
-                inexpr = _op.nn.pad(data=inexpr, pad_width=((0, 0),
-                                                            (0, 0),
-                                                            (pad_t, pad_b),
-                                                            (pad_l, pad_r)))
+                params['padding'] = (pad_t, pad_l, pad_b, pad_r)
     elif op.WhichOneof('ConvolutionPaddingType') == 'same':
         assert op.same.asymmetryMode == 0, "Only support BOTTOM_RIGHT_HEAVY mode, " \
                                            "which is used by tf/caffe and so on"
         kernel = params['kernel_size']
         strides = params['strides']
         pad_t, pad_b = get_pad_value(H, kernel[0], strides[0])
         pad_l, pad_r = get_pad_value(W, kernel[1], strides[1])
-        inexpr = _op.nn.pad(data=inexpr, pad_width=((0, 0),
-                                                    (0, 0),
-                                                    (pad_t, pad_b),
-                                                    (pad_l, pad_r)))
-
+        params['padding'] = (pad_t, pad_l, pad_b, pad_r)
     else:
         raise NotImplementedError("Valid/Same convolution padding implemented")

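The get_pad_value helper used above computes TensorFlow/Caffe-style 'same' padding, putting the extra pixel on the end edge, which is the BOTTOM_RIGHT_HEAVY mode the assert insists on. A sketch of that computation, written out here only for illustration (the real helper lives in the frontend utilities and may differ in detail):

def get_pad_value(data, kernel, stride):
    """Bottom/right-heavy 'same' padding (TensorFlow/Caffe convention)."""
    out = (data + stride - 1) // stride                # ceil(data / stride)
    pad = max(0, (out - 1) * stride + kernel - data)   # total padding needed
    pad_before = pad // 2                              # an odd total favors the end edge
    return pad_before, pad - pad_before
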
20 changes: 2 additions & 18 deletions python/tvm/relay/frontend/keras.py
@@ -290,15 +290,7 @@ def _convert_convolution(inexpr, keras_layer, etab):
         in_w = keras_layer.input_shape[2]
         pad_t, pad_b = _get_pad_pair(in_h, dilated_kernel_h, stride_h)
         pad_l, pad_r = _get_pad_pair(in_w, dilated_kernel_w, stride_w)
-        if pad_t == pad_b and pad_l == pad_r:
-            params['padding'] = (pad_t, pad_l)
-        elif etab.data_layout == 'NCHW':
-            inexpr = _op.nn.pad(data=inexpr, pad_width=(
-                (0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
-        else:
-            inexpr = _op.nn.pad(data=inexpr, pad_width=(
-                (0, 0), (pad_t, pad_b), (pad_l, pad_r), (0, 0)))
-
+        params['padding'] = (pad_t, pad_l, pad_b, pad_r)
     else:
         msg = 'Padding with {} is not supported for operator Convolution ' \
               'in frontend Keras.'
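
Dropping the etab.data_layout branch is possible because the conv2d padding attribute refers to the spatial axes as (top, left, bottom, right) regardless of whether the tensor layout is NCHW or NHWC; only an explicit nn.pad needs to know where H and W sit inside pad_width. A sketch with an assumed NHWC input (not part of the commit):

from tvm import relay

data = relay.var("data", shape=(1, 224, 224, 3), dtype="float32")    # NHWC
weight = relay.var("weight", shape=(3, 3, 3, 32), dtype="float32")   # HWIO

# The same (top, left, bottom, right) tuple works for either layout.
conv = relay.nn.conv2d(data, weight, strides=(2, 2), padding=(0, 0, 1, 1),
                       data_layout="NHWC", kernel_layout="HWIO")
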
@@ -424,15 +416,7 @@ def _convert_separable_convolution(inexpr, keras_layer, etab):
         in_w = keras_layer.input_shape[2]
         pad_t, pad_b = _get_pad_pair(in_h, kernel_h, stride_h)
         pad_l, pad_r = _get_pad_pair(in_w, kernel_w, stride_w)
-        if pad_t == pad_b and pad_l == pad_r:
-            params0['padding'] = (pad_t, pad_l)
-        elif etab.data_layout == 'NCHW':
-            inexpr = _op.nn.pad(data=inexpr, pad_width=(
-                (0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
-        else:
-            inexpr = _op.nn.pad(data=inexpr, pad_width=(
-                (0, 0), (pad_t, pad_b), (pad_l, pad_r), (0, 0)))
-
+        params0['padding'] = (pad_t, pad_l, pad_b, pad_r)
     else:
         msg = 'Padding with {} is not supported for operator Separable ' \
               'Convolution in frontend Keras.'
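
For reference, 'same' padding only becomes asymmetric when the total pad is odd, which is exactly the case that used to force the nn.pad fallback. A worked example with assumed sizes:

in_size, kernel, stride = 224, 3, 2
out_size = (in_size + stride - 1) // stride                  # ceil(224 / 2) = 112
total = max(0, (out_size - 1) * stride + kernel - in_size)   # (112 - 1) * 2 + 3 - 224 = 1
pad_before, pad_after = total // 2, total - total // 2       # (0, 1): unequal, hence asymmetric
assert (pad_before, pad_after) == (0, 1)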
8 changes: 1 addition & 7 deletions python/tvm/relay/frontend/tflite.py
@@ -1308,13 +1308,7 @@ def convert_conv(self, op, conv_type):
             pad_left, pad_right = get_pad_value(input_w, dilated_kernel_w, stride_w)
             do_pad = not (pad_top == 0 and pad_bottom == 0 and pad_left == 0 and pad_right == 0)
             if do_pad:
-                pad_value = 0
-                if input_tensor.qnn_params:
-                    pad_value = get_scalar_from_constant(input_tensor.qnn_params['zero_point'])
-                in_expr = _op.nn.pad(data=in_expr, pad_width=((0, 0),
-                                                              (pad_top, pad_bottom),
-                                                              (pad_left, pad_right),
-                                                              (0, 0)), pad_value=float(pad_value))
+                params['padding'] = [pad_top, pad_left, pad_bottom, pad_right]

         else:
             raise tvm.error.OpAttributeUnImplemented(
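The TFLite change also drops the quantization-aware pad_value: with the padding carried on the convolution attribute, the QNN conv2d lowering takes care of padding with the input zero point, so the frontend no longer needs to read it from qnn_params. A rough sketch of the resulting quantized call (all shapes, scales, and zero points are made up for illustration):

from tvm import relay

in_expr = relay.var("input", shape=(1, 224, 224, 3), dtype="uint8")     # NHWC, assumed
weight_expr = relay.var("weight", shape=(3, 3, 3, 32), dtype="uint8")   # HWIO, assumed
conv = relay.qnn.op.conv2d(
    in_expr, weight_expr,
    input_zero_point=relay.const(128, "int32"),
    kernel_zero_point=relay.const(0, "int32"),
    input_scale=relay.const(0.0078125, "float32"),
    kernel_scale=relay.const(0.002, "float32"),
    kernel_size=(3, 3), channels=32, strides=(2, 2),
    padding=(0, 0, 1, 1),                     # asymmetric (top, left, bottom, right)
    data_layout="NHWC", kernel_layout="HWIO")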
