[ONNX]Pool3d & upsample3d op support (#5135)
* [ONNX]Pool3d and Upsample3d op updated

* Pool3d and Upsample3d testcase

* Review comments fixed

* Review comments
siju-samuel authored Apr 4, 2020
1 parent 0cfdecd commit fd9ce58
Showing 2 changed files with 100 additions and 9 deletions.
python/tvm/relay/frontend/onnx.py (24 additions, 8 deletions)
@@ -137,8 +137,10 @@ def onnx_default_layout(dims):
         return 'NCW'
     if dims == 2:
         return 'NCHW'
+    if dims == 3:
+        return 'NCDHW'
 
-    msg = "Only 1d and 2d layouts are currently supported"
+    msg = "Only 1D, 2D and 3D layouts are currently supported"
     raise tvm.error.OpAttributeInvalid(msg.format(op_name))


@@ -151,8 +153,10 @@ def onnx_storage_order2layout(storage_order, dims=2):
         return 'NCW' if storage_order == 0 else 'NWC'
     if dims == 2:
         return 'NCHW' if storage_order == 0 else 'NHWC'
+    if dims == 3:
+        return 'NCDHW' if storage_order == 0 else 'NDHWC'
 
-    msg = "Only 1d and 2d layouts are currently supported"
+    msg = "Only 1D, 2D and 3D layouts are currently supported"
     raise tvm.error.OpAttributeInvalid(msg.format(op_name))
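As a quick reference, the dims-to-layout mapping that both helpers now implement can be summarized in a small standalone sketch (the helper names below are hypothetical, not the frontend's API):

# Sketch of the dims -> layout mapping used by the two helpers above.
def default_layout(dims):
    return {1: 'NCW', 2: 'NCHW', 3: 'NCDHW'}[dims]

def storage_order_layout(storage_order, dims):
    channels_first = {1: 'NCW', 2: 'NCHW', 3: 'NCDHW'}
    channels_last = {1: 'NWC', 2: 'NHWC', 3: 'NDHWC'}
    return channels_first[dims] if storage_order == 0 else channels_last[dims]

assert default_layout(3) == 'NCDHW'
assert storage_order_layout(1, 3) == 'NDHWC'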


@@ -780,19 +784,31 @@ def _impl_v9(cls, inputs, attr, params):
             assert len(inputs) == 2, "Upsample op take 2 inputs, {} given".format(len(inputs))
             scales = params[inputs[1].name_hint].asnumpy()
             inputs = inputs[:1]
-        assert len(scales) == 4 and scales[0] == 1.0 and scales[1] == 1.0
+        assert scales[0] == 1.0 and scales[1] == 1.0
+        input_shape = infer_shape(inputs[0])
+        dims = len(input_shape)
         mode = attr.get('mode')
         if mode == b'nearest':
             method = "nearest_neighbor"
         elif mode == b'linear':
-            method = "bilinear"
+            method = "trilinear" if dims == 5 else "bilinear"
         else:
             raise tvm.error.OpAttributeInvalid(
                 'Value {} in attribute "mode" of operator Upsample is not valid.'.format(mode))
-        attr = {'scale_h': scales[-2], 'scale_w': scales[-1], 'method': method,
-                'layout': 'NCHW', 'align_corners': True}
-        return AttrCvt('upsampling')(inputs, attr)
+
+        attr = {'scale_h': scales[-2],
+                'scale_w': scales[-1],
+                'method': method}
+        if dims == 5:
+            assert len(scales) == 5
+            attr['scale_d'] = scales[-3]
+            attr['layout'] = 'NCDHW'
+            op_name = 'upsampling3d'
+        else:
+            assert len(scales) == 4
+            attr['layout'] = 'NCHW'
+            attr['align_corners'] = True
+            op_name = 'upsampling'
+        return AttrCvt(op_name)(inputs, attr)
 
 class Shape(OnnxOpConverter):
     """ Operator converter for Shape.
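For context, here is a minimal sketch (not part of this commit) of how a 5-D Upsample graph reaches the new upsampling3d path when imported through the Relay frontend; the input name, shapes, and the opset-9 pin are illustrative:

# Build a 5-D ONNX Upsample (opset 9, scales as a second input) and import it.
# Assumes onnx and tvm are installed.
from onnx import helper, TensorProto
from tvm import relay

in_shape = (1, 1, 3, 3, 3)
scales = [1.0, 1.0, 2.0, 2.0, 2.0]

scales_node = helper.make_node(
    'Constant', inputs=[], outputs=['scales'],
    value=helper.make_tensor('scales_t', TensorProto.FLOAT,
                             dims=[len(scales)], vals=scales))
upsample_node = helper.make_node('Upsample', ['in', 'scales'], ['out'], mode='nearest')

graph = helper.make_graph(
    [scales_node, upsample_node], 'upsample3d_example',
    inputs=[helper.make_tensor_value_info('in', TensorProto.FLOAT, list(in_shape))],
    outputs=[helper.make_tensor_value_info('out', TensorProto.FLOAT, [1, 1, 6, 6, 6])])
model = helper.make_model(graph, producer_name='upsample3d_example',
                          opset_imports=[helper.make_opsetid('', 9)])

# The converter infers a 5-D input shape, so it picks layout NCDHW,
# method nearest_neighbor, and emits the upsampling3d op.
mod, params = relay.frontend.from_onnx(model, shape={'in': in_shape})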
tests/python/frontend/onnx/test_forward.py (76 additions, 1 deletion)
@@ -741,6 +741,30 @@ def _test_upsample_nearest():
         tvm.testing.assert_allclose(out_array, tvm_out)
 
 
+def _test_upsample3d_nearest():
+    scale = 2
+    in_shape = (1, 1, 3, 3, 3)
+    out_shape = (1, 1, 3*scale, 3*scale, 3*scale)
+    y = helper.make_node("Upsample", ['in'], [
+                         'out'], mode='nearest', scales=[1.0, 1.0, 2.0, 2.0, 2.0])
+
+    in_array = np.random.uniform(size=in_shape).astype(np.float32)
+    out_array = topi.testing.upsampling3d_python(
+        in_array, (scale, scale, scale), "NCDHW")
+
+    graph = helper.make_graph([y],
+                              'upsample_nearest_test',
+                              inputs=[helper.make_tensor_value_info(
+                                  "in", TensorProto.FLOAT, list(in_shape))],
+                              outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))])
+
+    model = helper.make_model(graph, producer_name='upsample_nearest_test')
+
+    for target, ctx in ctx_list():
+        tvm_out = get_tvm_output(
+            model, in_array, target, ctx, out_shape, 'float32')
+        tvm.testing.assert_allclose(out_array, tvm_out)
+
 def _test_upsample_bilinear():
     scale = 2
     in_shape = (1, 1, 3, 3)
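For intuition about the reference output: with integer scales, nearest-neighbor 3-D upsampling of an NCDHW tensor is just a repeat along the depth, height and width axes. A small numpy sketch (hypothetical helper name) that should agree with upsampling3d_python in nearest mode:

import numpy as np

def nearest_upsample3d_ref(data, scale_d, scale_h, scale_w):
    # data is NCDHW; each voxel is repeated scale_d x scale_h x scale_w times.
    out = np.repeat(data, scale_d, axis=2)
    out = np.repeat(out, scale_h, axis=3)
    out = np.repeat(out, scale_w, axis=4)
    return out

x = np.arange(27, dtype="float32").reshape(1, 1, 3, 3, 3)
assert nearest_upsample3d_ref(x, 2, 2, 2).shape == (1, 1, 6, 6, 6)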
@@ -800,11 +824,45 @@ def _test_upsample_bilinear_opset9():
         tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
 
 
+def _test_upsample3d_trilinear():
+    scale = 2
+    in_shape = (1, 1, 3, 3, 3)
+    out_shape = (1, 1, 3*scale, 3*scale, 3*scale)
+    y = helper.make_node("Upsample", ['in', 'scales'], ['out'], mode='linear')
+    scales = [1.0, 1.0, 2.0, 2.0, 2.0]
+    in_array = np.random.uniform(size=in_shape).astype(np.float32)
+    out_array = topi.testing.trilinear_resize3d_python(
+        in_array, (3*scale, 3*scale, 3*scale), "NCDHW", coordinate_transformation_mode="half_pixel")
+
+    ref_array = np.array(scales)
+    ref_node = helper.make_node('Constant',
+                                inputs=[],
+                                outputs=['scales'],
+                                value=onnx.helper.make_tensor(name='const_tensor',
+                                                              data_type=TensorProto.FLOAT,
+                                                              dims=ref_array.shape,
+                                                              vals=ref_array.flatten().astype(float)))
+
+    graph = helper.make_graph([ref_node, y],
+                              'upsample_trilinear_test',
+                              inputs=[helper.make_tensor_value_info(
+                                  "in", TensorProto.FLOAT, list(in_shape))],
+                              outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))])
+
+    model = helper.make_model(
+        graph, producer_name='upsample_trilinear_test')
+
+    for target, ctx in ctx_list():
+        tvm_out = get_tvm_output(
+            model, in_array, target, ctx, out_shape, 'float32')
+        tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
+
 def test_upsample():
     _test_upsample_nearest()
     _test_upsample_bilinear()
     _test_upsample_bilinear_opset9()
 
+    _test_upsample3d_nearest()
+    _test_upsample3d_trilinear()
 
 def _test_softmax(inshape, axis):
     opname = 'Softmax'
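The trilinear reference above samples with half_pixel coordinates: an output index x_out at scale s reads the input at (x_out + 0.5) / s - 0.5 along each of D, H and W before interpolating. A one-function sketch of that mapping:

def half_pixel_src_coord(x_out, scale):
    # ONNX 'half_pixel' coordinate transformation: map an output index back
    # to a (possibly fractional) input coordinate before interpolating.
    return (x_out + 0.5) / scale - 0.5

# For scale 2: output index 0 samples -0.25 (clamped at the border),
# index 1 samples 0.25, index 2 samples 0.75, and so on.
assert half_pixel_src_coord(1, 2.0) == 0.25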
@@ -1999,6 +2057,23 @@ def test_pooling():
                        mode=mode,
                        auto_pad='SAME_UPPER')
 
+        # Pool3D with stride
+        verify_pooling(x_shape=[1, 1, 32, 32, 32],
+                       kernel_shape=[3, 3, 3],
+                       strides=[2, 2, 2],
+                       pads=[1, 1, 1, 1, 1, 1],
+                       out_shape=[1, 1, 16, 16, 16],
+                       mode=mode)
+
+        # Pool3D with stride and autopadding
+        verify_pooling(x_shape=[1, 1, 32, 32, 32],
+                       kernel_shape=[3, 3, 3],
+                       strides=[2, 2, 2],
+                       pads=None,
+                       out_shape=[1, 1, 16, 16, 16],
+                       mode=mode,
+                       auto_pad='SAME_UPPER')
+
 
 def verify_lstm(seq_length,
                 batch_size,
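Both Pool3D cases expect 16 per spatial dimension: with explicit pads of 1, floor((32 + 1 + 1 - 3) / 2) + 1 = 16, and with SAME_UPPER auto-padding, ceil(32 / 2) = 16. A small sketch of the two formulas (helper names are illustrative):

import math

def pool_out_dim_explicit(in_dim, kernel, stride, pad_before, pad_after):
    # Standard pooling output-size formula with explicit padding.
    return (in_dim + pad_before + pad_after - kernel) // stride + 1

def pool_out_dim_same_upper(in_dim, stride):
    # SAME_UPPER auto-padding pads just enough that out = ceil(in / stride).
    return math.ceil(in_dim / stride)

assert pool_out_dim_explicit(32, 3, 2, 1, 1) == 16
assert pool_out_dim_same_upper(32, 2) == 16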
