[RELAY][DYN] Implementation of the dynamic pad operator #6284

Merged: 22 commits, Aug 20, 2020
22 changes: 11 additions & 11 deletions include/tvm/relay/attrs/nn.h
@@ -85,7 +85,7 @@ struct Conv1DAttrs : public tvm::AttrsNode<Conv1DAttrs> {
.set_default(NullValue<IndexExpr>());
TVM_ATTR_FIELD(kernel_size)
.describe("Specifies the dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(data_layout)
.set_default("NCW")
.describe(
@@ -148,7 +148,7 @@ struct Conv2DAttrs : public tvm::AttrsNode<Conv2DAttrs> {
.set_default(NullValue<IndexExpr>());
TVM_ATTR_FIELD(kernel_size)
.describe("Specifies the dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(data_layout)
.set_default("NCHW")
.describe(
@@ -242,7 +242,7 @@ struct Conv2DWinogradAttrs : public tvm::AttrsNode<Conv2DWinogradAttrs> {
.set_default(NullValue<IndexExpr>());
TVM_ATTR_FIELD(kernel_size)
.describe("Specifies the dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(data_layout)
.set_default("NCHW")
.describe(
@@ -331,7 +331,7 @@ struct Conv3DAttrs : public tvm::AttrsNode<Conv3DAttrs> {
.set_default(NullValue<IndexExpr>());
TVM_ATTR_FIELD(kernel_size)
.describe("Specifies the dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(data_layout)
.set_default("NCDHW")
.describe(
@@ -381,7 +381,7 @@ struct Conv3DTransposeAttrs : public tvm::AttrsNode<Conv3DTransposeAttrs> {
"i.e. the number of output channels in the convolution.");
TVM_ATTR_FIELD(kernel_size)
.describe("The dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(strides)
.set_default(Array<IndexExpr>({1, 1, 1}))
.describe("The strides of the convolution.");
@@ -480,7 +480,7 @@ struct Conv3DWinogradAttrs : public tvm::AttrsNode<Conv3DWinogradAttrs> {
.set_default(NullValue<IndexExpr>());
TVM_ATTR_FIELD(kernel_size)
.describe("Specifies the dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(data_layout)
.set_default("NCDHW")
.describe(
@@ -539,7 +539,7 @@ struct Conv2DTransposeAttrs : public tvm::AttrsNode<Conv2DTransposeAttrs> {
"i.e. the number of output channels in the convolution.");
TVM_ATTR_FIELD(kernel_size)
.describe("The dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(strides)
.set_default(Array<IndexExpr>({1, 1}))
.describe("The strides of the convolution.");
@@ -626,7 +626,7 @@ struct Conv1DTransposeAttrs : public tvm::AttrsNode<Conv1DTransposeAttrs> {
"i.e. the number of output channels in the convolution.");
TVM_ATTR_FIELD(kernel_size)
.describe("The dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(strides)
.set_default(Array<IndexExpr>({1}))
.describe("The strides of the convolution.");
@@ -1016,7 +1016,7 @@ struct UpSampling3DAttrs : public tvm::AttrsNode<UpSampling3DAttrs> {
/*! \brief Attributes used for the padding operator */
struct PadAttrs : public tvm::AttrsNode<PadAttrs> {
double pad_value;
Array<Array<IndexExpr> > pad_width;
Array<Array<Integer>> pad_width;
std::string pad_mode;

TVM_DECLARE_ATTRS(PadAttrs, "relay.attrs.PadAttrs") {
@@ -1037,7 +1037,7 @@ struct PadAttrs : public tvm::AttrsNode<PadAttrs> {
/*! \brief Attributes used for the MirrorPadding operator */
struct MirrorPadAttrs : public tvm::AttrsNode<MirrorPadAttrs> {
std::string mode;
Array<Array<IndexExpr> > pad_width;
Array<Array<IndexExpr>> pad_width;

TVM_DECLARE_ATTRS(MirrorPadAttrs, "relay.attrs.MirrorPadAttrs") {
TVM_ATTR_FIELD(mode)
@@ -1242,7 +1242,7 @@ struct DeformableConv2DAttrs : public tvm::AttrsNode<DeformableConv2DAttrs> {
.set_default(NullValue<IndexExpr>());
TVM_ATTR_FIELD(kernel_size)
.describe("Specifies the dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(data_layout)
.set_default("NCHW")
.describe(
28 changes: 20 additions & 8 deletions include/tvm/topi/nn.h
@@ -124,6 +124,8 @@ inline tvm::te::Tensor prelu(const tvm::te::Tensor& x, const tvm::te::Tensor& sl
* "constant" pads with constant_value;
* "edge" pads using the edge values of the input array;
* "reflect" pads by reflecting values with respect to the edges.
* \param dyn_output_shape Output shape of the pad op, default nullptr.
* You only need to pass this in if the shape was evaluated dynamically.
* \param name The name of the operation
* \param tag The tag to mark the operation
*
@@ -151,30 +153,40 @@ inline tvm::te::Tensor prelu(const tvm::te::Tensor& x, const tvm::te::Tensor& sl
inline tvm::te::Tensor pad(const tvm::te::Tensor& t, const tvm::Array<tvm::PrimExpr>& pad_before,
tvm::Array<tvm::PrimExpr> pad_after = tvm::Array<tvm::PrimExpr>(),
PrimExpr pad_value = PrimExpr(), std::string name = "T_pad",
std::string tag = kElementWise, std::string pad_mode = "constant") {
std::string tag = kElementWise, std::string pad_mode = "constant",
const Array<PrimExpr>* dyn_output_shape = nullptr) {
if (pad_after.size() < pad_before.size()) {
for (size_t i = pad_after.size(); i < pad_before.size(); ++i) {
pad_after.push_back(pad_before[i]);
}
}

arith::Analyzer analyzer;
CHECK_GE(pad_before.size(), 1);
CHECK_EQ(pad_before.size(), pad_after.size());
tvm::Array<tvm::PrimExpr> output_shape;
tvm::Array<tvm::PrimExpr> pad_before_int32;
tvm::Array<tvm::PrimExpr> pad_after_int32;

for (const auto& ele : pad_before) {
pad_before_int32.push_back(tvm::cast(tvm::DataType::Int(32), ele));
}
for (const auto& ele : pad_after) {
pad_after_int32.push_back(tvm::cast(tvm::DataType::Int(32), ele));
}
for (size_t i = 0; i < t->shape.size(); ++i) {
if (i >= pad_before.size()) {
output_shape.push_back(t->shape[i]);
} else {
output_shape.push_back(
analyzer.Simplify(t->shape[i] + pad_before_int32[i] + pad_after_int32[i]));

tvm::Array<tvm::PrimExpr> output_shape;
if (dyn_output_shape == nullptr) {
for (size_t i = 0; i < t->shape.size(); ++i) {
if (i >= pad_before.size()) {
output_shape.push_back(t->shape[i]);
} else {
output_shape.push_back(
analyzer.Simplify(t->shape[i] + pad_before_int32[i] + pad_after_int32[i]));
}
}
} else {
for (size_t i = 0; i < dyn_output_shape->size(); i++) {
output_shape.push_back((*dyn_output_shape)[i]);
}
}

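When a caller supplies dyn_output_shape, its values are expected to equal the padded extents that pad() would otherwise derive itself. A plain-Python sketch of that relation (illustrative only, not a TVM API):

# Each output extent is the input extent plus the padding on both sides.
def padded_extents(in_shape, pad_before, pad_after):
    return [d + b + a for d, b, a in zip(in_shape, pad_before, pad_after)]

assert padded_extents([1, 3, 4], [0, 1, 2], [0, 1, 2]) == [1, 5, 8]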
20 changes: 20 additions & 0 deletions python/tvm/relay/op/dyn/nn/__init__.py
@@ -0,0 +1,20 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin, invalid-name
"""The Relay namespace containing dynamic ops."""

from . import _nn
20 changes: 20 additions & 0 deletions python/tvm/relay/op/dyn/nn/_make.py
@@ -0,0 +1,20 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi

tvm._ffi._init_api("relay.op.dyn.nn._make", __name__)
46 changes: 46 additions & 0 deletions python/tvm/relay/op/dyn/nn/_nn.py
@@ -0,0 +1,46 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments, consider-using-in
"""Backend compiler related feature registration"""

from __future__ import absolute_import

from tvm.te.hybrid import script
from ...op import register_shape_func
from ...op import register_broadcast_schedule

# pad
register_broadcast_schedule("dyn.nn.pad")

#####################
# Shape functions #
#####################

@script
def _dyn_pad_shape_func(data, pad_width):
ndim = len(data.shape)
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
out[i] = int64(pad_width[i, 0] + pad_width[i, 1] + data.shape[i])
return out

@register_shape_func("dyn.nn.pad", True)
def pad_shape_func(attrs, inputs, data):
"""
Shape function for dynamic pad op.
"""
return [_dyn_pad_shape_func(inputs[0], inputs[1])]
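Hybrid-script functions can also run in pure-Python software emulation, so the shape function above can be sanity-checked with plain numpy inputs; a minimal sketch, assuming the emulation fallback behaves as documented:

import numpy as np

data = np.zeros((1, 3, 4), dtype="float32")  # only data.shape is read
pad_width = np.array([[0, 0], [1, 1], [2, 2]], dtype="int64")
out = _dyn_pad_shape_func(data, pad_width)
print(out)  # expected [1, 5, 8]: each axis grows by its before + after padding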
14 changes: 11 additions & 3 deletions python/tvm/relay/op/nn/nn.py
@@ -19,7 +19,9 @@
from tvm.relay import expr

from . import _make
from ..dyn.nn import _make as _dyn_make
from .util import get_pad_tuple1d, get_pad_tuple2d, get_pad_tuple3d
from ...expr import const, Expr


def conv1d(data,
@@ -1410,7 +1412,7 @@ def prelu(data, alpha, axis=1):

def pad(data,
pad_width,
pad_value=0.0,
pad_value=0,
pad_mode='constant'):
r"""Padding

@@ -1421,10 +1423,10 @@ def pad(data,
----------
data: tvm.relay.Expr
The input data to the operator
pad_width: tuple of <tuple of <int>>, required
pad_width: tuple of <tuple of <int>>, or tvm.relay.Expr, required
Number of values padded to the edges of each axis, in the format
of ((before_1, after_1), ..., (before_N, after_N))
pad_value: float, optional, default=0.0
pad_value: float, or tvm.relay.Expr, optional, default=0
The value used for padding
pad_mode: 'constant', 'edge', 'reflect'
'constant' pads with constant_value pad_value
@@ -1435,6 +1437,12 @@
result : tvm.relay.Expr
The computed result.
"""
if (isinstance(pad_width, Expr) or (isinstance(pad_value, Expr))):
if not isinstance(pad_width, Expr):
pad_width = const(list(pad_width))
if not isinstance(pad_value, Expr):
pad_value = const(pad_value)
return _dyn_make.pad(data, pad_width, pad_value, pad_mode)
return _make.pad(data, pad_width, pad_value, pad_mode)


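A usage sketch of the dispatch introduced above: a plain tuple keeps the static lowering path, while passing a relay Expr for pad_width or pad_value routes the call to the new dynamic operator:

import numpy as np
from tvm import relay

x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")

# Static pad_width: handled by the existing _make.pad path.
y_static = relay.nn.pad(x, ((0, 0), (0, 0), (1, 1), (1, 1)))

# pad_width as an Expr: routed to dyn.nn.pad, whose output shape is
# resolved at runtime by the shape function registered earlier.
pw = relay.const(np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype="int64"))
y_dyn = relay.nn.pad(x, pw)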
2 changes: 1 addition & 1 deletion python/tvm/te/hybrid/calls.py
@@ -73,7 +73,7 @@ def _math_intrin(func_id, args):
from tvm.tir import op
return getattr(op, func_id)(*args)

sqrt = log = exp = tanh = sigmoid = power = popcount = _math_intrin #pylint: disable=invalid-name
sqrt = log = exp = tanh = sigmoid = power = popcount = round = _math_intrin #pylint: disable=invalid-name


def _min_max(func_id, args):
1 change: 1 addition & 0 deletions python/tvm/te/hybrid/runtime.py
@@ -126,6 +126,7 @@ def max_num_threads(allow_none=True):
'exp' : numpy.exp,
'sigmoid' : sigmoid,
'popcount' : popcount,
'round' : round,
'likely' : lambda cond: cond,
'uint8' : numpy.uint8,
'uint16' : numpy.uint16,
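With round registered both as a hybrid intrinsic and in the runtime table, it becomes usable inside hybrid-script bodies, for example to round a scaled extent in a shape function. A hedged sketch (the function below is illustrative, not part of this PR):

from tvm.te.hybrid import script

@script
def _scaled_extent(shape, scale):
    out = output_tensor((1,), "int64")
    # round() now lowers to the tir round intrinsic inside hybrid script
    out[0] = int64(round(float64(shape[0]) * scale[0]))
    return out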
1 change: 0 additions & 1 deletion python/tvm/topi/nn/upsampling.py
@@ -57,7 +57,6 @@ def upsampling(data, scale_h, scale_w, layout="NCHW", method='nearest_neighbor',
elif layout == "NHWC":
out_shape = (simplify(topi.cast(te.round(data.shape[1] * scale_h), data.shape[1].dtype)),
simplify(topi.cast(te.round(data.shape[2] * scale_w), data.shape[2].dtype)))

else:
raise ValueError("not support this layout {} yet".format(layout))
coord_trans = "align_corners" if align_corners else "asymmetric"