Skip to content

Commit

Permalink
[Relax][Op] Introducing a bunch of high-level operators (apache#12)
Browse files Browse the repository at this point in the history
* Revert "Add High-level Op Support (apache#5)"

This reverts commit 67960ee474c8797ca9111527f7b9eb6693377ad6.

* [CherryPick] Relax ops: dense/conv2d/max_pool2d/relu/softmax/flatten (tlc-pack#266)

* relax.transpose

* relax.subtract

* relax.nn.batch_norm

* relax.reshape

* relax.expand_dims

* relax.squeeze

* relax.concatenate

Co-authored-by: sung <[email protected]>
  • Loading branch information
MasterJH5574 and sunggg committed Nov 19, 2022
1 parent 35cfadd commit adb2961
Show file tree
Hide file tree
Showing 33 changed files with 2,566 additions and 220 deletions.
6 changes: 6 additions & 0 deletions include/tvm/ir/attrs.h
Original file line number Diff line number Diff line change
Expand Up @@ -168,6 +168,8 @@ class BaseAttrsNode : public Object {
* \return The fields in the Attrs.
*/
TVM_DLL virtual Array<AttrFieldInfo> ListFieldInfo() const = 0;

TVM_DLL virtual String GetName() const = 0;
/*!
* \brief Initialize the attributes by arguments.
* \param kwargs The key value pairs for initialization.
Expand Down Expand Up @@ -214,6 +216,7 @@ class DictAttrsNode : public BaseAttrsNode {
void VisitNonDefaultAttrs(AttrVisitor* v) final;
void InitByPackedArgs(const runtime::TVMArgs& args, bool allow_unknown) final;
Array<AttrFieldInfo> ListFieldInfo() const final;
String GetName() const final;

// type info
static constexpr const char* _type_key = "DictAttrs";
Expand Down Expand Up @@ -847,6 +850,7 @@ class AttrsNode : public BaseAttrsNode {
ICHECK_EQ(args.size() % 2, 0);
const int kLinearSearchBound = 16;
int hit_count = 0;

// applies two strategies to lookup
if (args.size() < kLinearSearchBound) {
// linear search.
Expand Down Expand Up @@ -918,6 +922,8 @@ class AttrsNode : public BaseAttrsNode {
return visitor.fields_;
}

String GetName() const final { return DerivedType::_type_key; }

private:
DerivedType* self() const {
return const_cast<DerivedType*>(static_cast<const DerivedType*>(this));
Expand Down
253 changes: 229 additions & 24 deletions include/tvm/relax/op_attr_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,163 @@ using FInferType = runtime::TypedPackedFunc<Type(const Call& call, DiagnosticCon
*/
using FCallPacked = String;

/*!
 * \brief Computation description interface.
 *
 * \note This function has a special convention
 * for functions with tuple input/output.
 *
 * So far we restrict tuple support to the following case:
 * - Function which takes a single tuple as input.
 * - Function which outputs a single tuple.
 *
 * In both cases, the tuple is flattened as array.
 *
 * \param attrs The attribute of the primitive
 * \param inputs The input tensors.
 * \param out_type The output type information
 *                 these are always placeholders.
 * \return The output compute description of the operator.
 */
using FTVMCompute = runtime::TypedPackedFunc<Array<te::Tensor>(
    const Attrs& attrs, const Array<te::Tensor>& inputs, const Type& out_type)>;

/*! \brief Attributes used in MaxPool2d operator */
struct MaxPool2DAttrs : public tvm::AttrsNode<MaxPool2DAttrs> {
  Array<PrimExpr> pool_size;
  Array<PrimExpr> strides;
  Array<PrimExpr> padding;
  Array<PrimExpr> dilation;
  tvm::String layout;
  tvm::String out_layout;
  bool ceil_mode;

  TVM_DECLARE_ATTRS(MaxPool2DAttrs, "relax.attrs.MaxPool2DAttrs") {
    TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
    // Fixed describe text: this is a pooling op, not a convolution (copy-paste slip).
    TVM_ATTR_FIELD(strides)
        .set_default(Array<PrimExpr>({1, 1}))
        .describe("Specifies the strides of the pooling operation.");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<PrimExpr>({1, 1}))
        .describe("Specifies the dilation of the pooling operation.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<PrimExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "Padding support both symmetric and asymmetric as"
            "one int : same padding used on all sides"
            "two int : bottom, right will use same padding as top, left"
            "four int : padding width in the order of (top, left, bottom, right)");
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
        "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
        "dimensions respectively. Pooling is applied on the 'H' and"
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc."
            "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
            "dimensions respectively. Pooling is applied on the 'H' and"
            "'W' dimensions.");
    TVM_ATTR_FIELD(ceil_mode).set_default(false).describe(
        "When true, will use ceil instead of floor to compute the output shape.");
  }
};  // struct MaxPool2DAttrs

/*! \brief Attributes used in Conv2d operator */
struct Conv2DAttrs : public tvm::AttrsNode<Conv2DAttrs> {
  Array<PrimExpr> strides;
  Array<PrimExpr> padding;
  Array<PrimExpr> dilation;
  int groups;
  // Number of output channels; NullValue<PrimExpr>() when left to be inferred from the weight.
  PrimExpr channels;
  // Spatial dims of the kernel; NullValue when unspecified.
  Array<PrimExpr> kernel_size;
  String data_layout;
  String kernel_layout;
  String out_layout;
  // NullValue<DataType> (0-bit dtype) means "same as input"; see comment below.
  DataType out_dtype;

  TVM_DECLARE_ATTRS(Conv2DAttrs, "relax.attrs.Conv2DAttrs") {
    TVM_ATTR_FIELD(strides)
        .set_default(Array<PrimExpr>({1, 1}))
        .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<PrimExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "Padding support both symmetric and asymmetric as"
            "one int : same padding used on all sides"
            "two int : bottom, right will use same padding as top, left"
            "four int : padding width in the order of (top, left, bottom, right)");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<PrimExpr>({1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs."
        "At groups=1, all inputs are convolved to all outputs."
        "At groups=2, the operation becomes equivalent to having two convolution"
        "layers side by side, each seeing half the input channels, and producing"
        "half the output channels, and both subsequently concatenated.");
    TVM_ATTR_FIELD(channels)
        .describe(
            "The number of output channels in the convolution."
            " If it is not set, inferred by shape of the weight.")
        .set_default(NullValue<PrimExpr>());
    TVM_ATTR_FIELD(kernel_size)
        .describe("Specifies the dimensions of the convolution window.")
        .set_default(NullValue<Array<PrimExpr>>());
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCHW")
        .describe(
            "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
            "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
            "dimensions respectively. Convolution is applied on the 'H' and"
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIHW")
        .describe(
            "Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc."
            "'O', 'I', 'H', 'W' stands for num_filter, input_channel, height, and width"
            "dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc."
            "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
            "dimensions respectively. Default to be same as input layout.");

    // use 0 bits to indicate none.
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
  }
};  // struct Conv2DAttrs

/*! \brief Attributes for dense operator */
struct DenseAttrs : public tvm::AttrsNode<DenseAttrs> {
  // Number of hidden (output) units of the dense transformation.
  PrimExpr units;
  // NullValue<DataType> (0-bit dtype) means "same as input"; see comment below.
  DataType out_dtype;

  TVM_DECLARE_ATTRS(DenseAttrs, "relax.attrs.DenseAttrs") {
    TVM_ATTR_FIELD(units).describe("Number of hidden units of the dense transformation.");

    // use 0 bits to indicate none.
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
  }
};  // struct DenseAttrs

/*! \brief Attributes used in softmax operators */
struct SoftmaxAttrs : public tvm::AttrsNode<SoftmaxAttrs> {
  // Axis along which softmax is computed; defaults to the last axis (-1).
  int axis;

  TVM_DECLARE_ATTRS(SoftmaxAttrs, "relax.attrs.SoftmaxAttrs") {
    TVM_ATTR_FIELD(axis).set_default(-1).describe("The axis to sum over when computing softmax.");
  }
};  // struct SoftmaxAttrs

/*! \brief Attributes used in unique operator */
struct UniqueAttrs : public tvm::AttrsNode<UniqueAttrs> {
bool sorted;
Expand Down Expand Up @@ -109,33 +266,81 @@ struct AssertOpAttrs : public tvm::AttrsNode<AssertOpAttrs> {
}
};

/*! \brief Attributes used in MaxPool2d operator */
struct MaxPool2dAttrs : public tvm::AttrsNode<MaxPool2dAttrs> {
Array<PrimExpr> kernel_size;
Array<PrimExpr> stride;
Array<PrimExpr> padding;
Array<PrimExpr> dilation;
TVM_DECLARE_ATTRS(MaxPool2dAttrs, "relax.attrs.MaxPool2dAttrs") {
TVM_ATTR_FIELD(kernel_size).describe("The size of the window to take a max over.");
TVM_ATTR_FIELD(stride).describe("The stride of the window.");
TVM_ATTR_FIELD(padding).describe("The padding on the input.");
TVM_ATTR_FIELD(dilation).describe("The stride of elements in the window.");
/*! \brief Attributes used in transpose operator */
struct TransposeAttrs : public tvm::AttrsNode<TransposeAttrs> {
  // Target permutation of the axes; NullOpt means reverse the axis order.
  Optional<Array<Integer>> axes;

  TVM_DECLARE_ATTRS(TransposeAttrs, "relax.attrs.TransposeAttrs") {
    TVM_ATTR_FIELD(axes)
        .describe("The target axes order, reverse order if not specified.")
        .set_default(Optional<Array<Integer>>{NullOpt});
  }
};  // NOTE(review): stray closer below is a diff-render artifact of the removed MaxPool2dAttrs block
};  // struct TransposeAttrs

/*! \brief Attributes used in Conv2d operator */
struct Conv2dAttrs : public tvm::AttrsNode<Conv2dAttrs> {
Array<PrimExpr> kernel_size;
Array<PrimExpr> stride;
Array<PrimExpr> padding;
Array<PrimExpr> dilation;
TVM_DECLARE_ATTRS(Conv2dAttrs, "relax.attrs.Conv2dAttrs") {
TVM_ATTR_FIELD(kernel_size).describe("The size of the convolving kernel.");
TVM_ATTR_FIELD(stride).describe("The stride of the convolution.");
TVM_ATTR_FIELD(padding).describe("The padding on the input.");
TVM_ATTR_FIELD(dilation).describe("The spacing between kernel elements.");
/*! \brief Attributes used in batch_norm operator */
struct BatchNormAttrs : public tvm::AttrsNode<BatchNormAttrs> {
  // Shape axis that denotes the channel dimension; defaults to 1 (NCHW-style).
  int axis;
  // Small constant added to the variance for numerical stability.
  double epsilon;
  // Whether the beta offset is applied.
  bool center;
  // Whether the gamma scale is applied.
  bool scale;

  TVM_DECLARE_ATTRS(BatchNormAttrs, "relax.attrs.BatchNormAttrs") {
    TVM_ATTR_FIELD(axis).describe("Specify which shape axis denotes the channel.").set_default(1);
    TVM_ATTR_FIELD(epsilon)
        .describe("Small float added to variance to avoid dividing by zero")
        .set_default(1e-5);
    TVM_ATTR_FIELD(center)
        .describe("If True, add offset of beta to normalized tensor. If False, beta is ignored")
        .set_default(true);
    TVM_ATTR_FIELD(scale)
        .describe(
            "If True, multiply by gamma. If False, gamma is not used. "
            "When the next layer is piecewise linear (also, e.g., nn.relu), "
            "this can be disabled since the scaling will be done by the next layer.")
        .set_default(true);
  }
};  // NOTE(review): stray closer below is a diff-render artifact of the removed Conv2dAttrs block
};  // struct BatchNormAttrs

/*! \brief Attributes used in expand_dims operators */
struct ExpandDimsAttrs : public tvm::AttrsNode<ExpandDimsAttrs> {
  // Positions at which length-1 axes are inserted; see describe() for negative-index rules.
  Array<Integer> axis;

  TVM_DECLARE_ATTRS(ExpandDimsAttrs, "relax.attrs.ExpandDimsAttrs") {
    TVM_ATTR_FIELD(axis).describe(
        "The axes at which the input array is expanded."
        "Each element should lie in range `[-data.ndim - 1, data.ndim]`."
        "If `axis < 0`, it is the first axis inserted;"
        "If `axis >= 0`, it is the last axis inserted in Python's negative indexing.");
  }
};  // struct ExpandDimsAttrs

/*! \brief Attributes used in squeeze operators */
struct SqueezeAttrs : public tvm::AttrsNode<SqueezeAttrs> {
  // Axes to remove; NullOpt means squeeze every axis of extent 1.
  Optional<Array<Integer>> axis;

  TVM_DECLARE_ATTRS(SqueezeAttrs, "relax.attrs.SqueezeAttrs") {
    TVM_ATTR_FIELD(axis)
        .describe(
            "The axis to squeeze in the input tensor."
            "If `axis = None`, all axis of dimension 1 get squeezed;"
            "Else, the dimension in axes get squeezed."
            // Fixed grammar in user-facing description: "does not has" -> "does not have".
            "It is an error if an axis does not have dimension 1.")
        .set_default(Optional<Array<Integer>>{NullOpt});
  }
};  // struct SqueezeAttrs

/*! \brief Attributes used in concatenate operators */
struct ConcatenateAttrs : public tvm::AttrsNode<ConcatenateAttrs> {
  // Concatenation axis; defaults to 0. NOTE(review): NullOpt semantics are not
  // defined in this header — presumably flatten-before-concat; verify against the op impl.
  Optional<Integer> axis;

  TVM_DECLARE_ATTRS(ConcatenateAttrs, "relax.attrs.ConcatenateAttrs") {
    TVM_ATTR_FIELD(axis)
        .describe(
            "The axis at which the input arrays are concatenated."
            "Should lie in range `[-ndim, ndim)`.")
        .set_default(Integer(0));
  }
};  // struct ConcatenateAttrs

} // namespace relax
} // namespace tvm
Expand Down
10 changes: 10 additions & 0 deletions python/tvm/ir/attrs.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,16 @@ class Attrs(Object):
Attrs is passed as the first argument to these functions.
"""

def get_name(self):
    """Return the registered type key name of this Attrs node.

    On the C++ side this resolves to the attrs node's ``_type_key``
    (e.g. ``"relax.attrs.Conv2DAttrs"``).

    Returns
    -------
    name : str
        The attrs type key name.
    """
    return _ffi_api.AttrsGetName(self)

def list_field_info(self):
"""Get fields information
Expand Down
1 change: 1 addition & 0 deletions python/tvm/relax/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
from . import analysis
from . import transform
from . import expr_functor
from .op import nn

# Expr
Expr = expr.Expr
Expand Down
1 change: 1 addition & 0 deletions python/tvm/relax/op/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,5 +22,6 @@
from .nn import *
from .op_attrs import *
from .tensor import *
from .transform import *
from . import builtin
from . import memory
File renamed without changes.
Loading

0 comments on commit adb2961

Please sign in to comment.