Commit

relay op strategy
fix lint

bitpack strategy

bitserial_dense (apache#6)

* update strategy

* address comments

fix a few topi tests

Dense strategy (apache#5)

* dense

* add bifrost; remove comments

* address comment

Refactor x86 conv2d_NCHWc (apache#4)

* Refactor x86 conv2d

* Add x86 depthwise_conv2d_NCHWc

* Add back topi x86 conv2d_nchw

* Merge x86 conv2d_nchw and conv2d_NCHWc

* Minor fix for x86 conv2d

fix more strategy

Add x86 conv2d_NCHWc_int8 strategy (apache#8)

* Add x86 conv2d_NCHWc_int8 strategy

* Remove contrib_conv2d_nchwc_int8

* Fix generic conv2d_NCHWc for int8

* Fix topi arm_cpu conv2d_NCHWc_int8

update x86 conv2d

enable specifying relay ops to be tuned for autotvm

add cuda conv2d strategy

add conv2d strategy for rocm

add conv2d strategy for hls

add conv2d strategy for arm cpu

add conv2d strategy for mali

add conv2d strategy for bifrost

add conv2d strategy for intel graphics

clean up and fix lint

remove template keys from autotvm

remove 2 in the func name

address comments

fix
icemelon committed Feb 6, 2020
1 parent 62543d4 commit 147e76d
Showing 200 changed files with 6,895 additions and 5,790 deletions.
191 changes: 184 additions & 7 deletions include/tvm/relay/op_attr_types.h
@@ -29,6 +29,7 @@
#include <tvm/relay/type.h>
#include <tvm/relay/expr.h>
#include <tvm/target/target.h>
#include <tvm/target/generic_func.h>
#include <tvm/tir/data_layout.h>
#include <string>

@@ -105,9 +106,8 @@ using TShapeDataDependant = bool;
*/
using FTVMCompute = runtime::TypedPackedFunc<
Array<te::Tensor>(const Attrs& attrs,
const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target)>;
const Array<te::Tensor>& inputs,
const Type& out_type)>;

/*!
* \brief Build the computation schedule for
@@ -123,6 +123,16 @@ using FTVMSchedule = runtime::TypedPackedFunc<
const Array<te::Tensor>& outs,
const Target& target)>;

/*!
* \brief Generate the strategy of operators. This function is a generic
* function and can be re-defined for different targets.
*
* The function signature of generic function is:
* OpStrategy(const Attrs& attrs, const Array<Tensor>& inputs,
* const Type& out_type, const Target& target)
*/
using FTVMStrategy = GenericFunc;

/*!
* \brief Alternate the layout of operators or replace the
* operator with other expressions. This function will be invoked
@@ -136,7 +146,8 @@ using FTVMSchedule = runtime::TypedPackedFunc<
using FTVMAlterOpLayout = runtime::TypedPackedFunc<
Expr(const Attrs& attrs,
const Array<Expr>& args,
const Array<te::Tensor>& tinfos)>;
const Array<te::Tensor>& tinfos,
const Type& out_type)>;

/*!
* \brief Convert the layout of operators or replace the
@@ -191,9 +202,7 @@ using FForwardRewrite = runtime::TypedPackedFunc<
* \brief Gradient for a specific op.
*
* \param orig_call the original Expr.
*
* \param output_grad the gradient of the Expr.
*
* \return the gradient for each parameter.
*/
using FPrimalGradient = runtime::TypedPackedFunc<tvm::Array<Expr>(const Expr& orig_call,
@@ -207,14 +216,182 @@ enum AnyCodegenStrategy {
kVariableDimensions
};

/* \brief A runtime representation of shape. */
/*! \brief A runtime representation of shape. */
using Shape = Array<IndexExpr>;

using FShapeFunc = runtime::TypedPackedFunc<
Array<te::Tensor>(const Attrs& attrs,
const Array<te::Tensor>& inputs,
const Array<IndexExpr>& out_ndims)>;

/*!
* \brief Operator implementation in TVM.
*/
class OpImplementNode : public Object {
public:
/*! \brief Compute function */
FTVMCompute fcompute;
/*! \brief Schedule function */
FTVMSchedule fschedule;
/*! \brief Priority level */
Integer plevel;

void VisitAttrs(tvm::AttrVisitor* v) {
v->Visit("plevel", &plevel);
}

static constexpr const char* _type_key = "relay.OpImplement";
TVM_DECLARE_FINAL_OBJECT_INFO(OpImplementNode, Object);
};

/*!
* \brief Operator implementation class.
*/
class OpImplement : public ObjectRef {
public:
/*! \brief default constructor */
OpImplement() {}
/*! \brief constructor from node pointer */
explicit OpImplement(ObjectPtr<Object> n) : ObjectRef(n) {}
/*!
* \brief access the internal node container
* \return the pointer to the internal node container
*/
inline const OpImplementNode* operator->() const;
/*!
* \brief Invoke the operator compute function.
* \param attrs The attribute of the primitive
* \param inputs The input tensors.
* \param out_type The output type information.
* \return The output compute description of the operator.
*/
Array<te::Tensor> Compute(const Attrs& attrs,
const Array<te::Tensor>& inputs,
const Type& out_type);
/*!
* \brief Build the computation schedule.
* \param attrs The attribute of the node.
* \param outs The output tensors.
* \param target The build target.
* \return The computation schedule.
*/
te::Schedule Schedule(const Attrs& attrs,
const Array<te::Tensor>& outs,
const Target& target);
};

/*!
* \brief Specialized implementations for operators under certain conditions.
*/
class OpSpecializationNode : public Object {
public:
/*! \brief List of implementations. */
Array<OpImplement> implements;
/*! \brief Condition to enable the specialization.
* Could be undefined to represent the generic case. */
te::SpecializedCondition condition;

void VisitAttrs(tvm::AttrVisitor* v) {
v->Visit("condition", &condition);
v->Visit("implements", &implements);
}

static constexpr const char* _type_key = "relay.OpSpecialization";
TVM_DECLARE_FINAL_OBJECT_INFO(OpSpecializationNode, Object);
};

/*!
* \brief Operator specialization class.
*/
class OpSpecialization : public ObjectRef {
public:
OpSpecialization() {}
explicit OpSpecialization(ObjectPtr<Object> n) : ObjectRef(n) {}
/*!
* \brief access the internal node container
* \return the pointer to the internal node container
*/
inline const OpSpecializationNode* operator->() const;
/*!
* \brief access the internal node container
* \return the pointer to the internal node container
*/
inline OpSpecializationNode* operator->();
/*!
* \brief Add an implementation.
* \param fcompute Compute function
* \param fschedule Schedule function
* \param plevel Priority level of this implementation.
*/
void AddImplement(FTVMCompute fcompute, FTVMSchedule fschedule,
int plevel);
};

/*!
* \brief Operator strategy to choose implementation.
*/
class OpStrategyNode : public Object {
public:
/*! \brief List of operator specializations. */
Array<OpSpecialization> specializations;

void VisitAttrs(tvm::AttrVisitor* v) {
v->Visit("specializations", &specializations);
}

static constexpr const char* _type_key = "relay.OpStrategy";
TVM_DECLARE_FINAL_OBJECT_INFO(OpStrategyNode, Object);
};

/*!
* \brief Operator strategy class.
*/
class OpStrategy : public ObjectRef {
public:
/*! \brief default constructor */
OpStrategy() {}
/*! \brief constructor from node pointer */
explicit OpStrategy(ObjectPtr<Object> n) : ObjectRef(n) {}
/*!
* \brief access the internal node container
* \return the pointer to the internal node container
*/
inline const OpStrategyNode* operator->() const;
/*!
* \brief access the internal node container
* \return the pointer to the internal node container
*/
inline OpStrategyNode* operator->();
/*!
* \brief Add an implementation.
* \param fcompute Compute function
* \param fschedule Schedule function
* \param plevel Priority level of this implementation.
*/
void AddImplement(FTVMCompute fcompute, FTVMSchedule fschedule, int plevel);
};

// implementations
inline const OpImplementNode* OpImplement::operator->() const {
return static_cast<const OpImplementNode*>(get());
}

inline const OpSpecializationNode* OpSpecialization::operator->() const {
return static_cast<const OpSpecializationNode*>(get());
}

inline OpSpecializationNode* OpSpecialization::operator->() {
return static_cast<OpSpecializationNode*>(get_mutable());
}

inline const OpStrategyNode* OpStrategy::operator->() const {
return static_cast<const OpStrategyNode*>(get());
}

inline OpStrategyNode* OpStrategy::operator->() {
return static_cast<OpStrategyNode*>(get_mutable());
}

} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_ATTR_TYPES_H_
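
Below is a minimal sketch, not part of this commit, of how the classes introduced above are meant to compose: an OpStrategy collects OpImplement entries via AddImplement, and a selected implementation then drives compute and schedule creation. The function name LowerWithStrategy and the fcompute/fschedule arguments are hypothetical placeholders.

// Hedged sketch only; LowerWithStrategy, fcompute, and fschedule are
// illustrative placeholders, not APIs added by this commit.
#include <tvm/relay/op_attr_types.h>
#include <tvm/runtime/memory.h>

namespace tvm {
namespace relay {

te::Schedule LowerWithStrategy(const Attrs& attrs,
                               const Array<te::Tensor>& inputs,
                               const Type& out_type,
                               const Target& target,
                               FTVMCompute fcompute,
                               FTVMSchedule fschedule) {
  // Build a strategy with one generic implementation at priority level 10.
  OpStrategy strategy(runtime::make_object<OpStrategyNode>());
  strategy.AddImplement(fcompute, fschedule, /*plevel=*/10);

  // Take the first implementation of the first specialization and use it to
  // emit the compute description, then the schedule for the given target.
  OpImplement impl = strategy->specializations[0]->implements[0];
  Array<te::Tensor> outs = impl.Compute(attrs, inputs, out_type);
  return impl.Schedule(attrs, outs, target);
}

}  // namespace relay
}  // namespace tvm
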
55 changes: 55 additions & 0 deletions include/tvm/te/schedule.h
@@ -28,6 +28,7 @@
#include <tvm/tir/expr.h>
#include <tvm/te/tensor.h>
#include <tvm/te/tensor_intrin.h>
#include <tvm/support/with.h>

#include <string>
#include <unordered_map>
@@ -742,6 +743,55 @@ class SingletonNode : public IterVarRelationNode {
TVM_DECLARE_FINAL_OBJECT_INFO(SingletonNode, IterVarRelationNode);
};

class SpecializedConditionNode;

/*!
* \brief Specialized condition to enable op specialization
*/
class SpecializedCondition : public ObjectRef {
public:
SpecializedCondition() {}
explicit SpecializedCondition(ObjectPtr<Object> n) : ObjectRef(n) {}
/*!
* \brief Get the current specialized condition.
* \return The current specialized condition.
*/
TVM_DLL static SpecializedCondition Current();

const SpecializedConditionNode* operator->() const;

using ContainerType = SpecializedConditionNode;
class Internal;
private:
// enable with syntax.
friend class Internal;
friend class With<SpecializedCondition>;
/*! \brief Push a new specialized condition onto the thread local stack. */
TVM_DLL void EnterWithScope();
/*! \brief Pop a specialized condition off the thread local context stack. */
TVM_DLL void ExitWithScope();
};

/*! \brief Container for specialization conditions. */
class SpecializedConditionNode : public Object {
public:
/*!
* \brief List of conditions in conjunctive normal form (CNF).
* Each condition should be a simple expression, e.g., n > 16, m % 8 == 0, etc.,
* where n and m are tvm::Var that represent dimensions in the tensor shape.
*/
Array<PrimExpr> clauses;

void VisitAttrs(AttrVisitor* v) {
v->Visit("clauses", &clauses);
}

static SpecializedCondition make(Array<PrimExpr> conditions);

static constexpr const char* _type_key = "SpecializedCondition";
TVM_DECLARE_FINAL_OBJECT_INFO(SpecializedConditionNode, Object);
};


// implementations
inline const StageNode* Stage::operator->() const {
@@ -765,6 +815,11 @@ inline const IterVarRelationNode* IterVarRelation::operator->() const {
inline const IterVarAttrNode* IterVarAttr::operator->() const {
return static_cast<const IterVarAttrNode*>(get());
}

inline const SpecializedConditionNode* SpecializedCondition::operator->() const {
return static_cast<const SpecializedConditionNode*>(get());
}

} // namespace te
} // namespace tvm
#endif // TVM_TE_SCHEDULE_H_
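
The SpecializedCondition added above is designed to be used with the RAII With<> helper from tvm/support/with.h (EnterWithScope/ExitWithScope push and pop a thread-local stack). A hedged sketch of that scoping pattern follows; the function name and the clauses are illustrative, not from this commit.

// Hedged sketch only; ExampleSpecializedScope and its clauses are illustrative.
#include <tvm/te/schedule.h>

void ExampleSpecializedScope(tvm::tir::Var n) {
  using tvm::te::SpecializedCondition;
  using tvm::te::SpecializedConditionNode;

  // Two CNF clauses: n is positive and n is a multiple of 8.
  SpecializedCondition cond = SpecializedConditionNode::make({n > 0, n % 8 == 0});
  {
    // Entering the scope pushes `cond` onto the thread-local stack.
    tvm::With<SpecializedCondition> scope(cond);
    // Schedules built here can query the active condition.
    CHECK(SpecializedCondition::Current().same_as(cond));
  }  // Leaving the scope pops it again.
}
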
2 changes: 1 addition & 1 deletion python/tvm/__init__.py
@@ -61,7 +61,7 @@
from .api import *
from .intrin import *
from .tensor_intrin import decl_tensor_intrin
from .schedule import create_schedule
from .schedule import create_schedule, current_specialization
from .build_module import build, lower, build_config
from .tag import tag_scope

4 changes: 2 additions & 2 deletions python/tvm/autotvm/__init__.py
@@ -41,8 +41,8 @@
from .measure import measure_option, MeasureInput, MeasureResult, MeasureErrorNo, \
LocalBuilder, LocalRunner, RPCRunner
from .tuner import callback
from .task import template, get_config, create, ConfigSpace, ConfigEntity, \
register_topi_compute, register_topi_schedule, \
from .task import get_config, create, ConfigSpace, ConfigEntity, \
register_topi_compute, register_topi_schedule, register_customized_task, \
DispatchContext, FallbackContext, ApplyHistoryBest as apply_history_best, \
ApplyGraphBest as apply_graph_best
from .env import GLOBAL_SCOPE