Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
fix rebase
Browse files Browse the repository at this point in the history
  • Loading branch information
Fan committed Oct 25, 2019
1 parent 5bc4b36 commit 3f594b4
Show file tree
Hide file tree
Showing 4 changed files with 50 additions and 50 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -763,7 +763,7 @@ if(USE_TVM_OP)
endif()
endif()

set(TVM_OP_COMPILE_OPTIONS "-o${CMAKE_CURRENT_BINARY_DIR}/libtvmop.so --config ${CMAKE_CURRENT_BINARY_DIR}/tvmop.conf")
set(TVM_OP_COMPILE_OPTIONS "-o${CMAKE_CURRENT_BINARY_DIR}/libtvmop.so" "--config" "${CMAKE_CURRENT_BINARY_DIR}/tvmop.conf")
if(CUDA_ARCH_BIN)
set(TVM_OP_COMPILE_OPTIONS "${TVM_OP_COMPILE_OPTIONS}" "--cuda-arch" "${CUDA_ARCH_BIN}")
endif()
Expand Down
52 changes: 14 additions & 38 deletions contrib/tvmop/compile.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,46 +79,17 @@ def get_cuda_arch(arch):

func_list_llvm = []
func_list_cuda = []
config_spaces = ConfigSpaces()

# TODO: attach instruction features to the library, e.g., avx-512, etc.
for op in __OP_DEF__:
if tvm.module.enabled(get_target(op.target)):
func_list = func_list_llvm if op.target == "cpu" else func_list_cuda
for each_kwargs in op.arg_combination:
if (op.attrs_valid(**each_kwargs)):
name = op.name \
+ ''.join(["{}_{}".format(key, each_kwargs[key]) for key in op.attrs])
if op.dispatch is True:
config_space = autotvm.ConfigSpace()
with autotvm.task.ApplyConfig(config_space):
sch, args = op.func(fallback=False, **each_kwargs)
# register dispatch schedules
for i in range(len(config_space)):
config_entity = config_space.get(i)
with autotvm.task.ApplyConfig(config_entity):
sch, args = op.func(fallback=False, **each_kwargs)
subname = name + "index_" + str(i) + \
''.join(["%s_%d" % (arg.dtype, len(arg.shape)) for arg in args])
func_lower = tvm.lower(sch, args,
name=subname,
binds=op.get_binds(args))
func_list.append(func_lower)
# register config space
config_spaces[name] = ConfigSpace.from_tvm(config_space)
# register fallback schedule
config_space = autotvm.ConfigSpace()
with autotvm.task.ApplyConfig(config_space):
sch, args = op.func(fallback=True, **each_kwargs)
subname = name + "fallback" + \
''.join(["%s_%d" % (arg.dtype, len(arg.shape)) for arg in args])
func_lower = tvm.lower(sch, args, name=subname, binds=op.get_binds(args))
func_list.append(func_lower)
else:
sch, args = op.func(**each_kwargs)
subname = name + ''.join(["%s_%d" % (arg.dtype, len(arg.shape)) for arg in args])
func_lower = tvm.lower(sch, args, name=subname, binds=op.get_binds(args))
func_list.append(func_lower)
for operator_def in __OP_DEF__:
for sch, args, name in operator_def.invoke_all():
name = operator_def.get_op_name(name, args)
if tvm.module.enabled(get_target(operator_def.target)):
func_list = func_list_llvm if operator_def.target == "cpu" else func_list_cuda
func_lower = tvm.lower(sch, args,
name=name,
binds=operator_def.get_binds(args))
func_list.append(func_lower)

lowered_funcs = {get_target("cpu"): func_list_llvm}
if len(func_list_cuda) > 0:
Expand All @@ -131,5 +102,10 @@ def get_cuda_arch(arch):
set_cuda_target_arch(cuda_arch)
func_binary = tvm.build(lowered_funcs, name="tvmop")
func_binary.export_library(arguments.target_path)

config_spaces = ConfigSpaces()
for operator_def in __OP_DEF__:
for config_space, name in operator_def.get_config_spaces():
config_spaces[name] = ConfigSpace.from_tvm(config_space)
with open(arguments.config_path, "w") as f:
json.dump(config_spaces.to_json_dict(), f)
7 changes: 1 addition & 6 deletions contrib/tvmop/core/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,4 @@
# specific language governing permissions and limitations
# under the License.

<<<<<<< HEAD
from . import umath, fromnumeric
=======
# coding: utf-8
from . import multiarray
>>>>>>> infra for dispatch tvm op
from . import umath, fromnumeric, multiarray
39 changes: 34 additions & 5 deletions contrib/tvmop/opdef.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,12 +77,41 @@ def __call__(self, *args, **kwargs):
def invoke_all(self):
    """Yield (schedule, args, name) for every valid argument combination.

    For non-dispatch ops a single fallback schedule is yielded per
    combination.  For dispatch ops (``self.dispatch is True``) one entry is
    yielded per config in the autotvm config space (suffix ``index_<i>``),
    plus one fallback schedule (suffix ``fallback``).

    NOTE(review): reconstructed from a scraped diff — the scrape interleaved
    removed and added lines; this is the post-commit version.
    """
    for each_kwargs in self.arg_combination:
        if not self.attrs_valid(**each_kwargs):
            continue
        # Base name encodes the attribute values of this combination,
        # e.g. "opattr_val"; dtype/rank suffixes are appended later by
        # get_op_name once `args` is known.
        name = self.name \
            + ''.join(["{}_{}".format(key, each_kwargs[key]) for key in self.attrs])
        if self.dispatch is False:
            sch, args = self.func(**each_kwargs)
            yield sch, args, name
        else:
            # Register dispatch schedules: first build the full config
            # space (fallback=False populates it), then lower one schedule
            # per concrete config entity.
            config_space = autotvm.ConfigSpace()
            with autotvm.task.ApplyConfig(config_space):
                sch, args = self.func(fallback=False, **each_kwargs)
            for i in range(len(config_space)):
                config_entity = config_space.get(i)
                with autotvm.task.ApplyConfig(config_entity):
                    sch, args = self.func(fallback=False, **each_kwargs)
                subname = name + "index_" + str(i)
                yield sch, args, subname
            # Register the fallback schedule used when no tuned config
            # matches at runtime.
            config_space = autotvm.ConfigSpace()
            with autotvm.task.ApplyConfig(config_space):
                sch, args = self.func(fallback=True, **each_kwargs)
            subname = name + "fallback"
            yield sch, args, subname

def get_op_name(self, name, args):
    """Return *name* suffixed with ``<dtype>_<rank>`` for each shaped arg.

    Args without a ``shape`` attribute (e.g. scalar params) are skipped, so
    the suffix encodes only tensor arguments.  The result uniquely names a
    lowered function for one (dtype, rank) signature.

    NOTE(review): reconstructed from a scraped diff; indentation was lost in
    the scrape and has been restored.
    """
    # hasattr guard: `args` may mix tensors with scalar/attr placeholders.
    suffix = ''.join(["%s_%d" % (arg.dtype, len(arg.shape))
                      for arg in args if hasattr(arg, 'shape')])
    return name + suffix

def get_config_spaces(self):
    """Yield (config_space, name) for every dispatchable combination.

    Rebuilds the autotvm config space for each valid argument combination
    of a dispatch op (``self.dispatch is True``); non-dispatch ops yield
    nothing.  The name matches the base name produced in ``invoke_all`` so
    the runtime can look the space up by operator name.

    NOTE(review): reconstructed from a scraped diff; indentation was lost in
    the scrape and has been restored.
    """
    if self.dispatch is not True:
        return
    for each_kwargs in self.arg_combination:
        if not self.attrs_valid(**each_kwargs):
            continue
        name = self.name \
            + ''.join(["{}_{}".format(key, each_kwargs[key]) for key in self.attrs])
        # Calling func under ApplyConfig with an empty ConfigSpace only
        # populates the space; the schedule itself is discarded here.
        config_space = autotvm.ConfigSpace()
        with autotvm.task.ApplyConfig(config_space):
            self.func(fallback=False, **each_kwargs)
        yield config_space, name

def get_binds(self, args):
if self.auto_broadcast:
Expand Down

0 comments on commit 3f594b4

Please sign in to comment.