diff --git a/docs/api/python/autotvm.rst b/docs/api/python/autotvm.rst
index 5e87785024571..9357d1b6be082 100644
--- a/docs/api/python/autotvm.rst
+++ b/docs/api/python/autotvm.rst
@@ -18,6 +18,7 @@ tvm.autotvm
 -----------
 .. automodule:: tvm.autotvm
+.. automodule:: tvm.autotvm.apply_history_best
 
 tvm.autotvm.measure
 ~~~~~~~~~~~~~~~~~~~
 
diff --git a/docs/api/python/relay/build_module.rst b/docs/api/python/relay/build_module.rst
index 26164bf1ade9a..f470b9aff80f8 100644
--- a/docs/api/python/relay/build_module.rst
+++ b/docs/api/python/relay/build_module.rst
@@ -18,6 +18,8 @@ tvm.relay.build_module
 ----------------------
+.. automodule:: tvm.relay.build
+
 .. automodule:: tvm.relay.build_module
 
 .. autofunction:: tvm.relay.build_module.build
 
diff --git a/docs/api/python/relay/index.rst b/docs/api/python/relay/index.rst
index b286386b12308..03c8a37c9c5a9 100644
--- a/docs/api/python/relay/index.rst
+++ b/docs/api/python/relay/index.rst
@@ -39,3 +39,4 @@ compiler stack.
    op
    scope_builder
    vision
+   testing
diff --git a/docs/api/python/relay/testing.rst b/docs/api/python/relay/testing.rst
new file mode 100644
index 0000000000000..5af5ba72cb7b7
--- /dev/null
+++ b/docs/api/python/relay/testing.rst
@@ -0,0 +1,21 @@
+.. Licensed to the Apache Software Foundation (ASF) under one
+   or more contributor license agreements. See the NOTICE file
+   distributed with this work for additional information
+   regarding copyright ownership. The ASF licenses this file
+   to you under the Apache License, Version 2.0 (the
+   "License"); you may not use this file except in compliance
+   with the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing,
+   software distributed under the License is distributed on an
+   "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+   KIND, either express or implied. See the License for the
+   specific language governing permissions and limitations
+   under the License.
+
+tvm.relay.testing
+-----------------
+
+.. autoclass:: tvm.relay.testing.resnet
diff --git a/docs/api/python/tvm.rst b/docs/api/python/tvm.rst
index 07c2dbc44765a..56f36130b4b4a 100644
--- a/docs/api/python/tvm.rst
+++ b/docs/api/python/tvm.rst
@@ -44,7 +44,9 @@ The user facing API for computation declaration.
    tvm.min
    tvm.max
    tvm.tag_scope
-
+   tvm.exp
+   tvm.intrin
+   tvm.call_pure_extern
 
 .. autofunction:: tvm.var
 .. autofunction:: tvm.size_var
@@ -69,3 +71,6 @@ The user facing API for computation declaration.
 .. autofunction:: tvm.min
 .. autofunction:: tvm.max
 .. autofunction:: tvm.tag_scope
+.. autofunction:: tvm.exp
+.. autofunction:: tvm.intrin
+.. autofunction:: tvm.call_pure_extern
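The three entries added above (``tvm.exp``, ``tvm.intrin``, ``tvm.call_pure_extern``) belong to the flat top-level namespace this page documents. As a minimal, illustrative sketch (not taken from the patch itself, and assuming the pre-0.7 style API surface these docs describe), they can be exercised from the tensor expression API roughly as follows:

.. code-block:: python

    import tvm

    n = tvm.var("n")
    A = tvm.placeholder((n,), name="A", dtype="float32")

    # Go through the intrinsic rule system (the tvm.exp entry above).
    B = tvm.compute(A.shape, lambda i: tvm.exp(A[i]), name="B")

    # Bypass the rule system and call an external function directly
    # (the tvm.call_pure_extern entry above); "expf" is just an example symbol.
    C = tvm.compute(A.shape, lambda i: tvm.call_pure_extern("float32", "expf", A[i]), name="C")

    s = tvm.create_schedule([B.op, C.op])
    print(tvm.lower(s, [A, B, C], simple_mode=True))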
diff --git a/docs/conf.py b/docs/conf.py
index 3ca622d6ff18f..05f4cfc970d1b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -238,10 +238,11 @@ def setup(app):
 sphinx_gallery_conf = {
     'backreferences_dir': 'gen_modules/backreferences',
     'doc_module': ('tvm', 'numpy'),
-'reference_url': {
-    'tvm': None,
-    'matplotlib': 'https://matplotlib.org/',
-    'numpy': 'https://docs.scipy.org/doc/numpy/'},
+    'reference_url': {
+        'tvm': None,
+        'matplotlib': 'https://matplotlib.org/',
+        'numpy': 'https://docs.scipy.org/doc/numpy/'
+    },
     'examples_dirs': examples_dirs,
     'gallery_dirs': gallery_dirs,
     'subsection_order': subsection_order,
diff --git a/docs/dev/relay_add_pass.rst b/docs/dev/relay_add_pass.rst
index e842664eaad0e..8a6f8be0aea8f 100644
--- a/docs/dev/relay_add_pass.rst
+++ b/docs/dev/relay_add_pass.rst
@@ -401,6 +401,6 @@ in `src/relay/pass/`_.
 
 .. _include/tvm/relay/transform.h: https://github.com/apache/incubator-tvm/blob/master/include/tvm/relay/transform.h
 
-.. _src/relay/pass: https://github.com/apache/incubator-tvm/tree/master/src/relay/pass
+.. _src/relay/pass/: https://github.com/apache/incubator-tvm/tree/master/src/relay/pass
 
 .. _src/relay/pass/fold_constant.cc: https://github.com/apache/incubator-tvm/blob/master/src/relay/pass/fold_constant.cc
diff --git a/docs/dev/relay_bring_your_own_codegen.rst b/docs/dev/relay_bring_your_own_codegen.rst
index b7d5fa9f5fd69..0cced36c95c13 100644
--- a/docs/dev/relay_bring_your_own_codegen.rst
+++ b/docs/dev/relay_bring_your_own_codegen.rst
@@ -137,7 +137,7 @@ Here we highlight the notes marked in the above code:
 
 * **Note 3** is a TVM runtime compatible wrapper function. It accepts a list of input tensors and one output tensor (the last argument), casts them to the right data type, and invokes the subgraph function described in Note 2. In addition, ``TVM_DLL_EXPORT_TYPED_FUNC`` is a TVM macro that generates another function ``gcc_0`` with unified function arguments by packing all tensors to ``TVMArgs``. As a result, the TVM runtime can directly invoke ``gcc_0`` to execute the subgraph without additional efforts. With the above code generated, TVM is able to compile it along with the rest of the graph and export a single library for deployment.
 
-In the rest of this section, we will implement a codegen step-by-step to generate the above code. Your own codegen has to be located at ``src/relay/backend/contrib//``. In our example, we name our codegen "codegen_c" and put it under `here`_. Feel free to check this file for a complete implementation.
+In the rest of this section, we will implement a codegen step-by-step to generate the above code. Your own codegen has to be located at ``src/relay/backend/contrib//``. In our example, we name our codegen "codegen_c" and put it under `/src/relay/backend/contrib/codegen_c/ `_. Feel free to check this file for a complete implementation.
 
 Specifically, we are going to implement two classes in this file and here is their relationship:
 
@@ -625,7 +625,7 @@ The next step is to implement a customized runtime to make use of the output of
 
 Implement a Customized Runtime
 ==============================
 
-In this section, we will implement a customized TVM runtime step-by-step and register it to TVM runtime modules. The customized runtime should be located at ``src/runtime/contrib//``. In our example, we name our runtime "example_ext_runtime" and put it under `here`_. Feel free to check this file for a complete implementation.
+In this section, we will implement a customized TVM runtime step-by-step and register it to TVM runtime modules. The customized runtime should be located at ``src/runtime/contrib//``. In our example, we name our runtime "example_ext_runtime" and put it under `/src/runtime/contrib/example_ext_runtime/ `_. Feel free to check this file for a complete implementation.
 
 Again, we first define a customized runtime class as follows. The class has to be derived from TVM ``ModuleNode`` in order to be compatible with other TVM runtime modules.
diff --git a/docs/dev/relay_pass_infra.rst b/docs/dev/relay_pass_infra.rst
index 8bd5a05534a15..4630c9839f414 100644
--- a/docs/dev/relay_pass_infra.rst
+++ b/docs/dev/relay_pass_infra.rst
@@ -664,3 +664,5 @@ For more pass infra related examples in Python and C++, please refer to
 .. _tests/python/relay/test_pass_manager.py: https://github.com/apache/incubator-tvm/blob/master/tests/python/relay/test_pass_manager.py
 
 .. _tests/cpp/relay_transform_sequential.cc: https://github.com/apache/incubator-tvm/blob/master/tests/cpp/relay_transform_sequential.cc
+
+.. _include/tvm/relay/transform.h: https://github.com/apache/incubator-tvm/blob/master/include/tvm/relay/transform.h
\ No newline at end of file
diff --git a/docs/dev/runtime.rst b/docs/dev/runtime.rst
index 5ed5f86ed44be..9e542bf01b447 100644
--- a/docs/dev/runtime.rst
+++ b/docs/dev/runtime.rst
@@ -1,19 +1,19 @@
-.. Licensed to the Apache Software Foundation (ASF) under one
-.. or more contributor license agreements. See the NOTICE file
-.. distributed with this work for additional information
-.. regarding copyright ownership. The ASF licenses this file
-.. to you under the Apache License, Version 2.0 (the
-.. "License"); you may not use this file except in compliance
-.. with the License. You may obtain a copy of the License at
-..
-.. http://www.apache.org/licenses/LICENSE-2.0
-..
-.. Unless required by applicable law or agreed to in writing,
-.. software distributed under the License is distributed on an
-.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-.. KIND, either express or implied. See the License for the
-.. specific language governing permissions and limitations
-.. under the License.
+.. Licensed to the Apache Software Foundation (ASF) under one
+   or more contributor license agreements. See the NOTICE file
+   distributed with this work for additional information
+   regarding copyright ownership. The ASF licenses this file
+   to you under the Apache License, Version 2.0 (the
+   "License"); you may not use this file except in compliance
+   with the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing,
+   software distributed under the License is distributed on an
+   "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+   KIND, either express or implied. See the License for the
+   specific language governing permissions and limitations
+   under the License.
 
 .. _tvm-runtime-system:
 
diff --git a/tutorials/autotvm/tune_relay_arm.py b/tutorials/autotvm/tune_relay_arm.py
index 9aba937986175..ea24b16857888 100644
--- a/tutorials/autotvm/tune_relay_arm.py
+++ b/tutorials/autotvm/tune_relay_arm.py
@@ -15,6 +15,8 @@
 # specific language governing permissions and limitations
 # under the License.
 """
+.. _tune_relay_arm:
+
 Auto-tuning a convolutional network for ARM CPU
 ===============================================
 **Author**: `Lianmin Zheng `_, `Zhao Wu `_, `Eddie Yan `_
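The APIs documented earlier in this patch (``tvm.autotvm.apply_history_best`` and ``tvm.relay.build``) are what these tuning tutorials ultimately drive. Below is a minimal usage sketch, not part of the patch, assuming a ``tvm.relay.testing`` workload, an ``llvm`` target, and a hypothetical log file ``mobilenet_tuning.log`` produced by an earlier tuning run; the three-value return of ``relay.build`` follows the convention used in the tutorials of this era:

.. code-block:: python

    from tvm import autotvm, relay
    from tvm.relay import testing

    # A stock workload from tvm.relay.testing; depending on the exact version this
    # returns the network or a module, plus its parameters. Both work with relay.build.
    mod, params = testing.mobilenet.get_workload(batch_size=1)

    # Apply the best schedules recorded in the (assumed) tuning log while compiling.
    with autotvm.apply_history_best("mobilenet_tuning.log"):
        with relay.build_config(opt_level=3):
            graph, lib, params = relay.build(mod, target="llvm", params=params)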
diff --git a/tutorials/autotvm/tune_relay_x86.py b/tutorials/autotvm/tune_relay_x86.py
index 87d07f9870b25..f44773e544a73 100644
--- a/tutorials/autotvm/tune_relay_x86.py
+++ b/tutorials/autotvm/tune_relay_x86.py
@@ -15,6 +15,8 @@
 # specific language governing permissions and limitations
 # under the License.
 """
+.. _tune_relay_x86:
+
 Auto-tuning a convolutional network for x86 CPU
 ===============================================
 **Author**: `Yao Wang `_, `Eddie Yan `_
diff --git a/tutorials/language/intrin_math.py b/tutorials/language/intrin_math.py
index c1af984a09a15..59bf79d130926 100644
--- a/tutorials/language/intrin_math.py
+++ b/tutorials/language/intrin_math.py
@@ -21,7 +21,7 @@
 
 While TVM supports basic arithmetic operations. In many cases
 usually we will need more complicated builtin functions.
-For example :code:`exp` to take the exponetial of the function.
+For example :code:`exp` to take the exponential of the function.
 
 These functions are target system dependent and may have different
 names of different target platforms. In this tutorial, we will learn
@@ -94,6 +94,8 @@
 # TVM also allows user to customize the rules during runtime.
 # The following example customizes CUDA lowering rule for :code:`exp`.
 #
+
+
 def my_cuda_math_rule(op):
     """Customized CUDA intrinsic lowering rule"""
     assert isinstance(op, tvm.tir.Call)
@@ -106,6 +108,8 @@ def my_cuda_math_rule(op):
     else:
         # cannot do translation, return self.
         return op
+
+
 tvm.target.register_intrin_rule("cuda", "exp", my_cuda_math_rule, override=True)
 ######################################################################
 # Register the rule to TVM with override option to override existing rule.
@@ -123,10 +127,13 @@ def my_cuda_math_rule(op):
 # User can easily add new intrinsic by using the intrinsic rule system.
 # The following example adds an intrinsic :code:`mylog` to the system.
 #
+
+
 def mylog(x):
     """customized log intrinsic function"""
     return tvm.call_pure_intrin(x.dtype, "mylog", x)
 
+
 def my_cuda_mylog_rule(op):
     """CUDA lowering rule for log"""
     if op.dtype == "float32":
@@ -135,6 +142,8 @@ def my_cuda_mylog_rule(op):
         return tvm.call_pure_extern("float64", "log", op.args[0])
     else:
         return op
+
+
 tvm.target.register_intrin_rule("cuda", "mylog", my_cuda_mylog_rule, override=True)
 
 n = tvm.var("n")
diff --git a/tutorials/relay_quick_start.py b/tutorials/relay_quick_start.py
index 6cded3325ad6a..d272a0e315b88 100644
--- a/tutorials/relay_quick_start.py
+++ b/tutorials/relay_quick_start.py
@@ -55,7 +55,7 @@
 #
 # In this tutorial, we assume we will do inference on our device
 # and the batch size is set to be 1. Input images are RGB color
-# images of size 224 * 224. We can call the :any:`tvm.relay.expr.astext()`
+# images of size 224 * 224. We can call the :any:`tvm.relay.expr.TupleWrapper.astext()`
 # to show the network structure.
 
 batch_size = 1
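The new ``tvm.relay.testing`` page added earlier documents the ResNet workload that this quick-start change refers to. A small illustrative sketch follows (assumptions: the keyword arguments shown below and that ``astext()`` is available on the returned object, as the corrected cross-reference suggests):

.. code-block:: python

    from tvm.relay import testing

    # Build the ResNet-18 workload described by the new tvm.relay.testing page.
    # Depending on the TVM version this returns either the network expression
    # or a module, together with its parameters.
    net, params = testing.resnet.get_workload(
        num_layers=18, batch_size=1, image_shape=(3, 224, 224))

    # Print the textual form of the network, as the tutorial comment describes.
    print(net.astext(show_meta_data=False))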
diff --git a/vta/tutorials/optimize/matrix_multiply_opt.py b/vta/tutorials/optimize/matrix_multiply_opt.py
index 2d54b97957b2c..2722af594c035 100644
--- a/vta/tutorials/optimize/matrix_multiply_opt.py
+++ b/vta/tutorials/optimize/matrix_multiply_opt.py
@@ -23,7 +23,7 @@
 
 This tutorial provides an overview on how to use TVM to map matrix
 multiplication efficiently on the VTA design.
-We recommend covering the :ref:`vta-basic-mat-mult` tutorial first.
+We recommend covering the :ref:`basic-mat-mult` tutorial first.
 
 In this tutorial, we will demonstrate TVM schedule optimizations to break large
 neural network operators down onto smaller blocks to achieve computation within