Merge branch 'main' into aluo/onnx/support-nllloss-tests
* main:
  Fix flaky NMS test by making sure scores are unique (apache#9140)
  [Relay] Merge analysis/context_analysis.cc and transforms/device_annotation.cc (apache#9038)
  [LLVM] Make changes needed for opaque pointers (apache#9138)
  Arm(R) Ethos(TM)-U NPU codegen integration (apache#8849)
  [CI] Split Integration tests out of first phase of pipeline (apache#9128)
  [Meta Schedule][M3b] Runner (apache#9111)
  Fix Google Mock differences between Ubuntu 18.04 and 16.04 (apache#9141)
  [TIR] add loop partition hint pragma (apache#9121)
  fix things (apache#9146)
  [Meta Schedule][M3a] SearchStrategy (apache#9132)
  [Frontend][PyTorch] support for quantized conv_transpose2d op (apache#9133)
  [UnitTest] Parametrized test_conv2d_int8_intrinsics (apache#9143)
  [OpenCL] Remove redundant visit statement in CodeGen. (apache#9144)
  [BYOC] support arbitrary input dims for add/mul/relu of dnnl c_src codegen (apache#9127)
  [Relay][ConvertLayout] Support for qnn.conv2d_transpose (apache#9139)
  add nn.global_avgpool to fq2i (apache#9137)
  [UnitTests] Enable minimum testing on Vulkan target in CI (apache#9093)
  [Torch] Support returning quantized weights and bias for BYOC use cases (apache#9135)
  [Relay] Prepare for new plan_devices.cc (part II) (apache#9130)
  [microTVM][Zephyr] Add MIMXRT1050 board support (apache#9068)
AndrewZhaoLuo committed Sep 29, 2021
2 parents 81a40a8 + 725ae75 commit 6f5aeb1
Showing 109 changed files with 9,412 additions and 627 deletions.
16 changes: 14 additions & 2 deletions Jenkinsfile
@@ -209,7 +209,7 @@ stage('Build') {
make(ci_gpu, 'build', '-j2')
pack_lib('gpu', tvm_multilib)
// compiler test
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu_vulkan.sh"
sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu_other.sh"
make(ci_gpu, 'build2', '-j2')
}
}
@@ -224,7 +224,6 @@ stage('Build') {
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_unittest.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_fsim.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_vta_tsim.sh"
// sh "${docker_run} ${ci_cpu} ./tests/scripts/task_golang.sh"
@@ -300,6 +299,19 @@ stage('Unit Test') {
}
}
},
'python3: CPU': {
node('CPU') {
ws(per_exec_ws("tvm/ut-python-cpu")) {
init_git()
unpack_lib('cpu', tvm_multilib_tsim)
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_ci_setup.sh"
sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh"
junit "build/pytest-results/*.xml"
}
}
}
},
'python3: i386': {
node('CPU') {
ws(per_exec_ws("tvm/ut-python-i386")) {
6 changes: 6 additions & 0 deletions apps/microtvm/zephyr/template_project/boards.json
@@ -1,4 +1,10 @@
{
"mimxrt1050_evk": {
"board": "mimxrt1050_evk",
"model": "imxrt10xx",
"is_qemu": false,
"fpu": true
},
"mps2_an521": {
"board": "mps2_an521",
"model": "mps2_an521",
12 changes: 9 additions & 3 deletions apps/microtvm/zephyr/template_project/microtvm_api_server.py
@@ -162,6 +162,7 @@ def _get_device_args(options):
"nucleo_l4r5zi": {"idVendor": 0x0483, "idProduct": 0x374B},
"nucleo_f746zg": {"idVendor": 0x0483, "idProduct": 0x374B},
"stm32f746g_disco": {"idVendor": 0x0483, "idProduct": 0x374B},
"mimxrt1050_evk": {"idVendor": 0x1366, "idProduct": 0x0105},
}


@@ -545,6 +546,10 @@ def _find_openocd_serial_port(cls, options):

return ports[0].device

@classmethod
def _find_jlink_serial_port(cls, options):
return cls._find_openocd_serial_port(options)

@classmethod
def _find_serial_port(cls, options):
flash_runner = _get_flash_runner()
@@ -555,9 +560,10 @@ def _find_serial_port(cls, options):
if flash_runner == "openocd":
return cls._find_openocd_serial_port(options)

raise FlashRunnerNotSupported(
f"Don't know how to deduce serial port for flash runner {flash_runner}"
)
if flash_runner == "jlink":
return cls._find_jlink_serial_port(options)

raise RuntimeError(f"Don't know how to deduce serial port for flash runner {flash_runner}")

def __init__(self, options):
self._options = options
13 changes: 10 additions & 3 deletions docker/install/ubuntu_install_core.sh
@@ -26,6 +26,13 @@ apt-get update && apt-get install -y --no-install-recommends \
libcurl4-openssl-dev libssl-dev libopenblas-dev g++ sudo \
apt-transport-https graphviz pkg-config curl


cd /usr/src/gtest && cmake CMakeLists.txt && make && cp *.a /usr/lib
cd /usr/src/gmock && cmake CMakeLists.txt && make && cp *.a /usr/lib
if [[ -d /usr/src/googletest ]]; then
# Single package source (Ubuntu 18.04)
# googletest is installed via libgtest-dev
cd /usr/src/googletest && cmake CMakeLists.txt && make && cp -v {googlemock,googlemock/gtest}/*.a /usr/lib
else
# Split source package (Ubuntu 16.04)
# libgtest-dev and google-mock
cd /usr/src/gtest && cmake CMakeLists.txt && make && cp -v *.a /usr/lib
cd /usr/src/gmock && cmake CMakeLists.txt && make && cp -v *.a /usr/lib
fi
25 changes: 25 additions & 0 deletions include/tvm/ir/attrs.h
@@ -357,6 +357,31 @@ inline TFunc WithAttr(TFunc input, const std::string& attr_key, ObjectRef attr_v
return input;
}

/*!
* \brief Copy the function or module, overriding its attributes with the entries from \p attrs.
*
* \param input The thing to annotate (BaseFunc or IRModule)
* \param attrs Key/values attributes to add to \p input.
*
* \tparam TFunc The corresponding function or module type.
*
* \returns The new function or module with updated attributes.
*/
template <typename TFunc>
inline TFunc WithAttrs(TFunc input, Map<String, ObjectRef> attrs) {
using TNode = typename TFunc::ContainerType;
static_assert(TNode::_type_final, "Can only operate on the leaf nodes");
TNode* node = input.CopyOnWrite();
if (node->attrs.defined()) {
for (const auto& pair : attrs) {
node->attrs.CopyOnWrite()->dict.Set(pair.first, pair.second);
}
} else {
node->attrs = DictAttrs(std::move(attrs));
}
return input;
}

// Namespace containing detail implementations
namespace detail {
using runtime::TVMArgValue;
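For orientation, here is a minimal usage sketch of the new WithAttrs helper (hypothetical caller code, not part of this commit; the function name AnnotateForExport and the "Compiler" key/value are illustrative):

// Hypothetical caller: attach several attributes to a Relay function in one
// call instead of chaining WithAttr once per key.
#include <tvm/ir/attrs.h>
#include <tvm/ir/function.h>
#include <tvm/relay/function.h>
#include <utility>

tvm::relay::Function AnnotateForExport(tvm::relay::Function func) {
  tvm::Map<tvm::String, tvm::ObjectRef> attrs;
  attrs.Set(tvm::attr::kGlobalSymbol, tvm::String("main"));
  attrs.Set("Compiler", tvm::String("my_backend"));  // illustrative key/value
  // WithAttrs merges into existing attrs via CopyOnWrite, or installs a
  // fresh DictAttrs when the function has none.
  return tvm::WithAttrs(std::move(func), attrs);
}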
21 changes: 21 additions & 0 deletions include/tvm/ir/function.h
@@ -189,6 +189,27 @@ constexpr const char* kTarget = "target";
* Type: String
*/
constexpr const char* kGlobalSymbol = "global_symbol";

/*!
* \brief The device types which will hold each of the function's parameters.
*
* Only supported on Relay \p Functions. Generally added by the \p PlanDevices pass, but
* may be included as an annotation on user programs.
*
* Type: Array<Integer> (but interpreted as Array<DLDeviceType>)
*/
constexpr const char* kParamDeviceTypes = "param_device_types";

/*!
* \brief The device type which will hold the function result.
*
* Only supported on Relay \p Functions. Generally added by the \p PlanDevices pass, but
* may be included as an annotation on user programs.
*
* Type: Integer (but interpreted as DLDeviceType)
*/
constexpr const char* kResultDeviceType = "result_device_type";

} // namespace attr
} // namespace tvm
#endif // TVM_IR_FUNCTION_H_
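As a hedged illustration of how these keys might be consumed (hypothetical reader code, not part of this commit; the helper names ResultOnCpu and NumParamDeviceTypes are invented for the example):

// Hypothetical readers: inspect the device-planning attributes on a Relay
// function, e.g. after the PlanDevices pass has populated them.
#include <dlpack/dlpack.h>
#include <tvm/ir/expr.h>
#include <tvm/ir/function.h>
#include <tvm/relay/function.h>

bool ResultOnCpu(const tvm::relay::Function& func) {
  // kResultDeviceType is stored as an Integer but interpreted as a DLDeviceType.
  auto dev_type = func->GetAttr<tvm::Integer>(tvm::attr::kResultDeviceType);
  return dev_type.defined() && dev_type.value()->value == kDLCPU;
}

size_t NumParamDeviceTypes(const tvm::relay::Function& func) {
  // kParamDeviceTypes is an Array<Integer>, one entry per function parameter.
  auto dev_types =
      func->GetAttr<tvm::Array<tvm::Integer>>(tvm::attr::kParamDeviceTypes);
  return dev_types.defined() ? dev_types.value().size() : 0;
}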
4 changes: 2 additions & 2 deletions include/tvm/meta_schedule/builder.h
@@ -25,7 +25,7 @@
namespace tvm {
namespace meta_schedule {

/*! \brief The builder's input. */
/*! \brief The builder's input, containing an IRModule and the target. */
class BuilderInputNode : public runtime::Object {
public:
/*! \brief The IRModule to be built. */
@@ -57,7 +57,7 @@ class BuilderInput : public runtime::ObjectRef {
TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(BuilderInput, runtime::ObjectRef, BuilderInputNode);
};

/*! \brief The builder's output. */
/*! \brief The builder's output, containing the artifact path or error message if any. */
class BuilderResultNode : public runtime::Object {
public:
/*! \brief The path to the built artifact. */
219 changes: 219 additions & 0 deletions include/tvm/meta_schedule/runner.h
@@ -0,0 +1,219 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef TVM_META_SCHEDULE_RUNNER_H_
#define TVM_META_SCHEDULE_RUNNER_H_

#include <tvm/ir/expr.h>
#include <tvm/meta_schedule/arg_info.h>

namespace tvm {
namespace meta_schedule {

/*! \brief The runner's input. */
class RunnerInputNode : public runtime::Object {
public:
/*! \brief The path to the built artifact. */
String artifact_path;
/*! \brief The type of device. */
String device_type;
/*! \brief The argument information. */
Array<ArgInfo> args_info;

void VisitAttrs(tvm::AttrVisitor* v) {
v->Visit("artifact_path", &artifact_path);
v->Visit("device_type", &device_type);
v->Visit("args_info", &args_info);
}

static constexpr const char* _type_key = "meta_schedule.RunnerInput";
TVM_DECLARE_FINAL_OBJECT_INFO(RunnerInputNode, runtime::Object);
};

/*!
* \brief Managed reference to RunnerInputNode
* \sa RunnerInputNode
*/
class RunnerInput : public runtime::ObjectRef {
public:
/*!
* \brief Constructor of RunnerInput
* \param artifact_path The path to the built artifact.
* \param device_type The type of device.
* \param args_info The argument information.
*/
TVM_DLL explicit RunnerInput(String artifact_path, String device_type, Array<ArgInfo> args_info);
TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(RunnerInput, runtime::ObjectRef, RunnerInputNode);
};

/*! \brief The runner's output. */
class RunnerResultNode : public runtime::Object {
public:
/*! \brief The run time in seconds. */
Optional<Array<FloatImm>> run_secs;
/*! \brief The error message, if any. */
Optional<String> error_msg;

void VisitAttrs(tvm::AttrVisitor* v) {
v->Visit("run_secs", &run_secs);
v->Visit("error_msg", &error_msg);
}

static constexpr const char* _type_key = "meta_schedule.RunnerResult";
TVM_DECLARE_FINAL_OBJECT_INFO(RunnerResultNode, runtime::Object);
};

/*!
* \brief Managed reference to RunnerResultNode
* \sa RunnerResultNode
*/
class RunnerResult : public runtime::ObjectRef {
public:
/*!
* \brief Constructor of RunnerResult.
* \param run_secs The run time in seconds.
* \param error_msg The error message, if any.
*/
TVM_DLL explicit RunnerResult(Optional<Array<FloatImm>> run_secs, Optional<String> error_msg);
TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(RunnerResult, runtime::ObjectRef, RunnerResultNode);
};

/*!
* \brief A class to asynchronously fetch runner's output.
* \note The API design is consistent with python's concurrent.futures.Future:
* https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Future
*/
class RunnerFutureNode : public runtime::Object {
public:
/*!
* \brief The function type to check whether the runner has finished.
* \return Whether the runner's output is ready.
*/
using FDone = runtime::TypedPackedFunc<bool()>;
/*!
* \brief The function type to fetch runner output if it is ready.
* \return The runner's output.
*/
using FResult = runtime::TypedPackedFunc<RunnerResult()>;

/*! \brief The packed function to check whether the runner has finished. */
FDone f_done;
/*! \brief The packed function to fetch runner output if it is ready. */
FResult f_result;

void VisitAttrs(tvm::AttrVisitor* v) {
// `f_done` is not visited
// `f_result` is not visited
}

/*!
* \brief Check whether the runner has finished.
* \return A boolean indicating whether the runner has finished.
*/
bool Done() const { return f_done(); }
/*!
* \brief Fetch the runner's output if it is ready.
* \return The runner's output.
*/
RunnerResult Result() const { return f_result(); }

static constexpr const char* _type_key = "meta_schedule.RunnerFuture";
TVM_DECLARE_FINAL_OBJECT_INFO(RunnerFutureNode, runtime::Object);
};

/*!
* \brief Managed reference to RunnerFutureNode
* \sa RunnerFutureNode
*/
class RunnerFuture : public runtime::ObjectRef {
public:
using FDone = RunnerFutureNode::FDone;
using FResult = RunnerFutureNode::FResult;

/*!
* \brief Constructor of RunnerFuture
* \param f_done The packed function to check whether the runner has finished.
* \param f_result The packed function to fetch runner output if it is ready.
*/
TVM_DLL explicit RunnerFuture(FDone f_done, FResult f_result);
TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(RunnerFuture, runtime::ObjectRef,
RunnerFutureNode);
};

/*! \brief The abstract runner interface. */
class RunnerNode : public runtime::Object {
public:
/*!
* \brief The function type to run the built artifacts and get runner futures.
* \param input The runner's inputs.
* \return The runner futures.
* \sa RunnerFuture
*/
using FRun = runtime::TypedPackedFunc<Array<RunnerFuture>(Array<RunnerInput>)>;

/*! \brief Default destructor */
virtual ~RunnerNode() = default;

/*!
* \brief Run the built artifact and get runner futures.
* \param runner_inputs The runner's inputs.
* \return The runner futures.
*/
virtual Array<RunnerFuture> Run(Array<RunnerInput> runner_inputs) = 0;

static constexpr const char* _type_key = "meta_schedule.Runner";
TVM_DECLARE_BASE_OBJECT_INFO(RunnerNode, runtime::Object);
};

/*!
* \brief Managed reference to RunnerNode
* \sa RunnerNode
*/
class Runner : public runtime::ObjectRef {
public:
using FRun = RunnerNode::FRun;

/*!
* \brief Create a runner with a customized run method on the python-side.
* \param f_run The packed function to run the built artifacts and get runner futures.
* \return The runner created.
*/
TVM_DLL static Runner PyRunner(FRun f_run);
TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(Runner, runtime::ObjectRef, RunnerNode);
};

/*! \brief An abstract runner with a customized run method on the python-side. */
class PyRunnerNode : public RunnerNode {
public:
/*! \brief The packed function to run the built artifacts and get runner futures. */
FRun f_run;

void VisitAttrs(tvm::AttrVisitor* v) {
// `f_run` is not visited
}

Array<RunnerFuture> Run(Array<RunnerInput> runner_inputs) final { return f_run(runner_inputs); }

static constexpr const char* _type_key = "meta_schedule.PyRunner";
TVM_DECLARE_FINAL_OBJECT_INFO(PyRunnerNode, RunnerNode);
};

} // namespace meta_schedule
} // namespace tvm

#endif // TVM_META_SCHEDULE_RUNNER_H_
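For orientation, a minimal in-process sketch of the Done()/Result() contract (hypothetical example code, not part of this header; a real runner, e.g. an RPC-based one, would return futures that poll a remote session rather than completing immediately):

// Hypothetical synchronous "runner": the future it returns is already done
// and hands back a canned RunnerResult, illustrating the polling contract.
#include <tvm/ir/expr.h>
#include <tvm/meta_schedule/runner.h>

namespace ms = tvm::meta_schedule;

ms::RunnerFuture RunSynchronously(const ms::RunnerInput& input) {
  // A real runner would execute the artifact at input->artifact_path on
  // input->device_type; here we just fabricate a 1.5 ms measurement.
  (void)input;
  ms::RunnerResult result(
      tvm::Array<tvm::FloatImm>{tvm::FloatImm(tvm::DataType::Float(64), 1.5e-3)},
      /*error_msg=*/tvm::Optional<tvm::String>());
  return ms::RunnerFuture(
      ms::RunnerFutureNode::FDone([]() { return true; }),
      ms::RunnerFutureNode::FResult([result]() { return result; }));
}

// Caller side, mirroring concurrent.futures.Future:
//   ms::RunnerFuture future = RunSynchronously(runner_input);
//   while (!future->Done()) { /* wait or do other work */ }
//   ms::RunnerResult r = future->Result();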