Merge pull request #220 from tier4/sync-upstream
chore: sync upstream
tier4-autoware-public-bot[bot] authored Dec 23, 2022
2 parents 6b1611d + c958fdd commit f5e88d6
Showing 56 changed files with 1,255 additions and 819 deletions.
38 changes: 38 additions & 0 deletions common/motion_utils/include/motion_utils/trajectory/trajectory.hpp
@@ -24,6 +24,7 @@
#include <algorithm>
#include <limits>
#include <stdexcept>
#include <utility>
#include <vector>

namespace motion_utils
@@ -580,6 +581,43 @@ double calcArcLength(const T & points)
return calcSignedArcLength(points, 0, points.size() - 1);
}

template <class T>
inline std::vector<double> calcCurvature(const T & points)
{
std::vector<double> curvature_vec(points.size());

for (size_t i = 1; i < points.size() - 1; ++i) {
const auto p1 = tier4_autoware_utils::getPoint(points.at(i - 1));
const auto p2 = tier4_autoware_utils::getPoint(points.at(i));
const auto p3 = tier4_autoware_utils::getPoint(points.at(i + 1));
curvature_vec.at(i) = (tier4_autoware_utils::calcCurvature(p1, p2, p3));
}
curvature_vec.at(0) = curvature_vec.at(1);
curvature_vec.at(curvature_vec.size() - 1) = curvature_vec.at(curvature_vec.size() - 2);

return curvature_vec;
}

template <class T>
inline std::vector<std::pair<double, double>> calcCurvatureAndArcLength(const T & points)
{
// Note that arclength is for the segment, not the sum.
std::vector<std::pair<double, double>> curvature_arc_length_vec;
curvature_arc_length_vec.push_back(std::pair(0.0, 0.0));
for (size_t i = 1; i < points.size() - 1; ++i) {
const auto p1 = tier4_autoware_utils::getPoint(points.at(i - 1));
const auto p2 = tier4_autoware_utils::getPoint(points.at(i));
const auto p3 = tier4_autoware_utils::getPoint(points.at(i + 1));
const double curvature = tier4_autoware_utils::calcCurvature(p1, p2, p3);
const double arc_length = tier4_autoware_utils::calcDistance2d(points.at(i - 1), points.at(i)) +
tier4_autoware_utils::calcDistance2d(points.at(i), points.at(i + 1));
curvature_arc_length_vec.push_back(std::pair(curvature, arc_length));
}
curvature_arc_length_vec.push_back(std::pair(0.0, 0.0));

return curvature_arc_length_vec;
}

/**
* @brief Calculate distance to the forward stop point from the given src index
*/
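For orientation, here is a minimal usage sketch of the two helpers added above (the wrapping function and message type are illustrative assumptions, not part of this commit). calcCurvature returns one curvature per point, copying the neighbors' values into the first and last slots, while calcCurvatureAndArcLength pairs each interior point's curvature with the summed distance to its two neighbors; meaningful results need at least three points.

#include <motion_utils/trajectory/trajectory.hpp>

#include <autoware_auto_planning_msgs/msg/trajectory_point.hpp>

#include <algorithm>
#include <cmath>
#include <limits>
#include <vector>

// Illustrative helper (not part of the commit): smallest turning radius implied
// by a trajectory, using the new per-point curvature utility.
double minTurningRadius(
  const std::vector<autoware_auto_planning_msgs::msg::TrajectoryPoint> & points)
{
  const std::vector<double> curvature = motion_utils::calcCurvature(points);

  double max_abs_curvature = 0.0;
  for (const double k : curvature) {
    max_abs_curvature = std::max(max_abs_curvature, std::abs(k));
  }

  // A (near) straight trajectory has (near) zero curvature, i.e. an unbounded radius.
  return max_abs_curvature > 1e-6 ? 1.0 / max_abs_curvature
                                  : std::numeric_limits<double>::infinity();
}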
4 changes: 4 additions & 0 deletions common/motion_utils/package.xml
@@ -8,7 +8,11 @@
<maintainer email="[email protected]">Satoshi Ota</maintainer>
<maintainer email="[email protected]">Takayuki Murooka</maintainer>
<!-- reviewer-->
<maintainer email="[email protected]">Fumiya Watanabe</maintainer>
<maintainer email="[email protected]">Kosuke Takeuchi</maintainer>
<maintainer email="[email protected]">Taiki Tanaka</maintainer>
<maintainer email="[email protected]">Takamasa Horibe</maintainer>
<maintainer email="[email protected]">Tomoya Kimura</maintainer>
<license>Apache License 2.0</license>

<author email="[email protected]">Yutaka Shimizu</author>
3 changes: 3 additions & 0 deletions common/tensorrt_common/CMakeLists.txt
@@ -4,6 +4,9 @@ project(tensorrt_common)
find_package(autoware_cmake REQUIRED)
autoware_package()

# TODO(tensorrt_common): Remove once upgrading to TensorRT 8.5 is complete
add_compile_options(-Wno-deprecated-declarations)

find_package(CUDA)
find_package(CUDNN)
find_package(TENSORRT)
28 changes: 18 additions & 10 deletions common/tvm_utility/include/tvm_utility/pipeline.hpp
@@ -182,8 +182,21 @@ class Pipeline
PostProcessorType post_processor_{};
};

// Each node should be specified with a string name and a shape
using NetworkNode = std::pair<std::string, std::vector<int64_t>>;
// NetworkNode
typedef struct
{
// Node name
std::string node_name;

// Network data type configurations
DLDataTypeCode tvm_dtype_code;
int32_t tvm_dtype_bits;
int32_t tvm_dtype_lanes;

// Shape info
std::vector<int64_t> node_shape;
} NetworkNode;

typedef struct
{
// Network info
@@ -196,11 +209,6 @@ typedef struct
std::string network_graph_path;
std::string network_params_path;

// Network data type configurations
DLDataTypeCode tvm_dtype_code;
int32_t tvm_dtype_bits;
int32_t tvm_dtype_lanes;

// Inference hardware configuration
DLDeviceType tvm_device_type;
int32_t tvm_device_id;
@@ -278,8 +286,8 @@ class InferenceEngineTVM : public InferenceEngine

for (auto & output_config : config.network_outputs) {
output_.push_back(TVMArrayContainer(
output_config.second, config.tvm_dtype_code, config.tvm_dtype_bits, config.tvm_dtype_lanes,
config.tvm_device_type, config.tvm_device_id));
output_config.node_shape, output_config.tvm_dtype_code, output_config.tvm_dtype_bits,
output_config.tvm_dtype_lanes, config.tvm_device_type, config.tvm_device_id));
}
}

@@ -290,7 +298,7 @@ class InferenceEngineTVM : public InferenceEngine
if (input[index].getArray() == nullptr) {
throw std::runtime_error("input variable is null");
}
set_input(config_.network_inputs[index].first.c_str(), input[index].getArray());
set_input(config_.network_inputs[index].node_name.c_str(), input[index].getArray());
}

// Execute the inference
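To make the interface change concrete: the TVM data-type triple (code, bits, lanes) now lives on every NetworkNode instead of once per InferenceEngineTVMConfig, so each input and output can carry its own precision. A hypothetical node description under the new layout follows (the name and shape are made-up values; kDLFloat is the DLPack enum already used by this header's types):

#include <tvm_utility/pipeline.hpp>

// Hypothetical single input node under the new per-node layout (not from this commit).
const tvm_utility::pipeline::NetworkNode example_input{
  "input_1",          // node_name (made-up)
  kDLFloat,           // tvm_dtype_code
  32,                 // tvm_dtype_bits
  1,                  // tvm_dtype_lanes
  {1, 224, 224, 3}};  // node_shape (made-up NHWC shape)

The InferenceEngineTVM constructor shown in the hunk below then reads node_shape and the per-node dtype fields from each output entry when allocating its buffers.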
32 changes: 17 additions & 15 deletions common/tvm_utility/test/yolo_v2_tiny/main.cpp
@@ -45,18 +45,18 @@ class PreProcessorYoloV2Tiny : public tvm_utility::pipeline::PreProcessor<std::s
{
public:
explicit PreProcessorYoloV2Tiny(tvm_utility::pipeline::InferenceEngineTVMConfig config)
: network_input_width(config.network_inputs[0].second[1]),
network_input_height(config.network_inputs[0].second[2]),
network_input_depth(config.network_inputs[0].second[3]),
network_datatype_bytes(config.tvm_dtype_bits / 8)
: network_input_width(config.network_inputs[0].node_shape[1]),
network_input_height(config.network_inputs[0].node_shape[2]),
network_input_depth(config.network_inputs[0].node_shape[3]),
network_input_datatype_bytes(config.network_inputs[0].tvm_dtype_bits / 8)
{
// Allocate input variable
std::vector<int64_t> shape_x{1, network_input_width, network_input_height, network_input_depth};
tvm_utility::pipeline::TVMArrayContainer x{
shape_x,
config.tvm_dtype_code,
config.tvm_dtype_bits,
config.tvm_dtype_lanes,
config.network_inputs[0].tvm_dtype_code,
config.network_inputs[0].tvm_dtype_bits,
config.network_inputs[0].tvm_dtype_lanes,
config.tvm_device_type,
config.tvm_device_id};

@@ -101,7 +101,8 @@ class PreProcessorYoloV2Tiny : public tvm_utility::pipeline::PreProcessor<std::s

TVMArrayCopyFromBytes(
output.getArray(), image_3f.data,
network_input_width * network_input_height * network_input_depth * network_datatype_bytes);
network_input_width * network_input_height * network_input_depth *
network_input_datatype_bytes);

return {output};
}
@@ -110,18 +111,18 @@ class PreProcessorYoloV2Tiny : public tvm_utility::pipeline::PreProcessor<std::s
int64_t network_input_width;
int64_t network_input_height;
int64_t network_input_depth;
int64_t network_datatype_bytes;
int64_t network_input_datatype_bytes;
tvm_utility::pipeline::TVMArrayContainer output;
};

class PostProcessorYoloV2Tiny : public tvm_utility::pipeline::PostProcessor<std::vector<float>>
{
public:
explicit PostProcessorYoloV2Tiny(tvm_utility::pipeline::InferenceEngineTVMConfig config)
: network_output_width(config.network_outputs[0].second[1]),
network_output_height(config.network_outputs[0].second[2]),
network_output_depth(config.network_outputs[0].second[3]),
network_datatype_bytes(config.tvm_dtype_bits / 8)
: network_output_width(config.network_outputs[0].node_shape[1]),
network_output_height(config.network_outputs[0].node_shape[2]),
network_output_depth(config.network_outputs[0].node_shape[3]),
network_output_datatype_bytes(config.network_outputs[0].tvm_dtype_bits / 8)
{
// Parse human readable names for the classes
std::ifstream label_file{LABEL_FILENAME};
@@ -170,7 +171,8 @@ class PostProcessorYoloV2Tiny : public tvm_utility::pipeline::PostProcessor<std:
network_output_width * network_output_height * network_output_depth, 0);
TVMArrayCopyToBytes(
input[0].getArray(), infer.data(),
network_output_width * network_output_height * network_output_depth * network_datatype_bytes);
network_output_width * network_output_height * network_output_depth *
network_output_datatype_bytes);

// Utility function to return data from y given index
auto get_output_data = [this, infer, n_classes, n_anchors, n_coords](
@@ -232,7 +234,7 @@ class PostProcessorYoloV2Tiny : public tvm_utility::pipeline::PostProcessor<std:
int64_t network_output_width;
int64_t network_output_height;
int64_t network_output_depth;
int64_t network_datatype_bytes;
int64_t network_output_datatype_bytes;
std::vector<std::string> labels{};
std::vector<std::pair<float, float>> anchors{};
};
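The size arithmetic in the byte copies above follows directly from those per-node fields. Roughly, as a sketch (names taken from the diff, not the actual member initializers):

#include <tvm_utility/pipeline.hpp>

#include <cstdint>

// Sketch (not from this commit): byte count of the first input tensor, computed
// from the per-node shape and dtype fields introduced by this change.
int64_t firstInputByteCount(const tvm_utility::pipeline::InferenceEngineTVMConfig & config)
{
  const auto & input0 = config.network_inputs[0];
  const int64_t datatype_bytes = input0.tvm_dtype_bits / 8;
  // node_shape is {batch, width, height, depth} for this test network.
  return input0.node_shape[1] * input0.node_shape[2] * input0.node_shape[3] * datatype_bytes;
}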
15 changes: 13 additions & 2 deletions common/tvm_utility/tvm_utility-extras.cmake
@@ -14,7 +14,7 @@

# Get user-provided variables
set(DOWNLOAD_ARTIFACTS OFF CACHE BOOL "enable artifacts download")
set(MODELZOO_VERSION "2.1.0-20221102" CACHE STRING "targeted ModelZoo version")
set(MODELZOO_VERSION "3.0.0-20221221" CACHE STRING "targeted ModelZoo version")

#
# Download the selected neural network if it is not already present on disk.
@@ -32,6 +32,7 @@ set(MODELZOO_VERSION "2.1.0-20221102" CACHE STRING "targeted ModelZoo version")
function(get_neural_network MODEL_NAME MODEL_BACKEND DEPENDENCY)
set(DATA_PATH ${CMAKE_CURRENT_SOURCE_DIR}/data)
set(EXTERNALPROJECT_NAME ${MODEL_NAME}_${MODEL_BACKEND})
set(PREPROCESSING "")

# Prioritize user-provided models.
if(IS_DIRECTORY "${DATA_PATH}/user/${MODEL_NAME}")
@@ -42,6 +43,13 @@ function(get_neural_network MODEL_NAME MODEL_BACKEND DEPENDENCY)
"${DATA_PATH}/models/${MODEL_NAME}/inference_engine_tvm_config.hpp"
COPYONLY
)
if(EXISTS "${DATA_PATH}/user/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp")
configure_file(
"${DATA_PATH}/user/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp"
"${DATA_PATH}/models/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp"
COPYONLY
)
endif()
set(DOWNLOAD_DIR "")
set(SOURCE_DIR "${DATA_PATH}/user/${MODEL_NAME}")
set(INSTALL_DIRECTORY "${DATA_PATH}/user/${MODEL_NAME}")
@@ -63,7 +71,9 @@ function(get_neural_network MODEL_NAME MODEL_BACKEND DEPENDENCY)
set(SOURCE_DIR "${DATA_PATH}/models/${MODEL_NAME}")
set(INSTALL_DIRECTORY "${DATA_PATH}/models/${MODEL_NAME}")
endif()

if(EXISTS "${DATA_PATH}/models/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp")
set(PREPROCESSING "${DATA_PATH}/models/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp")
endif()
include(ExternalProject)
externalproject_add(${EXTERNALPROJECT_NAME}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
@@ -72,6 +82,7 @@ function(get_neural_network MODEL_NAME MODEL_BACKEND DEPENDENCY)
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
BUILD_BYPRODUCTS "${DATA_PATH}/models/${MODEL_NAME}/inference_engine_tvm_config.hpp"
BUILD_BYPRODUCTS ${PREPROCESSING}
INSTALL_COMMAND ""
)
install(
1 change: 1 addition & 0 deletions launch/tier4_map_launch/CMakeLists.txt
@@ -7,4 +7,5 @@ autoware_package()
ament_auto_package(
INSTALL_TO_SHARE
launch
config
)
9 changes: 9 additions & 0 deletions launch/tier4_map_launch/config/pointcloud_map_loader.param.yaml
@@ -0,0 +1,9 @@
/**:
ros__parameters:
enable_whole_load: true
enable_downsampled_whole_load: false
enable_partial_load: false
enable_differential_load: false

# only used when downsample_whole_load enabled
leaf_size: 3.0 # downsample leaf size [m]
6 changes: 5 additions & 1 deletion launch/tier4_map_launch/launch/map.launch.py
@@ -161,7 +161,11 @@ def add_launch_arg(name: str, default_value=None, description=None):
),
add_launch_arg(
"pointcloud_map_loader_param_path",
"",
[
FindPackageShare("tier4_map_launch"),
"/config/pointcloud_map_loader.param.yaml",
# ToDo(kminoda): This file should eventually be removed as well as the other components
],
"path to pointcloud_map_loader param file",
),
add_launch_arg("use_intra_process", "false", "use ROS2 component container communication"),
2 changes: 1 addition & 1 deletion launch/tier4_map_launch/launch/map.launch.xml
@@ -1,6 +1,6 @@
<launch>
<!-- Parameter files -->
<arg name="pointcloud_map_loader_param_path"/>
<arg name="pointcloud_map_loader_param_path" default="$(find-pkg-share tier4_map_launch)/config/pointcloud_map_loader.param.yaml"/>

<arg name="map_path" default=""/>
<arg name="lanelet2_map_path" default="$(var map_path)/lanelet2_map.osm"/>
@@ -246,6 +246,7 @@
<arg name="input/object0" value="$(var merger/input/objects)"/>
<arg name="input/object1" value="clustering/camera_lidar_fusion/short_range_objects"/>
<arg name="output/object" value="camera_lidar_fusion/objects"/>
<arg name="priority_mode" value="0"/>
</include>
</group>


This file was deleted.

2 changes: 2 additions & 0 deletions localization/gyro_odometer/src/gyro_odometer_core.cpp
@@ -80,6 +80,7 @@ geometry_msgs::msg::TwistWithCovarianceStamped concatGyroAndOdometer(
} else {
twist_with_cov.header.stamp = latest_vehicle_twist_stamp;
}
twist_with_cov.header.frame_id = gyro_queue.front().header.frame_id;
twist_with_cov.twist.twist.linear.x = vx_mean;
twist_with_cov.twist.twist.angular = gyro_mean;

@@ -211,6 +212,7 @@ void GyroOdometer::callbackImu(const sensor_msgs::msg::Imu::ConstSharedPtr imu_m

sensor_msgs::msg::Imu gyro_base_link;
gyro_base_link.header = imu_msg_ptr->header;
gyro_base_link.header.frame_id = output_frame_;
gyro_base_link.angular_velocity = transformed_angular_velocity.vector;
gyro_base_link.angular_velocity_covariance =
transformCovariance(imu_msg_ptr->angular_velocity_covariance);
@@ -79,7 +79,7 @@ class LIDAR_APOLLO_SEGMENTATION_TVM_LOCAL ApolloLidarSegmentationPreProcessor
const int64_t input_channels;
const int64_t input_width;
const int64_t input_height;
const int64_t datatype_bytes;
const int64_t input_datatype_bytes;
const std::shared_ptr<FeatureGenerator> feature_generator;
TVMArrayContainer output;
};
@@ -115,7 +115,7 @@ class LIDAR_APOLLO_SEGMENTATION_TVM_LOCAL ApolloLidarSegmentationPostProcessor
const int64_t output_channels;
const int64_t output_width;
const int64_t output_height;
const int64_t datatype_bytes;
const int64_t output_datatype_bytes;
const float32_t objectness_thresh_;
const float32_t score_threshold_;
const float32_t height_thresh_;