diff --git a/.clang-format b/.clang-format index 028058749d475..1f39a8c4067c7 100644 --- a/.clang-format +++ b/.clang-format @@ -22,4 +22,3 @@ ReflowComments: false # because it crushes include-groups unless IncludeCategories are defined properly. # It was introduced in https://github.com/llvm-mirror/clang/commit/62e3198c4f5490a1c60ba51d81fe2e1f0dc99135 IncludeBlocks: Preserve -... diff --git a/.github/workflows/pre-commit.yaml b/.github/workflows/pre-commit.yaml new file mode 100644 index 0000000000000..92901723b1822 --- /dev/null +++ b/.github/workflows/pre-commit.yaml @@ -0,0 +1,23 @@ +name: pre-commit + +on: + pull_request: + workflow_dispatch: + +jobs: + pre-commit: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Setup Python + uses: actions/setup-python@v2 + + - name: Run pre-commit + uses: pre-commit/action@v2.0.3 + with: + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.markdownlint.yaml b/.markdownlint.yaml new file mode 100644 index 0000000000000..2bdc94ac71264 --- /dev/null +++ b/.markdownlint.yaml @@ -0,0 +1,7 @@ +default: true +MD013: false +MD024: + siblings_only: true +MD029: false +MD033: false +MD041: false diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000000..271fd3a1c5754 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,43 @@ +# To use: +# +# pre-commit run -a +# +# Or: +# +# pre-commit install # (runs every time you commit in git) +# +# To update this file: +# +# pre-commit autoupdate +# +# See https://github.com/pre-commit/pre-commit + +repos: + # Standard hooks + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: check-json + - id: check-merge-conflict + - id: check-toml + - id: check-xml + - id: check-yaml + - id: detect-private-key + - id: double-quote-string-fixer + - id: end-of-file-fixer + - id: mixed-line-ending + - id: trailing-whitespace + args: [--markdown-linebreak-ext=md] + + - repo: https://github.com/igorshubovych/markdownlint-cli + rev: v0.27.1 + hooks: + - id: markdownlint + args: ["-c", ".markdownlint.yaml", "--fix"] + + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v2.3.2 + hooks: + - id: prettier + +exclude: ".svg" diff --git a/.prettierrc.yaml b/.prettierrc.yaml new file mode 100644 index 0000000000000..ef9b1b85a7257 --- /dev/null +++ b/.prettierrc.yaml @@ -0,0 +1,2 @@ +printWidth: 120 +tabWidth: 2 diff --git a/README.md b/README.md index a28022a76e003..c85c6ac11706c 100644 --- a/README.md +++ b/README.md @@ -4,5 +4,6 @@ AutowareArchitectureProposal is a repository to explore and establish the architecture design of Autoware, an autonomous driving software managed by Autoware Foundation. 
-# Documentation +## Documentation + See [Github Pages](https://tier4.github.io/autoware.proj/tree/main/) diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg index b0da4505f9421..3d983f906356c 100644 --- a/ansible/ansible.cfg +++ b/ansible/ansible.cfg @@ -1,2 +1,2 @@ [defaults] -cow_selection = random \ No newline at end of file +cow_selection = random diff --git a/ansible/roles/cuda/tasks/main.yml b/ansible/roles/cuda/tasks/main.yml index 6004c3c07f0df..a829999f6b5d3 100644 --- a/ansible/roles/cuda/tasks/main.yml +++ b/ansible/roles/cuda/tasks/main.yml @@ -28,7 +28,7 @@ - name: CUDA (install CUDA 11.1) become: yes apt: - name: cuda-11-1 # nvidia-driver should be greater than 455 + name: cuda-11-1 # nvidia-driver should be greater than 455 update_cache: yes - name: CUDA (add path >> bashrc) diff --git a/ansible/roles/pacmod/tasks/main.yml b/ansible/roles/pacmod/tasks/main.yml index d7fab07cf33d9..e97d1888c4222 100644 --- a/ansible/roles/pacmod/tasks/main.yml +++ b/ansible/roles/pacmod/tasks/main.yml @@ -4,4 +4,3 @@ - import_tasks: pacmod.yml when: - not kvaser_library_only - diff --git a/docs/Credits.md b/docs/Credits.md index 2877a85d67805..0eb07cd1e250e 100644 --- a/docs/Credits.md +++ b/docs/Credits.md @@ -1,18 +1,18 @@ Certain AutowareArchitectureProposal packages rely on pre-trained CNN models provided by other open source repositories. - tensorrt_yolo3 - - The pre-trained models originate from [TRTForYolov3](https://github.com/lewes6369/TensorRT-Yolov3). - - [Weights for the trained model](https://drive.google.com/drive/folders/18OxNcRrDrCUmoAMgngJlhEglQ1Hqk_NJ) (416 folder) are automatically downloaded during the build process. + - The pre-trained models originate from [TRTForYolov3](https://github.com/lewes6369/TensorRT-Yolov3). + - [Weights for the trained model](https://drive.google.com/drive/folders/18OxNcRrDrCUmoAMgngJlhEglQ1Hqk_NJ) (416 folder) are automatically downloaded during the build process. - traffic_light_fine_detector - - The trained model in this package is based on the [pjreddie's YOLO .weights file](https://pjreddie.com/media/files/yolov3.weights), with additional fine-tuning by Tier IV using [Darknet](https://github.com/pjreddie/darknet). - - After fine-tuning, the new weights for the trained model are converted into an ONNX file using [Python](https://github.com/tier4/AutowareArchitectureProposal.iv/blob/master/src/perception/traffic_light_recognition/traffic_light_fine_detector/scripts/yolov3_to_onnx.py). + - The trained model in this package is based on the [pjreddie's YOLO .weights file](https://pjreddie.com/media/files/yolov3.weights), with additional fine-tuning by Tier IV using [Darknet](https://github.com/pjreddie/darknet). + - After fine-tuning, the new weights for the trained model are converted into an ONNX file using [Python](https://github.com/tier4/AutowareArchitectureProposal.iv/blob/master/src/perception/traffic_light_recognition/traffic_light_fine_detector/scripts/yolov3_to_onnx.py). - lidar_apollo_instance_segmentation - - This package makes use of three pre-trained models provided by [Apollo Auto](https://github.com/ApolloAuto). 
- - The following files are automatically downloaded during the build process: - - [VLP-16](https://github.com/ApolloAuto/apollo/raw/88bfa5a1acbd20092963d6057f3a922f3939a183/modules/perception/production/data/perception/lidar/models/cnnseg/velodyne16/deploy.caffemodel) - - [HDL-64](https://github.com/ApolloAuto/apollo/raw/88bfa5a1acbd20092963d6057f3a922f3939a183/modules/perception/production/data/perception/lidar/models/cnnseg/velodyne64/deploy.caffemodel) - - [VLS-128](https://github.com/ApolloAuto/apollo/raw/91844c80ee4bd0cc838b4de4c625852363c258b5/modules/perception/production/data/perception/lidar/models/cnnseg/velodyne128/deploy.caffemodel) + - This package makes use of three pre-trained models provided by [Apollo Auto](https://github.com/ApolloAuto). + - The following files are automatically downloaded during the build process: + - [VLP-16](https://github.com/ApolloAuto/apollo/raw/88bfa5a1acbd20092963d6057f3a922f3939a183/modules/perception/production/data/perception/lidar/models/cnnseg/velodyne16/deploy.caffemodel) + - [HDL-64](https://github.com/ApolloAuto/apollo/raw/88bfa5a1acbd20092963d6057f3a922f3939a183/modules/perception/production/data/perception/lidar/models/cnnseg/velodyne64/deploy.caffemodel) + - [VLS-128](https://github.com/ApolloAuto/apollo/raw/91844c80ee4bd0cc838b4de4c625852363c258b5/modules/perception/production/data/perception/lidar/models/cnnseg/velodyne128/deploy.caffemodel) diff --git a/docs/Readme.md b/docs/Readme.md index 9d62640613da6..9d25356d925fc 100644 --- a/docs/Readme.md +++ b/docs/Readme.md @@ -4,9 +4,9 @@ AutowareArchitectureProposal is a repository to explore and establish the architecture design of Autoware, an autonomous driving software managed by Autoware Foundation. -There already exists [Autoware.Auto](https://gitlab.com/autowarefoundation/autoware.auto) repository in Autoware Foundation GitLab. The architecture investigation, however, was done as a separate repository rather than a fork to explore architecture without prejudice from the existing source code. +There already exists [Autoware.Auto](https://gitlab.com/autowarefoundation/autoware.auto) repository in Autoware Foundation GitLab. The architecture investigation, however, was done as a separate repository rather than a fork to explore architecture without prejudice from the existing source code. -The established architecture will be presented to Autoware.Auto which will then be reviewed by the community members and be used to improve Autoware.Auto. AutowareArchitectureProposal also contains new functions that do not yet exist in Autoware.Auto to verify that the architecture is feasible for various use cases. These functions are also planned to be refactored and merged into Autoware.Auto. The details are explained in the [Future plans](#future-plans) section. +The established architecture will be presented to Autoware.Auto which will then be reviewed by the community members and be used to improve Autoware.Auto. AutowareArchitectureProposal also contains new functions that do not yet exist in Autoware.Auto to verify that the architecture is feasible for various use cases. These functions are also planned to be refactored and merged into Autoware.Auto. The details are explained in the [Future plans](#future-plans) section. ## Note for non-Tier IV members @@ -17,7 +17,7 @@ The established architecture will be presented to Autoware.Auto which will then ## Target AutowareArchitectureProposal aims to realize autonomous driving in various environments. 
-Autonomous driving on public roads is an extremely challenging project, and it cannot be achieved in a short period of time. Therefore we are trying to accumulate technology by applying the current AutowareArchitectureProposal to more restricted use cases such as in-factory transportation. At the same time, demonstration experiments in public roads are also in progress. +Autonomous driving on public roads is an extremely challenging project, and it cannot be achieved in a short period of time. Therefore we are trying to accumulate technology by applying the current AutowareArchitectureProposal to more restricted use cases such as in-factory transportation. At the same time, demonstration experiments in public roads are also in progress. ## Future plans @@ -25,4 +25,3 @@ Again, autonomous driving is an extremely challenging project and this cannot be As part of Tier IV's commitment to collaboration with the AWF and its members, we plan to merge the additional functionality of AutowareArchitectureProposal to Autoware.Auto. Note that since Autoware.Auto has its own scope and ODD (Operational Design Domain, prerequisite environmental conditions for an automatic driving system to operate) that needs to be achieved, not all the features in AutowareArchitectureProposal will be required. We keep using AutowareArchitectureProposal for some time, but remember that the core of our products will shift to Autoware.Auto. - diff --git a/docs/css/tier4_color.css b/docs/css/tier4_color.css index edd1d868e7ba9..c43ae74777449 100644 --- a/docs/css/tier4_color.css +++ b/docs/css/tier4_color.css @@ -1,6 +1,6 @@ :root { - --md-primary-fg-color: #22a3cd; - --md-primary-fg-color--light: #22a3cd; - --md-primary-fg-color--dark: #22a3cd; - --md-accent-fg-color: #065479; + --md-primary-fg-color: #22a3cd; + --md-primary-fg-color--light: #22a3cd; + --md-primary-fg-color--dark: #22a3cd; + --md-accent-fg-color: #065479; } diff --git a/docs/design/repository/Repository.md b/docs/design/repository/Repository.md index 34c3d99429eb2..0438f12108aaf 100644 --- a/docs/design/repository/Repository.md +++ b/docs/design/repository/Repository.md @@ -1,9 +1,10 @@ -TODO +# TODO + ## Repository Overview ## What is autoware.proj autoware.proj is a meta-repository for AutowareArchitectureProposal. -Since AutowareArchitectureProposal is made up of code stored in multiple, version-specific GitHub repositories, creating a new build environment would involve individually importing each repository which is both time-consuming and prone to error. To avoid both of these problems, autoware.proj was created as a meta-repository to streamline the management of all necessary repositories through the use of [vcstool](https://github.com/dirk-thomas/vcstool). +Since AutowareArchitectureProposal is made up of code stored in multiple, version-specific GitHub repositories, creating a new build environment would involve individually importing each repository which is both time-consuming and prone to error. To avoid both of these problems, autoware.proj was created as a meta-repository to streamline the management of all necessary repositories through the use of [vcstool](https://github.com/dirk-thomas/vcstool). autoware.proj is the top directory of the AutowareArchitectureProposal project. Therefore, this repository contains high-level information of AutowareArchitectureProposal such as [the architecture design](/docs/design/software_architecture/Overview.md). 
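The Repository.md hunk above describes autoware.proj as a vcstool-managed meta-repository. As a rough sketch of how that workflow looks, the snippet below shows a hypothetical `*.repos` manifest in vcstool's YAML format; the file name, the chosen repositories, and the pinned versions are illustrative assumptions, not the actual contents of autoware.proj.

```yaml
# Hypothetical autoware.proj.repos manifest (illustrative only).
# Import all listed repositories into ./src with:
#   vcs import src < autoware.proj.repos
repositories:
  autoware.iv:
    type: git
    url: https://github.com/tier4/AutowareArchitectureProposal.iv.git
    version: master
  launcher/autoware_launcher:
    type: git
    url: https://github.com/tier4/autoware_launcher.iv.universe.git
    version: master
```

Pinning each repository to an explicit `version` is what keeps the meta-repository reproducible: updating a dependency becomes a reviewable one-line change to the manifest rather than an ad-hoc clone.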
diff --git a/docs/design/software_architecture/Control/Control.md b/docs/design/software_architecture/Control/Control.md index e7545389efd12..c0488b37117ff 100644 --- a/docs/design/software_architecture/Control/Control.md +++ b/docs/design/software_architecture/Control/Control.md @@ -1,6 +1,6 @@ # Control -# Overview +## Overview Control stack generates control signals to drive a vehicle following trajectories considering vehicle dynamics. This layer ensures that the vehicle follows the trajectory planned by planning. @@ -67,7 +67,7 @@ The main outputs included in Vehicle Command are as follows. As above requirements, the control stack outputs gear shifting command and acceleration command as Vehicle command -# Design +## Design ![ControlOverview](image/ControlOverview.svg) @@ -113,6 +113,6 @@ Systematic post-processing of vehicle control command, independent of trajectory - Control signal for vehicles -# References +## References TBU diff --git a/docs/design/software_architecture/Control/TrajectoryFollower/LatLonCoupler.md b/docs/design/software_architecture/Control/TrajectoryFollower/LatLonCoupler.md index b7dddfdb2e14b..6e27d30fbaa45 100644 --- a/docs/design/software_architecture/Control/TrajectoryFollower/LatLonCoupler.md +++ b/docs/design/software_architecture/Control/TrajectoryFollower/LatLonCoupler.md @@ -1,6 +1,6 @@ # Latlon Coupler -# Overview +## Overview ## Role @@ -33,6 +33,7 @@ Latlon Coupler module integrates lateral control command and longitudinal contro ### Output `autoware_control_msgs/ControlCommandStamped`: + | Input | Data Type | Explanation | | ----------------------- | ---------------- | -------------------------------- | | Velocity | std_msgs/Float64 | from Longitudinal Control module | diff --git a/docs/design/software_architecture/Control/TrajectoryFollower/LateralController.md b/docs/design/software_architecture/Control/TrajectoryFollower/LateralController.md index fd2d24b2dadae..29cb67ed371c4 100644 --- a/docs/design/software_architecture/Control/TrajectoryFollower/LateralController.md +++ b/docs/design/software_architecture/Control/TrajectoryFollower/LateralController.md @@ -1,6 +1,6 @@ # Lateral Controller -# Overview +## Overview For following target trajectory, control stack needs to output lateral control commands (steering angle, steering angle velocity), and longitudinal control commands (acceleration, velocity). Lateral controller module is responsible for calculation of lateral control commands. diff --git a/docs/design/software_architecture/Control/TrajectoryFollower/LongitudinalController.md b/docs/design/software_architecture/Control/TrajectoryFollower/LongitudinalController.md index c1fc7d39c1d52..8121ad861086a 100644 --- a/docs/design/software_architecture/Control/TrajectoryFollower/LongitudinalController.md +++ b/docs/design/software_architecture/Control/TrajectoryFollower/LongitudinalController.md @@ -1,6 +1,6 @@ # Longitudinal Controller -# Overview +## Overview For following target trajectory, control stack needs to output lateral control commands (steering angle, steering angle velocity), and longitudinal control commands (acceleration, velocity). Longitudinal controller module is responsible for calculation of longitudinal control commands. 
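Taken together, the Latlon Coupler and the two controller documents above describe how lateral (steering) and longitudinal (velocity/acceleration) outputs are merged into a single `autoware_control_msgs/ControlCommandStamped`. A minimal sketch of such a merged message is shown below in `rostopic echo`-style YAML; the nested field names are assumptions inferred from the table in LatLonCoupler.md, not taken from the real message definition.

```yaml
# Hypothetical echo of the Latlon Coupler output (ControlCommandStamped).
# Field names are assumed from the LatLonCoupler.md table; only the pairing
# of lateral and longitudinal commands is the point being illustrated.
header:
  stamp: {secs: 0, nsecs: 0}
  frame_id: base_link
control:
  velocity: 2.8                  # [m/s]   from the Longitudinal Controller
  acceleration: 0.2              # [m/s^2] from the Longitudinal Controller
  steering_angle: 0.05           # [rad]   from the Lateral Controller
  steering_angle_velocity: 0.01  # [rad/s] from the Lateral Controller
```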
diff --git a/docs/design/software_architecture/Control/VehicleCmdGate/VehicleCmdGate.md b/docs/design/software_architecture/Control/VehicleCmdGate/VehicleCmdGate.md index 8403db30ce88e..eaf2d15379795 100644 --- a/docs/design/software_architecture/Control/VehicleCmdGate/VehicleCmdGate.md +++ b/docs/design/software_architecture/Control/VehicleCmdGate/VehicleCmdGate.md @@ -1,6 +1,6 @@ # Vehicle Cmd Gate -# Overview +## Overview Vehicle Cmd Gate module is responsible for Systematic post-processing. diff --git a/docs/design/software_architecture/ForDevelopers.md b/docs/design/software_architecture/ForDevelopers.md index 9001c701cf0d6..8cf84ae135e50 100644 --- a/docs/design/software_architecture/ForDevelopers.md +++ b/docs/design/software_architecture/ForDevelopers.md @@ -4,19 +4,21 @@ In order to test Autoware in a real vehicle, it is necessary to setup Autoware f ## 1. Sensor TF -* The sensor TF describes the positional relationship of each sensor to the vehicle's base link (defined as the center of the vehicle's rear axle) and has to be created for each configuration of sensors. -* Please setup following the [TF design document](https://github.com/tier4/AutowareArchitectureProposal.proj/blob/master/design/TF.md). +- The sensor TF describes the positional relationship of each sensor to the vehicle's base link (defined as the center of the vehicle's rear axle) and has to be created for each configuration of sensors. +- Please setup following the [TF design document](https://github.com/tier4/AutowareArchitectureProposal.proj/blob/master/design/TF.md). ## 2. Vehicle interface -* The [vehicle interface](https://github.com/tier4/AutowareArchitectureProposal.proj/blob/master/design/Vehicle/Vehicle.md#vehicle-interface) is the Autoware module that communicates with the vehicle's DBW (drive-by-wire) system, and must be created for each specific combination of vehicle and DBW. -* Please create an appropriate vehicle interface following the ["How to design a new vehicle interface"](https://github.com/tier4/AutowareArchitectureProposal.proj/blob/master/design/Vehicle/Vehicle.md#how-to-design-a-new-vehicle-interface) section of the [Vehicle stack design document](https://github.com/tier4/AutowareArchitectureProposal.proj/blob/master/design/Vehicle/Vehicle.md). -* [Sample vehicle interface file](https://github.com/tier4/lexus_description.iv.universe/blob/master/launch/vehicle_interface.launch) (for the Lexus RX 450H vehicle using [AutonomouStuff's PacMod system](https://autonomoustuff.com/products/pacmod)) + +- The [vehicle interface](https://github.com/tier4/AutowareArchitectureProposal.proj/blob/master/design/Vehicle/Vehicle.md#vehicle-interface) is the Autoware module that communicates with the vehicle's DBW (drive-by-wire) system, and must be created for each specific combination of vehicle and DBW. +- Please create an appropriate vehicle interface following the ["How to design a new vehicle interface"](https://github.com/tier4/AutowareArchitectureProposal.proj/blob/master/design/Vehicle/Vehicle.md#how-to-design-a-new-vehicle-interface) section of the [Vehicle stack design document](https://github.com/tier4/AutowareArchitectureProposal.proj/blob/master/design/Vehicle/Vehicle.md). +- [Sample vehicle interface file](https://github.com/tier4/lexus_description.iv.universe/blob/master/launch/vehicle_interface.launch) (for the Lexus RX 450H vehicle using [AutonomouStuff's PacMod system](https://autonomoustuff.com/products/pacmod)) ## 3. 
Vehicle info -* The `vehicle_info` YAML configuration file contains global parameters for the vehicle's physical configuration (e.g. wheel radius) that are read by Autoware in [rosparam format](http://wiki.ros.org/rosparam) and published to the ROS Parameter Server. -* The required parameters are as follows: -``` +- The `vehicle_info` YAML configuration file contains global parameters for the vehicle's physical configuration (e.g. wheel radius) that are read by Autoware in [rosparam format](http://wiki.ros.org/rosparam) and published to the ROS Parameter Server. +- The required parameters are as follows: + +```txt /vehicle_info/wheel_radius # wheel radius /vehicle_info/wheel_width # wheel width /vehicle_info/wheel_base # between front wheel center and rear wheel center @@ -25,9 +27,10 @@ In order to test Autoware in a real vehicle, it is necessary to setup Autoware f /vehicle_info/rear_overhang # between rear wheel center and vehicle rear /vehicle_info/vehicle_height # from the ground point to the highest point ``` -* [Sample vehicle info file](https://github.com/tier4/lexus_description.iv.universe/blob/master/config/vehicle_info.yaml) (for the Lexus RX 450H) + +- [Sample vehicle info file](https://github.com/tier4/lexus_description.iv.universe/blob/master/config/vehicle_info.yaml) (for the Lexus RX 450H) ## 4. Sensor launch file -* The `sensor.launch` file defines which sensor driver nodes are launched when running Autoware, and is dependent on the specific sensors (type, OEM and model) that are to be used. -* [Sample sensor.launch file](https://github.com/tier4/autoware_launcher.iv.universe/blob/master/sensing_launch/launch/sensing.launch) +- The `sensor.launch` file defines which sensor driver nodes are launched when running Autoware, and is dependent on the specific sensors (type, OEM and model) that are to be used. +- [Sample sensor.launch file](https://github.com/tier4/autoware_launcher.iv.universe/blob/master/sensing_launch/launch/sensing.launch) diff --git a/docs/design/software_architecture/Localization/Localization.md b/docs/design/software_architecture/Localization/Localization.md index 6137fb4859c66..7934744b948e5 100644 --- a/docs/design/software_architecture/Localization/Localization.md +++ b/docs/design/software_architecture/Localization/Localization.md @@ -1,7 +1,6 @@ -Localization -============= +# Localization -# Overview +## Overview The localization stack has a role to recognize where ego vehicle is ["map" frame](/design/Tf.md). Additionally, this stack estimates twist of ego vehicle for precise velocity planning and control. @@ -16,10 +15,10 @@ There are two main roles of Localization stack: | Input | Data Type | | -------------- | ------------------------------------------ | -| LiDAR | `sensor_msgs::PointCoud2` | +| LiDAR | `sensor_msgs::PointCloud2` | | GNSS | `geometry_msgs::PoseWithCovarianceStamped` | | IMU | `sensor_msgs::Imu` | -| Pointcloud Map | `sensor_msgs::PointCoud2` | +| Pointcloud Map | `sensor_msgs::PointCloud2` | | Vehicle CAN | `geometry_msgs::TwistStamped` | ### Sensors @@ -49,7 +48,7 @@ Multiple sensor information described below is considered. ### Reference Map - Pointcloud Map - + ## Output | Output | Topic (Data Type) | Use Cases of the output | @@ -69,22 +68,25 @@ Multiple sensor information described below is considered. | 6. Driving
on the target lane | Self pose on the map
Self twist | Control | To calculate target throttle/brake value and steering angle
based on pose and twist of ego vehicle and target trajectory | ## Requirements + The high-level requirements of Localization stack are listed below: -* Localization stack must provide pose in "map" frame. (Use Case 1-6) - * The output should be provided as TF from "map" to "base_link". (See [TF document](/design/TF.md) for the details) - * The localization result must be continuous - * Whenever a localization algorithm fails, the failure must be detected should not update vehicle pose. -* Localization stack must provide the velocity of the vehicle in "map" frame. (Use Case 5,6) -# Design +- Localization stack must provide pose in "map" frame. (Use Case 1-6) + - The output should be provided as TF from "map" to "base_link". (See [TF document](/design/TF.md) for the details) + - The localization result must be continuous + - Whenever a localization algorithm fails, the failure must be detected should not update vehicle pose. +- Localization stack must provide the velocity of the vehicle in "map" frame. (Use Case 5,6) + +## Design -The localization stack provides indispensable information to achieve autonomous driving. Therefore, it is not preferable to depend on only one localization algorithm. We insert pose twist fusion filter after pose and twist estimator to improve robustness of the estimated pose and twist. Also, developers can easily add a new estimator based on another sensor, e.g. camera-based visual SLAM and visual odometry, into the localization stack. The localization stack should output the transformation from map to base_link as /tf to utilize its interpolation system. +The localization stack provides indispensable information to achieve autonomous driving. Therefore, it is not preferable to depend on only one localization algorithm. We insert pose twist fusion filter after pose and twist estimator to improve robustness of the estimated pose and twist. Also, developers can easily add a new estimator based on another sensor, e.g. camera-based visual SLAM and visual odometry, into the localization stack. The localization stack should output the transformation from map to base_link as /tf to utilize its interpolation system. ![Localization_component](image/LocalizationOverview.svg) ## Pose estimator ### Role + Pose estimator is a component to estimate ego vehicle pose which includes position and orientation. The final output should also include covariance, which represents the estimator's confidence on the estimated pose. A pose estimator could either estimate pose in a local map, or it can estimate absolute pose using global localizer. The output pose can be published in any frame as long as enough /tf is provided to project into the "map" frame. ### Input @@ -95,11 +97,12 @@ Pose estimator is a component to estimate ego vehicle pose which includes positi - Pointcloud Map ### Output + - Pose with Covariance - Pose Estimator Diagnostics - ## Twist Estimator + Twist estimator is a component to estimate ego vehicle twist for precise velocity planning and control. The x-axis velocity and z-axis angular velocity of the vehicle are crucial information. These values are preferable to be noise-free and unbiased. ### Input @@ -117,9 +120,10 @@ Twist estimator is a component to estimate ego vehicle twist for precise velocit ### Role Pose Twist Fusion Filter is a component to integrate the poses estimated by pose estimators and the twists estimated by twist estimators. 
This assumes sequential Bayesian Filter, such as EKF and particle filter, which calculates vehicle's pose and twist probabilistically. This should also ensure the following functions: -* smoothing of estimated pose (see [TF.md](/design/TF.md)) -* outlier rejection of inputs based on previously calculated pose and it's covariance (see [TF.md](/design/TF.md)) -* time delay compensation in case pose estimators take time to calculate pose + +- smoothing of estimated pose (see [TF.md](/design/TF.md)) +- outlier rejection of inputs based on previously calculated pose and it's covariance (see [TF.md](/design/TF.md)) +- time delay compensation in case pose estimators take time to calculate pose ### Input @@ -131,4 +135,3 @@ Pose Twist Fusion Filter is a component to integrate the poses estimated by pose - Ego Vehicle Pose (/tf from map frame to base_link frame) - Ego Vehicle Twist - diff --git a/docs/design/software_architecture/Localization/PoseEstimator/PoseEstimator.md b/docs/design/software_architecture/Localization/PoseEstimator/PoseEstimator.md index a727a8aba6893..9943acad7a7ab 100644 --- a/docs/design/software_architecture/Localization/PoseEstimator/PoseEstimator.md +++ b/docs/design/software_architecture/Localization/PoseEstimator/PoseEstimator.md @@ -1,16 +1,16 @@ -Pose Estimator -============== +# Pose Estimator + +## Role -### Role Pose estimator is a component to estimate ego vehicle pose which includes position and orientation. The final output should also include covariance, which represents the estimator's confidence on estimated pose. A pose estimator could either be estimate pose on local map, or it can estimate absolute pose using global localizer. The output pose can be published in any frame as long as /tf is provided to project into the "map" frame. Also, pose estimator should stop publishing pose if it is possible to calculate reliability of estimated pose(e.g. matching score with map) and the reliability is low. ## Input | Input | Data Type | | ----------------------------------------- | ------------------------------------------ | -| LiDAR | `sensor_msgs::PointCoud2` | +| LiDAR | `sensor_msgs::PointCloud2` | | GNSS | `geometry_msgs::PoseWithCovarianceStamped` | -| Pointcloud Map | `sensor_msgs::PointCoud2` | +| Pointcloud Map | `sensor_msgs::PointCloud2` | | Feedback from
Pose Twist Fusion Filter | `geometry_msgs::PoseWithCovarianceStamped` | ## Output @@ -22,7 +22,7 @@ Pose estimator is a component to estimate ego vehicle pose which includes positi ## Design -This is a sample design of our implementation using NDT Scan Matcher. +This is a sample design of our implementation using NDT Scan Matcher. ![Pose_Estimator](image/PoseEstimator.svg) We integrated 3D NDT registration method for sample pose estimation algorithm. The NDT registration method is a local localization method that requires a good initial guess before optimizing pose. In order to realize fully automatic localization, GNSS is used for first initialization. After first loop of pose estimation, the output of pose twist fusion filter is used as next initial guess of NDT registration. @@ -30,7 +30,8 @@ We integrated 3D NDT registration method for sample pose estimation algorithm. T Note that NDT scan matcher does not publish pose when matching score calculated in alignment is less than threshold value to avoid publishing wrong estimated pose to Pose Twist Fusion Filter. Lidar sensors usually operate at 10 ~ 20 Hz and therefore NDT alignment should be executed within approximately 100 ms. In order to reduce execution time, We apply two pointcloud preprocessors to raw pointcloud from lidar sensors; Crop Measurement Range and DownSampler. + - Crop Measurement Range removes points far from ego vehicle. - DownSampler reduces the number of points by calculating a centroid of points in each voxelized grid. -Pose initializer adds height information into initial pose obtained from GNSS by looking for minimum height point from points within 1 m. +Pose initializer adds height information into initial pose obtained from GNSS by looking for minimum height point from points within 1 m. diff --git a/docs/design/software_architecture/Localization/PoseTwistFusionFilter/PoseTwistFusionFilter.md b/docs/design/software_architecture/Localization/PoseTwistFusionFilter/PoseTwistFusionFilter.md index 106ef9ab055b7..b4af5173fffd8 100644 --- a/docs/design/software_architecture/Localization/PoseTwistFusionFilter/PoseTwistFusionFilter.md +++ b/docs/design/software_architecture/Localization/PoseTwistFusionFilter/PoseTwistFusionFilter.md @@ -1,5 +1,4 @@ -Pose Twist Fusion Filter -======================== +# Pose Twist Fusion Filter ## Role diff --git a/docs/design/software_architecture/Localization/TwistEstimator/TwistEstimator.md b/docs/design/software_architecture/Localization/TwistEstimator/TwistEstimator.md index 383c37fd81c84..e0a443c2054c3 100644 --- a/docs/design/software_architecture/Localization/TwistEstimator/TwistEstimator.md +++ b/docs/design/software_architecture/Localization/TwistEstimator/TwistEstimator.md @@ -1,10 +1,11 @@ -Twist Estimator -============== +# Twist Estimator ## Twist Estimator -Twist estimator is a component to estimate ego vehicle twist for precise velocity planning and control. The x-axis velocity and z-axis angular velocity in vehicle twist is mainly considered. These values are preferable to be noise-free and unbiased. + +Twist estimator is a component to estimate ego vehicle twist for precise velocity planning and control. The x-axis velocity and z-axis angular velocity in vehicle twist is mainly considered. These values are preferable to be noise-free and unbiased. 
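As a concrete reference for the sentence above, the sketch below shows a `geometry_msgs::TwistWithCovarianceStamped` as it might be echoed on the twist estimator output, with only the two components the document calls out (x-axis linear velocity and z-axis angular velocity) carrying meaningful values; the numbers are placeholders, not taken from any real log.

```yaml
# Hypothetical echo of the twist estimator output
# (geometry_msgs::TwistWithCovarianceStamped); values are placeholders.
header:
  stamp: {secs: 0, nsecs: 0}
  frame_id: base_link
twist:
  twist:
    linear:  {x: 5.2, y: 0.0, z: 0.0}   # x-axis velocity, e.g. from vehicle CAN [m/s]
    angular: {x: 0.0, y: 0.0, z: 0.03}  # z-axis angular velocity, e.g. from IMU [rad/s]
  covariance: [0.01, 0, 0, 0, 0, 0,
               0, 0.01, 0, 0, 0, 0,
               0, 0, 0.01, 0, 0, 0,
               0, 0, 0, 0.01, 0, 0,
               0, 0, 0, 0, 0.01, 0,
               0, 0, 0, 0, 0, 0.01]     # 6x6 row-major covariance
```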
## Input + | Input | Data Type | | ----------- | ----------------------------- | | Vehicle CAN | `geometry_msgs::TwistStamped` | @@ -17,8 +18,7 @@ Twist estimator is a component to estimate ego vehicle twist for precise velocit | Estimated Twist | `geometry_msgs::TwistWithCovarianceStamped` | Pose Twist Fusion Filter | ## Design - + ![TwistEstimator](image/TwistEstimator.svg) In the figure above, the solid line of output shows our implementation, while the dotted lines of output show feasible implementations. Estimated twist compensates estimated pose in Pose Twist Fusion Filter, and also becomes the alternative to estimated pose when the calculation of Pose Estimator is highly unreliable. Therefore, estimated twist is preferable to be noise-free and unbiased. We adopt this design to merge multiple sensor inputs to generate more accurate twist. - diff --git a/docs/design/software_architecture/Map/Map.md b/docs/design/software_architecture/Map/Map.md index c1b9a0db7aded..d374f4fa1f452 100644 --- a/docs/design/software_architecture/Map/Map.md +++ b/docs/design/software_architecture/Map/Map.md @@ -1,7 +1,7 @@ -Map -============= +# Map + +## Overview -# Overview Map is responsible for distributing static information about the environment that autonomous vehicle might drive. Currently, this is separated into two categories: - Geometric information about the environment (pointcloud map) @@ -10,50 +10,56 @@ Map is responsible for distributing static information about the environment tha ## Use Cases ### Pointcloud Map + Use cases of the maps are the following: -* Localization: Autoware must always be aware of its position in Earth frame. Pointcloud map is used for local localization with LiDAR based localization algorithm such as NDT matching. + +- Localization: Autoware must always be aware of its position in Earth frame. Pointcloud map is used for local localization with LiDAR based localization algorithm such as NDT matching. ### Vector Map -Vector map provides semantic information about roads and is used for various uses cases in both Planning and Perception. Note that some of the following use cases might be removed as performance of perception improves. For example, retrieving lane shapes and detecting traffic lights can be done online e.g. using camera images. However, with consideration of [spoofing attacks](https://www.nassiben.com/mobilbye) and reliability of current Perception stack, following uses cases must be supported by vector map for now. -* Planning: - * Calculating route from start to goal - * Creating trajectory along lane during lane following - * Driving according to traffic rules, such as speed limit, traffic light, and right of way. -* Perception: - * predicting other participant's trajectory along lanes - * Detecting traffic lights +Vector map provides semantic information about roads and is used for various uses cases in both Planning and Perception. Note that some of the following use cases might be removed as performance of perception improves. For example, retrieving lane shapes and detecting traffic lights can be done online e.g. using camera images. However, with consideration of [spoofing attacks](https://www.nassiben.com/mobilbye) and reliability of current Perception stack, following uses cases must be supported by vector map for now. +- Planning: + - Calculating route from start to goal + - Creating trajectory along lane during lane following + - Driving according to traffic rules, such as speed limit, traffic light, and right of way. 
+- Perception: + - predicting other participant's trajectory along lanes + - Detecting traffic lights ## Requirements + The role of map is to publish map information to other stacks. In order to satisfy use cases above, the following requirements must be met. -### PointCloud map - * The map should provide geometric information of surrounding environment that includes any region 200m away from any possible route that the vehicle might take. - * Resolution of pointcloud map must be at least 0.2m. (from past experience) - * Size of the map should be less than 1GB in binary format. (Limit of ROS message) - * Pointcloud must be georeferenced. +### PointCloud map + +- The map should provide geometric information of surrounding environment that includes any region 200m away from any possible route that the vehicle might take. +- Resolution of pointcloud map must be at least 0.2m. (from past experience) +- Size of the map should be less than 1GB in binary format. (Limit of ROS message) +- Pointcloud must be georeferenced. ### Vector Map - * The map should be georeferenced. - * The map should include region within 200m away from any possible route that autonomous vehicle might drive with following information: - * Routing: the map should be able to retrieve next lane, previous lane, right lane, and left lane of a lane with availability of lane change. - * Geometry: shape and position of following objects must be provided: - * lanes - * traffic lights - * stop lines - * crosswalk - * parking space - * and parking lots. - * Traffic rules: the map should be able to retrieve following information from given lane: - * traffic lights - * stop lines - * traffic signs - * right of way - * speed limit - * direction of lanes + +- The map should be georeferenced. +- The map should include region within 200m away from any possible route that autonomous vehicle might drive with following information: + - Routing: the map should be able to retrieve next lane, previous lane, right lane, and left lane of a lane with availability of lane change. + - Geometry: shape and position of following objects must be provided: + - lanes + - traffic lights + - stop lines + - crosswalk + - parking space + - and parking lots. + - Traffic rules: the map should be able to retrieve following information from given lane: + - traffic lights + - stop lines + - traffic signs + - right of way + - speed limit + - direction of lanes ## Input + The input to Map stack: | Input | Data Type | Explanation | @@ -70,7 +76,7 @@ The table below summarizes the output from Map stack: | PointCloud map | `/map/pointcloud_map`
(`sensor_msgs::PointCloud2`) | This includes the shape of the surrounding environment as a collection of points.
This is assumed to be used by Localization module for map matching with LiDAR pointclouds. | | Vector Map | `/map/vector_map`
(`autoware_lanelet2_msgs::MapBin`) | Lanelet2 map information will be dumped as serialized data, and passed down to other stacks. Then, it will be converted back to an internal data structure to enable Lanelet2 library API access to each data element.
This is assumed to be used by Localization stack for lane-based localization, Perception stack for trajectory prediction of other vehicles, and Planning to plan behavior to follow traffic rules. | -# Design +## Design Map module consist of two modules: pointcloud map loader and vector map loader. Since map data are converted into binary data array, it is meant to be converted back to internal data structure using appropriate library, for example PCL for pointcloud and Lanelet2 for vector map. The access to each data element is also assumed to be done through map library. @@ -84,20 +90,34 @@ Role of this module is to output pointcloud map in `map` frame to be used by oth ### Input -- PCD File
This contains collection of point coordinates from reference point. -- YAML File
This is meant to contain the coordinate of origin of PCD file in earth frame (either in ECEF or WGS84) +- PCD File + + This contains collection of point coordinates from reference point. + +- YAML File + + This is meant to contain the coordinate of origin of PCD file in earth frame (either in ECEF or WGS84) ### Output -- pointcloud_map: `sensor_msgs::PointCloud2`
This module should output environment information as pointcloud message type. The points in the message should be projected into map frame since the main user is assumed to be Localization stack. +- pointcloud_map: `sensor_msgs::PointCloud2` + +This module should output environment information as pointcloud message type. The points in the message should be projected into map frame since the main user is assumed to be Localization stack. ## Vector Map Loader ### Role + Role of this module is to semantic road information in map frame to be used by other stacks. ### Input -- Lanelet2 Map File (OSM file)
This includes all lane-related information. The specification about the format is specified [here](./SemanticMap/AutowareLanelet2Format.md). + +- Lanelet2 Map File (OSM file) + +This includes all lane-related information. The specification about the format is specified [here](./SemanticMap/AutowareLanelet2Format.md). ### Output -- vector_map `autoware_lanelet_msgs::MapBin`
This contains serialized data of Lanelet2 map. All coordinate data contained in the map should be already projected into map frame using specified ECEF parameter. + +- vector_map `autoware_lanelet_msgs::MapBin` + +This contains serialized data of Lanelet2 map. All coordinate data contained in the map should be already projected into map frame using specified ECEF parameter. diff --git a/docs/design/software_architecture/Map/SemanticMap/AutowareLanelet2Format.md b/docs/design/software_architecture/Map/SemanticMap/AutowareLanelet2Format.md index 697a8b6237ed0..40d22e846e685 100644 --- a/docs/design/software_architecture/Map/SemanticMap/AutowareLanelet2Format.md +++ b/docs/design/software_architecture/Map/SemanticMap/AutowareLanelet2Format.md @@ -1,34 +1,44 @@ # Modifying Lanelet2 format for Autoware + ## Overview + About the basics of the default format, please refer to main [Lanelet2 repository](https://github.com/fzi-forschungszentrum-informatik/Lanelet2). (see [here](https://github.com/fzi-forschungszentrum-informatik/Lanelet2/blob/master/lanelet2_core/doc/LaneletPrimitives.md) about primitives) -In addition to default Lanelet2 Format, users should add following mandatory/optional tags to their osm lanelet files as explained in reset of this document. +In addition to default Lanelet2 Format, users should add following mandatory/optional tags to their osm lanelet files as explained in reset of this document. ## Additional Conventions + ### Lane length + Original Lanelet2 format does not specify the minimum or maximum length of lane. However, without the limit, it is very difficult to estimate memory size and computation time using lane information. Therefore, we set general convention that a lanelet should have length between 1m ~ 50m. ### No Self Crossing Lanes + In order to make geometrical calculation of lane easier (e.g. getting drivable area, operating triangulation, etc) we set a convention that no lanes should overlap itself. Whenever, it overlaps itself, the lane should be separated into two different lanes with same attributes. ## Additional Mandatory Tags + ### Elevation Tags + Elevation("ele") information for points(`node`) is optional in default Lanelet2 format. -However, some of Autoware packages(e.g. trafficlight_recognizer) need elevation to be included in HD map. Therefore, users must make sure that all points in their osm maps contain elevation tags. +However, some of Autoware packages(e.g. trafficlight_recognizer) need elevation to be included in HD map. Therefore, users must make sure that all points in their osm maps contain elevation tags. Here is an example osm syntax for node object. -``` + +```xml - + ``` ### SpeedLimit Tags -Speed limit("speed_limit") information for lanelet(`relation`) is optional in default Lanelet2 format, and there are several ways of setting the information (e.g. using tag or using regulatory element or using default value in traffic rules). To avoid confusion, we will make this information mandatory for all road lanelets, and should not expect Autoware to use default value. -Therefore, users must make sure that all lanelets in their osm maps contain speed_limit tags. + +Speed limit("speed_limit") information for lanelet(`relation`) is optional in default Lanelet2 format, and there are several ways of setting the information (e.g. using tag or using regulatory element or using default value in traffic rules). To avoid confusion, we will make this information mandatory for all road lanelets, and should not expect Autoware to use default value. 
+Therefore, users must make sure that all lanelets in their osm maps contain speed_limit tags. Here is an example osm syntax for a lanelet object. -``` + +```xml @@ -39,36 +49,40 @@ Here is an example osm syntax for a lanelet object. ``` ### TrafficLights -Default Lanelet2 format uses LineString(`way`) or Polygon class to represent the shape of a traffic light. For Autoware, traffic light objects must be represented only by LineString to avoid confusion, where start point is at bottom left edge and end point is at bottom right edge. Also, "height" tag must be added in order to represent the size in vertical direction(not the position). + +Default Lanelet2 format uses LineString(`way`) or Polygon class to represent the shape of a traffic light. For Autoware, traffic light objects must be represented only by LineString to avoid confusion, where start point is at bottom left edge and end point is at bottom right edge. Also, "height" tag must be added in order to represent the size in vertical direction(not the position). The Following image illustrates how LineString is used to represent shape of Traffic Light in Autoware. +Here is an example osm syntax for traffic light object. -Here is an example osm syntax for traffic light object. -``` +```xml - + ``` ### Turn Directions -Users must add "turn_direction" tags to lanelets within intersections to indicate vehicle's turning direction. You do not need this tags for lanelets that are not in intersections. If you do not have this tag, Autoware will not be able to light up turning indicators. + +Users must add "turn_direction" tags to lanelets within intersections to indicate vehicle's turning direction. You do not need this tags for lanelets that are not in intersections. If you do not have this tag, Autoware will not be able to light up turning indicators. This tags only take following values: -* left -* right -* straight + +- left +- right +- straight Following image illustrates how lanelets should be tagged. -Here is an example of osm syntax for lanelets in intersections. -``` +Here is an example of osm syntax for lanelets in intersections. + +```xml @@ -77,18 +91,21 @@ Here is an example of osm syntax for lanelets in intersections. - + ``` ## Optional Tags -Following tags are optional tags that you may want to add depending on how you want to use your map in Autoware. + +Following tags are optional tags that you may want to add depending on how you want to use your map in Autoware. ### Meta Info + Users may add the `MetaInfo` element to their OSM file to indicate format version and map version of their OSM file. This information is not meant to influence Autoware vehicle's behavior, but is published as ROS message so that developers could know which map was used from ROSBAG log files. MetaInfo elements exists in the same hierarchy with `node`, `way`, and `relation` elements, otherwise JOSM wouldn't be able to load the file correctly. Here is an example of MetaInfo in osm file: -``` + +```xml @@ -96,18 +113,20 @@ Here is an example of MetaInfo in osm file: ... ... -``` +``` ### Local Coordinate Expression -Sometimes users might want to create Lanelet2 maps that are not georeferenced. -In such a case, users may use "local_x", "local_y" taggings to express local positions instead of latitude and longitude. + +Sometimes users might want to create Lanelet2 maps that are not georeferenced. +In such a case, users may use "local_x", "local_y" taggings to express local positions instead of latitude and longitude. 
Autoware Osm Parser will overwrite x,y positions with these tags when they are present. -For z values, use "ele" tags as default Lanelet2 Format. -You would still need to fill in lat and lon attributes so that parser does not crush, but their values could be anything. +For z values, use "ele" tags as default Lanelet2 Format. +You would still need to fill in lat and lon attributes so that parser does not crush, but their values could be anything. Here is example `node` element in osm with "local_x", "local_y" taggings: -``` - + +```xml + diff --git a/docs/design/software_architecture/Messages.md b/docs/design/software_architecture/Messages.md index acc23810bc11a..4eb5139b8caef 100644 --- a/docs/design/software_architecture/Messages.md +++ b/docs/design/software_architecture/Messages.md @@ -1,7 +1,6 @@ -Messages -========== +# Messages -# Overview +## Overview This page describes the eight categories of message in the new architecture, along with definitions for each message. @@ -35,7 +34,7 @@ This page describes the eight categories of message in the new architecture, alo `Header header` `string format_version` `string map_version` -`int8[] data` +`int8[] data` ## Autoware perception messages @@ -44,32 +43,32 @@ This page describes the eight categories of message in the new architecture, alo `uuid_msgs/UniqueID id` `Semantic semantic` `State state` -`Shape shape` +`Shape shape` ### DynamicObjectArray.msg `std_msgs/Header header` -`DynamicObject[] objects` +`DynamicObject[] objects` ### DynamicObjectWithFeature.msg `DynamicObject object` -`Feature feature` +`Feature feature` ### DynamicObjectWithFeatureArray.msg `std_msgs/Header header` -`DynamicObjectWithFeature[] feature_objects` +`DynamicObjectWithFeature[] feature_objects` ### Feature.msg `sensor_msgs/PointCloud2 cluster` -`sensor_msgs/RegionOfInterest roi` +`sensor_msgs/RegionOfInterest roi` ### PredictedPath.msg `geometry_msgs/PoseWithCovarianceStamped[] path` -`float64 confidence` +`float64 confidence` ### Semantic.msg @@ -82,7 +81,7 @@ This page describes the eight categories of message in the new architecture, alo `uint8 PEDESTRIAN=6` `uint8 ANIMAL=7` `uint32 type` -`float64 confidence` +`float64 confidence` ### Shape.msg @@ -91,7 +90,7 @@ This page describes the eight categories of message in the new architecture, alo `uint8 POLYGON=2` `uint8 type` `geometry_msgs/Vector3 dimensions` -`geometry_msgs/Polygon footprint` +`geometry_msgs/Polygon footprint` ### State.msg @@ -101,19 +100,19 @@ This page describes the eight categories of message in the new architecture, alo `bool twist_reliable` `geometry_msgs/AccelWithCovariance acceleration_covariance` `bool acceleration_reliable` -`PredictedPath[] predicted_paths` +`PredictedPath[] predicted_paths` ## Autoware planning messages ### LaneSequence.msg -`int64[] lane_ids` +`int64[] lane_ids` ### Path.msg `std_msgs/Header header` `autoware_planning_msgs/PathPoint[] points` -`nav_msgs/OccupancyGrid drivable_area` +`nav_msgs/OccupancyGrid drivable_area` ### PathPoint.msg @@ -121,30 +120,30 @@ This page describes the eight categories of message in the new architecture, alo `uint8 FIXED=1` `geometry_msgs/Pose pose` `geometry_msgs/Twist twist` -`uint8 type` +`uint8 type` ### PathPointWithLaneId.msg `autoware_planning_msgs/PathPoint point` -`int64[] lane_ids` +`int64[] lane_ids` ### PathWithLaneId.msg `std_msgs/Header header` `autoware_planning_msgs/PathPointWithLaneId[] points` -`nav_msgs/OccupancyGrid drivable_area` +`nav_msgs/OccupancyGrid drivable_area` ### Route.msg `std_msgs/Header header` 
`geometry_msgs/Pose goal_pose` -`autoware_planning_msgs/RouteSection[] route_sections` +`autoware_planning_msgs/RouteSection[] route_sections` ### RouteSection.msg `int64[] lane_ids` `int64 preferred_lane_id` -`int64[] continued_lane_ids` +`int64[] continued_lane_ids` ### Scenario.msg @@ -152,7 +151,7 @@ This page describes the eight categories of message in the new architecture, alo `string LaneDriving=LaneDriving` `string Parking=Parking` `string current_scenario` -`string[] activating_scenarios` +`string[] activating_scenarios` ### Trajectory.msg @@ -163,7 +162,7 @@ This page describes the eight categories of message in the new architecture, alo `geometry_msgs/Pose pose` `geometry_msgs/Twist twist` -`geometry_msgs/Accel accel` +`geometry_msgs/Accel accel` ## Autoware system messages @@ -178,7 +177,7 @@ This page describes the eight categories of message in the new architecture, alo `string ArrivedGoal=ArrivedGoal` `string FailedToArriveGoal=FailedToArriveGoal` `string state` -`string msg` +`string msg` ## Autoware traffic light messages @@ -193,27 +192,27 @@ This page describes the eight categories of message in the new architecture, alo `uint8 UP=6` `uint8 DOWN=7` `uint32 type` -`float32 confidence` +`float32 confidence` ### TrafficLightRoi.msg `sensor_msgs/RegionOfInterest roi` -`int32 id` +`int32 id` ### TrafficLightRoiArray.msg `std_msgs/Header header` -`autoware_traffic_light_msgs/TrafficLightRoi[] rois` +`autoware_traffic_light_msgs/TrafficLightRoi[] rois` ### TrafficLightState.msg `autoware_traffic_light_msgs/LampState[] lamp_states` -`int32 id` +`int32 id` ### TrafficLightStateArray.msg `std_msgs/Header header` -`autoware_traffic_light_msgs/TrafficLightState[] states` +`autoware_traffic_light_msgs/TrafficLightState[] states` ## Autoware vector map messages @@ -222,7 +221,7 @@ This page describes the eight categories of message in the new architecture, alo `std_msgs/Header header` `string format_version` `string map_version` -`int8[] data` +`int8[] data` ## Autoware vehicle messages @@ -239,7 +238,7 @@ This page describes the eight categories of message in the new architecture, alo `std_msgs/Header header` `float64 throttle` -`float64 brake` +`float64 brake` ### Shift.msg @@ -249,17 +248,17 @@ This page describes the eight categories of message in the new architecture, alo `uint8 NEUTRAL=3` `uint8 DRIVE=4` `uint8 LOW=5` -`int32 data` +`int32 data` ### ShiftStamped.msg `std_msgs/Header header` -`autoware_vehicle_msgs/Shift shift` +`autoware_vehicle_msgs/Shift shift` ### Steering.msg `std_msgs/Header header` -`float32 data` +`float32 data` ### TurnSignal.msg @@ -268,11 +267,11 @@ This page describes the eight categories of message in the new architecture, alo `uint8 LEFT = 1` `uint8 RIGHT = 2` `uint8 HAZARD = 3` -`int32 data` +`int32 data` ### VehicleCommand.msg `std_msgs/Header header` `autoware_control_msgs/ControlCommand control` `autoware_vehicle_msgs/Shift shift` -`int32 emergency` +`int32 emergency` diff --git a/docs/design/software_architecture/NamingConvention.md b/docs/design/software_architecture/NamingConvention.md index c52f646fcce1f..da1bbd2d62b68 100644 --- a/docs/design/software_architecture/NamingConvention.md +++ b/docs/design/software_architecture/NamingConvention.md @@ -1,35 +1,39 @@ # Naming Conventions ## Package Names + Although Autoware does not have its own explicit naming convention, it does adhere to the guidance given in [REP-144](https://www.ros.org/reps/rep-0144.html). 
Thus an Autoware package name must: ->* only consist of lowercase alphanumerics and _ separators, and start with an alphabetic character ->* not use multiple _ separators consecutively ->* be at least two characters long +> - only consist of lowercase alphanumerics and \_ separators, and start with an alphabetic character +> - not use multiple \_ separators consecutively +> - be at least two characters long ## Topic Names ### Default topic names + In Autoware, all topics should be named according to the guidelines in the [ROS wiki](http://wiki.ros.org/Names). Additionally, it is strongly recommended that the default topic names specified in source code should follow these conventions: -* All topics must be set under private namespaces. Any global topics must have a documented explanation. -* All topics must be specified under one of the following namespaces within the node's private namespace. Doing so allows users to easily understand which topics are inputs and which are outputs when they look at remapping in launch files for example. - * `input`: subscribed topics - * `output`: published topics - * `debug`: published topics that are meant for debugging (e.g. for visualization) +- All topics must be set under private namespaces. Any global topics must have a documented explanation. +- All topics must be specified under one of the following namespaces within the node's private namespace. Doing so allows users to easily understand which topics are inputs and which are outputs when they look at remapping in launch files for example. + - `input`: subscribed topics + - `output`: published topics + - `debug`: published topics that are meant for debugging (e.g. for visualization) Consider, for example, a node that subscribes to pointcloud data, applies a voxel grid filter and then publishes the filtered data. In this case, the topics should be named as follows: -* ~input/points_original -* ~output/points_filtered +- ~input/points_original +- ~output/points_filtered ### Remapped Topics + The default topics of each node can be remapped to other topic names using a launch file. For encapsulation purposes and ease of understanding, remapped topics should be published under the namespaces of the appropriate modules as per Autoware's layered architecture. Doing so allows both developers and users to see at a glance where the topic is used in the architecture. Some key topics are listed below: -``` + +```txt /control/vehicle_cmd /perception/object_recognition/detection/objects /perception/object_recognition/objects diff --git a/docs/design/software_architecture/Overview.md b/docs/design/software_architecture/Overview.md index b49f542a02b18..afe684cbd2bb0 100644 --- a/docs/design/software_architecture/Overview.md +++ b/docs/design/software_architecture/Overview.md @@ -1,7 +1,6 @@ -Architecture Overview -===================== +# Architecture Overview -# Introduction +## Introduction Currently it is difficult to improve Autoware.AI's capabilities due to a lack of concrete architecture design and a lot of technical debt, such as the tight coupling between modules as well as unclear responsibilities for each module. At Tier IV, we thought that a new architecture was needed to help accelerate the development of Autoware. 
@@ -13,14 +12,14 @@ The purpose of this proposal is to define a layered architecture that clarifies Note that the initial focus of this architecture design was solely on driving capability, and so the following features were left as future work: -* Fail safe -* HMI -* Real-time processing -* Redundant system -* State monitoring system +- Fail safe +- HMI +- Real-time processing +- Redundant system +- State monitoring system +## Use Cases -# Use Cases When designing the architecture, the use case of last-mile travel was chosen. For example: **Description:** Travelling to/from a grocery store in the same city @@ -31,7 +30,7 @@ When designing the architecture, the use case of last-mile travel was chosen. Fo - Weather conditions are fine - Accurate HD map of the environment is available -**Basic Flow:** +**Basic Flow:** 1. **User:** Starts a browser on their phone and accesses the Autoware web app. Presses "Summon", and the app sends the user’s GPS location to Autoware 2. **Autoware:** Plans a route to the user’s location, and shows it on the user’s phone @@ -42,7 +41,8 @@ When designing the architecture, the use case of last-mile travel was chosen. Fo 7. **User:** Confirms the route and presses “Engage” 8. **Autoware:** Drives autonomously to the user's home -# Requirements +## Requirements + To achieve this last-mile use case, the following functional requirements for Autoware were set: - Autoware can plan a route to the specified goal within the type of environment described above. @@ -55,7 +55,8 @@ Since Autoware is open source and is meant to be used/developed by people around - Architecture can be extended to follow traffic rules for different countries - The role and interface of a module must be clearly defined -# High-level Architecture Design +## High-level Architecture Design + ![Overview](image/Overview2.svg) This new architecture consists of the following six stacks. Each of these design pages contains a more detailed set of requirements and use cases specific to that stack: @@ -68,5 +69,6 @@ This new architecture consists of the following six stacks. 
Each of these design - [Vehicle](Vehicle/Vehicle.md) - [Map](Map/Map.md) -# References +## References + - [New architecture presentation given to the AWF Technical Steering Committee, March 2020](https://discourse.ros.org/uploads/short-url/woUU7TGLPXFCTJLtht11rJ0SqCL.pdf) diff --git a/docs/design/software_architecture/Perception/ObjectRecognition/Detection/Detection.md b/docs/design/software_architecture/Perception/ObjectRecognition/Detection/Detection.md index c483604b9a5e9..f7e86b3bcf4a3 100644 --- a/docs/design/software_architecture/Perception/ObjectRecognition/Detection/Detection.md +++ b/docs/design/software_architecture/Perception/ObjectRecognition/Detection/Detection.md @@ -1,30 +1,32 @@ -Detection -===== +# Detection + ## Use Cases and Requirements + Detection in Object Recognition is required for use cases involved with obstacles: -* Changing lane -* Turning at intersection -* Avoiding parked vehicles -* Stopping at a crosswalk when pedestrians are walking -* Passing intersection without traffic lights -* Merging into another lane -* Taking over Pedestrian/Cyclists -* Stopping/yielding to an obstacle +- Changing lane +- Turning at intersection +- Avoiding parked vehicles +- Stopping at a crosswalk when pedestrians are walking +- Passing intersection without traffic lights +- Merging into another lane +- Taking over Pedestrian/Cyclists +- Stopping/yielding to an obstacle For the details about related requirements, please refer to the [document for Perception stack](/design/Perception/Perception.md). ## Role + Detection in Object Recognition detects objects by processing sensor data. Detection is triggered on every sensor data callback independently from previous detection results. The Detection module is responsible for calculating objects' pose, class, and shape. ## Input -| Input | Data Type | Topic | -| ----------- | ------------------------- | ----------------------------- | -| LiDAR | `sensor_msgs::PointCoud2` | /sensing/lidar/pointcloud | -| Camera | `sensor_msgs::Image` | /sensing/camera/*/image_raw | -| Camera info | `sensor_msgs::CameraInfo` | /sensing/camera/*/camera_info | -| TF | `tf2_msgs::TFMessage` | /tf | +| Input | Data Type | Topic | +| ----------- | -------------------------- | ------------------------------ | +| LiDAR | `sensor_msgs::PointCloud2` | /sensing/lidar/pointcloud | +| Camera | `sensor_msgs::Image` | /sensing/camera/\*/image_raw | +| Camera info | `sensor_msgs::CameraInfo` | /sensing/camera/\*/camera_info | +| TF | `tf2_msgs::TFMessage` | /tf | ## Output @@ -33,21 +35,19 @@ Detection in Object Recognition detects objects by processing sensor data. Detec | Dynamic Objects | `autoware_perception_msgs::DynamicObjectArray` | Object Recognition: Tracking | `base_link` | /perception/object_recognition/detection/objects | ## Design + The Detection module is designed to adopt various detection algorithms. ![msg](../image/ObjectDetectionDesign.png) This is one of our sample implementations for the Detection module. ![msg](../image/ObjectDetectionDesign2.png) - ## Requirement in Output Designated objects' properties in `autoware_perception_msgs::DynamicObject` need to be filled in the Detection module before passing to the Tracking module. 
![msg](../image/ObjectDetectionRequirement.png) - - | Property | Definition | Data Type | Parent Data Type | | -------------------- | --------------------------------------------- | ----------------------------------- | ----------------------------------------- | | type | Class information | `uint8` | `autoware_perception_msgs::Semantic` | diff --git a/docs/design/software_architecture/Perception/ObjectRecognition/Prediction/Prediction.md b/docs/design/software_architecture/Perception/ObjectRecognition/Prediction/Prediction.md index 7dc34f91fdfcc..0a14c31203382 100644 --- a/docs/design/software_architecture/Perception/ObjectRecognition/Prediction/Prediction.md +++ b/docs/design/software_architecture/Perception/ObjectRecognition/Prediction/Prediction.md @@ -1,20 +1,21 @@ -Detection -===== +# Detection + ## Use Cases and Requirements Prediction in Object Recognition is required for use cases involved with obstacles: -* Changing lane -* Turning at intersection -* Stopping at a crosswalk when pedestrians are walking -* Passing intersection without traffic lights -* Merging into another lane -* Taking over Pedestrian/Cyclists -* Stopping/yielding to an obstacle +- Changing lane +- Turning at intersection +- Stopping at a crosswalk when pedestrians are walking +- Passing intersection without traffic lights +- Merging into another lane +- Taking over Pedestrian/Cyclists +- Stopping/yielding to an obstacle For the details about related requirements, please refer to the [document for Perception stack](/design/Perception/Perception.md). ## Role + Prediction in Object Recognition estimate objects' intention. Intentions are represented as objects' future trajectories with covariance. The Planning module makes a decision and plans a future ego-motion based on the results of predicted objects. ## Input @@ -32,16 +33,16 @@ Prediction in Object Recognition estimate objects' intention. Intentions are rep | Dynamic Objects | `autoware_perception_msgs::DynamicObjectArray` | Planning | `map` | /perception/object_recognition/objects | ## Design + This is our sample implementation for the Tracking module. ![msg](/design/img/ObjectPredictionDesign.png) - ## Requirement in Output + Designated objects' property in autoware_perception_msgs::DynamicObject needs to be filled in the Prediction module before passing to the Planning component. ![msg](../image/ObjectPredictionRequirement.png) - -| Property | Definition | Data Type | Parent Data Type | -| -------------- | ------------------------------------- | -------------------------------------------- | --------------------------------- | -| predicted_path | Predicted future paths for an object. | `autoware_perception_msgs::PredictedPath[] ` | `autoware_perception_msgs::State` | +| Property | Definition | Data Type | Parent Data Type | +| -------------- | ------------------------------------- | ------------------------------------------- | --------------------------------- | +| predicted_path | Predicted future paths for an object. 
| `autoware_perception_msgs::PredictedPath[]` | `autoware_perception_msgs::State` | diff --git a/docs/design/software_architecture/Perception/ObjectRecognition/Tracking/Tracking.md b/docs/design/software_architecture/Perception/ObjectRecognition/Tracking/Tracking.md index dd566be69c901..5c47ae2d93ceb 100644 --- a/docs/design/software_architecture/Perception/ObjectRecognition/Tracking/Tracking.md +++ b/docs/design/software_architecture/Perception/ObjectRecognition/Tracking/Tracking.md @@ -1,21 +1,22 @@ -Detection -===== +# Detection ## Use Cases and Requirements + Tracking in Object Recognition is required for use cases involved with obstacles: -* Changing lane -* Turning at intersection -* Avoiding parked vehicles -* Stopping at a crosswalk when pedestrians are walking -* Passing intersection without traffic lights -* Merging into another lane -* Taking over Pedestrian/Cyclists -* Stopping/yielding to an obstacle +- Changing lane +- Turning at intersection +- Avoiding parked vehicles +- Stopping at a crosswalk when pedestrians are walking +- Passing intersection without traffic lights +- Merging into another lane +- Taking over Pedestrian/Cyclists +- Stopping/yielding to an obstacle For the details about related requirements, please refer to the [document for Perception stack](/design/Perception/Perception.md). ## Role + Tracking in Object Recognition keeps objects' unique id over time. This time series processing leads to estimating objects' property such as their velocity and/or acceleration. Furthermore, it could estimate more accurate objects' orientation by leveraging the Detection results over time. ## Input @@ -32,17 +33,16 @@ Tracking in Object Recognition keeps objects' unique id over time. This time ser | Dynamic Objects | `autoware_perception_msgs::DynamicObjectArray` | Object Recognition: Prediction | `map` | /perception/object_recognition/tracking/objects | ## Design + This is our sample implementation for the Tracking module. ![msg](../image/ObjectTrackingDesign.png) - ## Requirement in Output Designated objects' properties in autoware_perception_msgs::DynamicObject need to be filled in the Tracking module before passing to the Prediction module. ![msg](../image/ObjectTrackingRequirement.png) - | Property | Definition | Data Type | Parent Data Type | | --------------------- | --------------------------------------- | ------------------------------------ | ----------------------------------------- | | id | Unique object id over frames | `uuid_msgs::UniqueID` | `autoware_perception_msgs::DynamicObject` | diff --git a/docs/design/software_architecture/Perception/Perception.md b/docs/design/software_architecture/Perception/Perception.md index 8e839c4dddcc5..aad79df48cf56 100644 --- a/docs/design/software_architecture/Perception/Perception.md +++ b/docs/design/software_architecture/Perception/Perception.md @@ -1,80 +1,82 @@ -Perception -============= -# Overview +# Perception + +## Overview + Perception stack recognizes the surrounding of the vehicle to achieve safe and efficient autonomous driving. The output of Sensing describes environment "as is", and is usually too primitive to be used directly for high-level planning. Perception stack will extract key and organize it into more meaningful data for Planning stack. ![Perception_overview](image/PerceptionOverview.svg) -# Role +## Role + Perception stack has 2 main roles. 
- **Object Recognition** - **Traffic Light Recognition** - ## Use Cases + Perception must provide enough information to support following use cases: -| Use case | Required output from `Perception` | How the output is used | -| ------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 1. Changing lane | **Object Recognition-Prediction**:
- Predicted paths of objects on target lane | To decide `when` and `where` changing lane depending on objects' predicted paths.
`when`: which timing to trigger lane change depending on obstacles position and velocity.
`where`: where to go depending on objects' position and shape. | -| 2. Turning at intersection | **Object Recognition- Prediction**:
- Predicted paths of objects at an intersection | To decide `when` turning at an intersection depending on objects' predicted path.
`when`: which timing to turning depending on objects' future paths. | -| 3. Avoiding parked vehicles | **Object Recognition- Detection**:
- Objects' class, shape and, position
**Object Recognition- Tracking**:
- Objects' velocity | To decide `where` to avoid objects depending on objects' properties.
`where`: where to avoid objects in given area depending on objects' class, velocity, shape and position. | -| 4. Stopping at a crosswalk when pedestrians are walking | **Object Recognition- Prediction**:
- Predicted paths of objects at a crosswalk | To decide where stopping based on pedestrians' position and velocity. | -| 5. Passing intersection without traffic lights | **Object Recognition- Detection**:
- Objects' shape.
**Object Recognition- Prediction**:
- Predicted paths of objects at an intersection | To decide `when` passing intersection depending on objects' properties.
`when`: which timing to pass intersection while negotiating with other objects based on objects' properties like, predicted paths and shape. | -| Merging into another lane | **Object Recognition- Prediction**:
- Predicted paths of objects at merging area | To decide when merging into another lane depending objects' predicted paths. | -| 6. Taking over Pedestrian/Cyclists | **Object Recognition- Detection**:
- Objects' shape, position and orientation.
**Object Recognition- Tracking**:
- Objects' velocity | To decide `when` and `where` taking over depending on objects' predicted paths
`when`: which timing to taking over depending on obstacles position and velocity.
`where`: where to go depending on objects' position and shape. | -| 7. Stopping/yielding to an obstacle | **Object Recognition- Detection**:
- Objects' shape, position, and orientation
**Object Recognition- Tracking**:
- Objects' velocity | To decide where to stop/yield based on pedestrians' position and velocity. | -| 8. Passing intersection with traffic lights | **Traffic Light Recognition- Classification**:
- Traffic signal status | To decide whether to go or stop based on traffic signal status. | +| Use case | Required output from `Perception` | How the output is used | +| ------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 1. Changing lane | **Object Recognition-Prediction**:
- Predicted paths of objects on target lane | To decide `when` and `where` to change lane depending on objects' predicted paths.
`when`: the timing to trigger a lane change depending on obstacles' position and velocity.
`where`: where to go depending on objects' position and shape. | +| 2. Turning at intersection | **Object Recognition- Prediction**:
- Predicted paths of objects at an intersection | To decide `when` to turn at an intersection depending on objects' predicted paths.
`when`: the timing to turn depending on objects' future paths. | +| 3. Avoiding parked vehicles | **Object Recognition- Detection**:
- Objects' class, shape, and position
**Object Recognition- Tracking**:
- Objects' velocity | To decide `where` to avoid objects depending on objects' properties.
`where`: where to avoid objects in the given area depending on objects' class, velocity, shape, and position. | +| 4. Stopping at a crosswalk when pedestrians are walking | **Object Recognition- Prediction**:
- Predicted paths of objects at a crosswalk | To decide where to stop based on pedestrians' position and velocity. | +| 5. Passing intersection without traffic lights | **Object Recognition- Detection**:
- Objects' shape.
**Object Recognition- Prediction**:
- Predicted paths of objects at an intersection | To decide `when` to pass the intersection depending on objects' properties.
`when`: the timing to pass the intersection while negotiating with other objects, based on objects' properties such as predicted paths and shape. | +| Merging into another lane | **Object Recognition- Prediction**:
- Predicted paths of objects at the merging area | To decide when to merge into another lane depending on objects' predicted paths. | +| 6. Taking over Pedestrian/Cyclists | **Object Recognition- Detection**:
- Objects' shape, position and orientation.
**Object Recognition- Tracking**:
- Objects' velocity | To decide `when` and `where` to take over depending on objects' predicted paths.
`when`: the timing to take over depending on obstacles' position and velocity.
`where`: where to go depending on objects' position and shape. | +| 7. Stopping/yielding to an obstacle | **Object Recognition- Detection**:
- Objects' shape, position, and orientation
**Object Recognition- Tracking**:
- Objects' velocity | To decide where to stop/yield based on pedestrians' position and velocity. | +| 8. Passing intersection with traffic lights | **Traffic Light Recognition- Classification**:
- Traffic signal status | To decide whether to go or stop based on traffic signal status. | ## Requirements + From above table, high-level requirements of Perception stack are summarized as below: 1. Perception stack should recognize following objects: (Use Case 3, 5, 6) - 1. vehicle - 2. pedestrian - 3. cyclists - 4. other objects that is on road or parking lot + 1. vehicle + 2. pedestrian + 3. cyclists + 4. other objects that is on road or parking lot 2. For each object, Perception stack should provide following information: (Use Case 1-7) - 1. Pose (done in ObjectDetection) - 2. Shape (done in ObjectDetection) - 3. Predicted future path (done in Object Tracking+Prediction) + 1. Pose (done in ObjectDetection) + 2. Shape (done in ObjectDetection) + 3. Predicted future path (done in Object Tracking+Prediction) 3. Perception stack should provide traffic light information: (Use Case 8) - 1. The status of traffic light - 2. Unique id of traffic light from map + 1. The status of traffic light + 2. Unique id of traffic light from map ## Input -| Input | Topic (Data Type) | Explanation | -| ---------------------- | ---------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| LiDAR | `/sensing/lidar/pointcloud`
(`sensor_msgs::PointCoud2`) | Pointcloud data captured by LiDAR comes from Sensing stack. Perception stack is allowed to choose subscribing to pointcloud with/without background depending on algorithms. | -| Camera | `/sensing/{camera_name}/image`
(`sensor_msgs::Image`) | Image data captured by Camera comes from Sensing stack. CameraInfo contains intrinsic parameters for the image. | -| Map | `/map/vector_map`
(`autoware_lanelet2_msgs::MapBin`) | This is Map data in lanelet2 format. Map stack has utility packages for processing map data. | -| Drive Route (optional) | `/planning/route`
(`autoware_planning_msgs::Route`) | This is route information for reaching a destination. In Perception stack, it is used for detecting the traffic lights associated with route information. | +| Input | Topic (Data Type) | Explanation | +| ---------------------- | ----------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| LiDAR | `/sensing/lidar/pointcloud`
(`sensor_msgs::PointCloud2`) | Pointcloud data captured by LiDAR comes from Sensing stack. Perception stack may subscribe to the pointcloud with or without background, depending on the algorithm. | +| Camera | `/sensing/{camera_name}/image`
(`sensor_msgs::Image`) | Image data captured by Camera comes from Sensing stack. CameraInfo contains intrinsic parameters for the image. | +| Map | `/map/vector_map`
(`autoware_lanelet2_msgs::MapBin`) | This is Map data in lanelet2 format. Map stack has utility packages for processing map data. | +| Drive Route (optional) | `/planning/route`
(`autoware_planning_msgs::Route`) | This is route information for reaching a destination. In Perception stack, it is used for detecting the traffic lights associated with route information. | ## Output -| Output | Topic Name (Data Type) | Explanation | -| ------------------- | -------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Dynamic Object | `/perception/object_recognition/objects`
(`autoware_perception_msgs::DynamicObjectArray`) | This includes obstacles' information. An obstacle is described by 3 major properties; State, Shape, Semantic. Detail design for these properties is in below Object Recognition section. | -| Traffic Light State | `/perception/traffic_light_recognition/traffic_light_states`
(`autoware_perception_msgs::TrafficLightStateArray`) | This includes the status of traffic light signals in array format. | +| Output | Topic Name (Data Type) | Explanation | +| ------------------- | -------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Dynamic Object | `/perception/object_recognition/objects`
(`autoware_perception_msgs::DynamicObjectArray`) | This includes obstacles' information. An obstacle is described by 3 major properties: State, Shape, and Semantic. The detailed design for these properties is given in the Object Recognition section below. | +| Traffic Light State | `/perception/traffic_light_recognition/traffic_light_states`
(`autoware_perception_msgs::TrafficLightStateArray`) | This includes the status of traffic light signals in array format. | - -# Design +## Design This Perception stack consists of 2 separated modules and each module can be subdivided into following components: - Object Recognition (satisfies Requirement 1 and 2) - - Detection - - Tracking - - Prediction + - Detection + - Tracking + - Prediction - Traffic Light Recognition (satisfies requirement 3) - - Detection - - Classification + - Detection + - Classification ![Perception_component](image/PerceptionComponent.png) -**Key points of the structure** +### Key points of the structure - Interfaces are separated according to the current algorithm level. - Enable complex autonomous driving use cases by including information like objects' future movement. @@ -83,6 +85,7 @@ This Perception stack consists of 2 separated modules and each module can be sub ## Object Recognition ### Role + Recognize obstacles that could potentially move. Provide detail information for obstacles required in the Planning stack. The motivation behind recognizing obstacles comes from a requirement for balancing safety and efficiency in autonomous driving. If emphasizing safety too much, it needs to consider every possible movement of obstacles. Autonomous vehicles could end up freezing. If emphasizing efficiency too much, recognize every object as static obstacles. A car could hit a pedestrian in an intersection because of the efficient drive to a destination. Balanced autonomous driving is achieved by recognizing obstacles. @@ -105,7 +108,7 @@ Detection component is responsible for clarifying the following objects' propert | orientation_reliable | Boolean for stable orientation or not | `bool` | `autoware_perception_msgs::State` | | shape | Shape in 3D bounding box, cylinder or polygon | `autoware_perception_msgs::Shape` | `autoware_perception_msgs::DynamicObject` | -#### Tracking +#### Tracking Tracking component deals with time-series processing. @@ -119,13 +122,13 @@ Tracking component is responsible for clarifying the following objects' property | acceleration | Acceleration in ROS accel format. | `geometry_msgs::AccelWithCovariance` | `autoware_perception_msgs::State` | | acceleration_reliable | Boolean for stable acceleration or not. | `bool` | `autoware_perception_msgs::State` | -#### Prediction +#### Prediction Prediction component is responsible for clarifying the following objects' property. -| Property | Definition | Data Type | Parent Data Type | -| -------------- | ------------------------------------- | ---------------------------------------------- | --------------------------------- | -| predicted_path | Predicted future paths for an object. | `autoware_perception_msgs::PredictedPath[] ` | `autoware_perception_msgs::State` | +| Property | Definition | Data Type | Parent Data Type | +| -------------- | ------------------------------------- | ------------------------------------------- | --------------------------------- | +| predicted_path | Predicted future paths for an object. | `autoware_perception_msgs::PredictedPath[]` | `autoware_perception_msgs::State` | Necessary information is defined in `autoware_perception_msg::DynamicObjectArray.msg` with layered msg structure. 
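As a reading aid for the layered message structure described above, the following plain-Python sketch mirrors the property tables for `DynamicObjectArray`. It is not the actual message definition: the field names `semantic` and `state`, and the simplified field types, are assumptions made for illustration.

```python
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class Semantic:
    type: int = 0            # object class, e.g. vehicle, pedestrian, cyclist
    confidence: float = 0.0  # classification confidence


@dataclass
class PredictedPath:
    path: List[tuple] = field(default_factory=list)  # future pose samples


@dataclass
class State:
    pose: tuple = (0.0, 0.0, 0.0)       # filled by Detection
    orientation_reliable: bool = False  # filled by Detection
    velocity: Optional[tuple] = None    # filled by Tracking
    predicted_path: List[PredictedPath] = field(default_factory=list)  # filled by Prediction


@dataclass
class DynamicObject:
    id: str = ""  # unique id kept over frames by Tracking
    semantic: Semantic = field(default_factory=Semantic)
    state: State = field(default_factory=State)
    shape: Optional[object] = None  # bounding box, cylinder or polygon


@dataclass
class DynamicObjectArray:
    header: Optional[object] = None
    objects: List[DynamicObject] = field(default_factory=list)
```

Each component fills in only the properties it is responsible for: Detection provides pose and shape, Tracking provides the id and velocity, and Prediction appends the predicted paths.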
diff --git a/docs/design/software_architecture/Perception/TrafficLightRecognition/Classification/Classification.md b/docs/design/software_architecture/Perception/TrafficLightRecognition/Classification/Classification.md index 8442fc936347d..2bad20c346f05 100644 --- a/docs/design/software_architecture/Perception/TrafficLightRecognition/Classification/Classification.md +++ b/docs/design/software_architecture/Perception/TrafficLightRecognition/Classification/Classification.md @@ -1,9 +1,11 @@ -Classification -===== +# Classification + ## Use Cases and Requirements + Classification in Traffic Light Recognition is required for use cases involved with traffic light: -* Passing the intersection when traffic light is green -* Stopping at the intersection when traffic signal is red + +- Passing the intersection when traffic light is green +- Stopping at the intersection when traffic signal is red For the details about related requirements, please refer to the [document for Perception stack](/design/Perception/Perception.md). @@ -16,7 +18,7 @@ Classification module recognizes traffic signal status. Unique signal types are | Input | Data Type | Topic | | --------------------------------- | ---------------------------------------------------- | ------------------------------------------ | | Cropped traffic light information | `autoware_perception_msgs::TrafficLightRoiArray.msg` | /perception/traffic_light_recognition/rois | -| Camera | `sensor_msgs::Image` | /sensing/camera/*/image_raw | +| Camera | `sensor_msgs::Image` | /sensing/camera/\*/image_raw | ## Output @@ -25,10 +27,10 @@ Classification module recognizes traffic signal status. Unique signal types are | Traffic signal status | `autoware_traffic_light_msgs::TrafficLightStateArray` | Planning | /perception/traffic_light_recognition/traffic_light_states | ## Design + This is our sample implementation for the Classification module. ![msg](../image/LightClassificationDesign.png) - Unique signals are handled in `autoware_traffic_light_msgs::LampState`. When requiring to detect local unique signals which are not defined here, need to add them in `autoware_traffic_light_msgs::LampState`. ![msg](../../image/PerceptionTrafficLightMsg.png) diff --git a/docs/design/software_architecture/Perception/TrafficLightRecognition/Detection/Detection.md b/docs/design/software_architecture/Perception/TrafficLightRecognition/Detection/Detection.md index aa1cf8ce90d8c..1f715004432f1 100644 --- a/docs/design/software_architecture/Perception/TrafficLightRecognition/Detection/Detection.md +++ b/docs/design/software_architecture/Perception/TrafficLightRecognition/Detection/Detection.md @@ -1,10 +1,11 @@ -Detection -===== +# Detection ## Use Cases and Requirements + Detection in Traffic Light Recognition is required for use cases involved with traffic light: -* Passing intersection when traffic signal is green -* Stopping at intersection when traffic signal is red + +- Passing intersection when traffic signal is green +- Stopping at intersection when traffic signal is red For the details about related requirements, please refer to the [document for Perception stack](/design/Perception/Perception.md). 
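To illustrate how a consumer on the Planning side might interpret the Classification output above, the sketch below scans a `TrafficLightStateArray` for the traffic light associated with the current lane. The field names (`states`, `id`, `lamp_states`, `type`, `confidence`) follow the message definitions listed earlier; the `RED` constant value and the confidence threshold are assumptions made for illustration.

```python
RED = 1  # assumed placeholder; the real value is defined in LampState.msg


def should_stop(traffic_light_states, associated_light_id, confidence_threshold=0.5):
    """Return True if the traffic light tied to the current lane is confidently red."""
    for state in traffic_light_states.states:  # TrafficLightState[]
        if state.id != associated_light_id:    # the id comes from the HD map
            continue
        for lamp in state.lamp_states:          # LampState[]
            if lamp.type == RED and lamp.confidence >= confidence_threshold:
                return True
    return False
```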
@@ -14,12 +15,12 @@ Detection module in Traffic Light Recognition finds traffic lights' region of in ## Input -| Input | Data Type | Topic | -| ----------- | -------------------------------- | ----------------------------- | -| Camera | `sensor_msgs::Image` | /sensing/camera/*/image_raw | -| Camera info | `sensor_msgs::CameraInfo` | /sensing/camera/*/camera_info | -| Map | `autoware_lanelet2_msgs::MapBin` | /map/vector_map | -| TF | `tf2_msgs::TFMessage` | /tf | +| Input | Data Type | Topic | +| ----------- | -------------------------------- | ------------------------------ | +| Camera | `sensor_msgs::Image` | /sensing/camera/\*/image_raw | +| Camera info | `sensor_msgs::CameraInfo` | /sensing/camera/\*/camera_info | +| Map | `autoware_lanelet2_msgs::MapBin` | /map/vector_map | +| TF | `tf2_msgs::TFMessage` | /tf | ## Output @@ -28,6 +29,7 @@ Detection module in Traffic Light Recognition finds traffic lights' region of in | Cropped traffic light ROI information | `autoware_perception_msgs::TrafficLightRoiArray.msg` | Traffic Light Recognition: Classification | /perception/traffic_light_recognition/rois | ## Design + The Detection module is designed to modularize some patterns of detecting traffic lights' ROI. ![msg](../image/LightDetectionDesign.png) diff --git a/docs/design/software_architecture/Planning/DesignRationale.md b/docs/design/software_architecture/Planning/DesignRationale.md index 7372d9038fa5d..222e92894ab5c 100644 --- a/docs/design/software_architecture/Planning/DesignRationale.md +++ b/docs/design/software_architecture/Planning/DesignRationale.md @@ -1,15 +1,18 @@ # Planning Architecture Rationale ## Requirements for Planning + Planning architecture must be able to support any functions required to achieve the overall use case stated in [Overview](../Overview.md) -This includes: +This includes: + - Calculates route that navigates to the desired goal - Plans maneuver to follow the route (e.g. when to lane change, when to turn at intersection) - Make sure that vehicle does not collide with obstacles, including pedestrians and other vehicles) -- Make sure that the vehicle follows traffic rules during the navigation. This includes following traffic light, stopping at stoplines, stopping at crosswalks, etc. +- Make sure that the vehicle follows traffic rules during the navigation. This includes following traffic light, stopping at stoplines, stopping at crosswalks, etc. Also, since Autoware is open source software and is meant to be used/developed by anyone around the world, the architecture must be: + - Architecture is extensible enough to integrate new algorithms without changing the interface - Architecture is extensible enough to adapt to new traffic rules for different countries @@ -20,60 +23,74 @@ Before designing planning architecture, we have looked into papers including one The summary is explained below. ### Boss + System overview of Boss is explained in this [paper](https://www.ri.cmu.edu/pub_files/pub4/urmson_christopher_2008_1/urmson_christopher_2008_1.pdf) The planner is decomposed into three layers: mission, behavior, and motion. Mission calculates the high-level global path from starting point to goal point, behavior makes tactical decisions such as lane change decisions and maneuvers at an intersection, and motion calculates low-level trajectory with consideration of kinematic model of the vehicle. -**pros** -* It is intuitive. Data flow is one-directional from mission to behavior to motion. 
Similar approach was taken by different teams in the DARPA Urban Challenge. -* It is suitable for OSS used world-wide since all traffic rule handling is done in the behavior layer, and developers only need to modify the behavior layer to support their local rules. - -**cons** -* Behavior layer can only make "conservative" decisions. Since the behavior layer has no way of knowing the actual trajectory that is calculated by following motion layer, the behavior layer cannot be certain about the validity of decisions. For example, the behavior planner can command lane change only when it is obvious that there is no obstacle in the target lane, and it is not possible to do "aggressive" lane change as an architecture. +#### pros + +- It is intuitive. Data flow is one-directional from mission to behavior to motion. Similar approach was taken by different teams in the DARPA Urban Challenge. +- It is suitable for OSS used world-wide since all traffic rule handling is done in the behavior layer, and developers only need to modify the behavior layer to support their local rules. + +#### cons + +- Behavior layer can only make "conservative" decisions. Since the behavior layer has no way of knowing the actual trajectory that is calculated by following motion layer, the behavior layer cannot be certain about the validity of decisions. For example, the behavior planner can command lane change only when it is obvious that there is no obstacle in the target lane, and it is not possible to do "aggressive" lane change as an architecture. ### Unified Behavior and Motion + This [paper](https://www.researchgate.net/publication/315067229_Improved_Trajectory_Planning_for_On-Road_Self-Driving_Vehicles_Via_Combined_Graph_Search_Optimization_Topology_Analysis) reviews planning architecture used in the Darpa Urban Challenge and addresses the demerit of splitting behavior layer and motion layer. It also proposes an algorithm to handle decision making and trajectory optimization simultaneously. -**pros** -* It can make more "aggressive" decisions compared to BOSS type architecture. +#### pros + +- It can make more "aggressive" decisions compared to BOSS type architecture. -**Cons** -* The paper focuses on lane change, but the real environment requires a variety of decision making, such as traffic lights and crosswalks. It is questionable that we can handle different kinds of traffic rules with one optimization algorithm. -* Also, since decision making is also the target of optimization, it is usually difficult to add user-specific constraints to decision making. +#### Cons + +- The paper focuses on lane change, but the real environment requires a variety of decision making, such as traffic lights and crosswalks. It is questionable that we can handle different kinds of traffic rules with one optimization algorithm. +- Also, since decision making is also the target of optimization, it is usually difficult to add user-specific constraints to decision making. ### Victor Tango Type + System overview of Victor Tango is explained in this [paper](https://archive.darpa.mil/grandchallenge/TechPapers/Victor_Tango.pdf). Victor Tango split Behavior and Motion layer like Boss type. However, unlike BOSS type, there is feedback from motion whether a decision made by behavior layer is achievable or not. -**Pros** -* It overcomes the weakness of Boss type and can consider trajectory at the behavior level -**Cons** -* The interface between behavior and motion would be tight and dense. 
It has the risk of having heavy inter-dependency as new traffic rules are added, making it difficult to replace one of the modules with new algorithms in the future. +#### Pros + +- It overcomes the weakness of Boss type and can consider trajectory at the behavior level + +#### Cons + +- The interface between behavior and motion would be tight and dense. It has the risk of having heavy inter-dependency as new traffic rules are added, making it difficult to replace one of the modules with new algorithms in the future. ### Apollo + Here is the [link](https://github.com/ApolloAuto/apollo/tree/r5.0.0/modules/planning) Apollo kept updating the planning module at each version update. In version 5.0, they have taken a different approach from others. Apollo split the behavior planner into scenarios, such as intersection, parking, and lane following. In each scenario module, it calls decider and optimizer libraries to achieve specific scenarios -**Pros** -* Specific parameter tunings are available for different scenarios, making it relatively easier to add new traffic rules for different countries -* Different optimizers can be used for a different purpose +#### Pros + +- Specific parameter tunings are available for different scenarios, making it relatively easier to add new traffic rules for different countries +- Different optimizers can be used for a different purpose -**Cons** -* As the number of scenario increases, planning selector(or scenario selector) may get very complex as the number of scenario increases +#### Cons + +- As the number of scenario increases, planning selector(or scenario selector) may get very complex as the number of scenario increases ## Autoware Planning Architecture -Considering pros and cons of different approaches, we have concluded to take the hybrid approach of Apollo and Boss. -We divided planning modules into scenarios just like Apollo, but into a more high-level scenario, such as LaneDriving and Parking. Apollo has smaller units of scenarios, such as intersection and traffic lights, but we expect that those scenarios may occur in parallel(e.g. intersection with traffic lights and crosswalks) and preparing combined-scenario modules would make scenario-selector too complex to be maintained. Instead, we made a scenario to be a more broad concept to keep the number of scenarios to be lower to keep scenario selector comprehensible. Currently, we only have LaneDriving and Parking, and we anticipate to have HighWay and InEmergency scenarios in the future. +Considering pros and cons of different approaches, we have concluded to take the hybrid approach of Apollo and Boss. + +We divided planning modules into scenarios just like Apollo, but into a more high-level scenario, such as LaneDriving and Parking. Apollo has smaller units of scenarios, such as intersection and traffic lights, but we expect that those scenarios may occur in parallel(e.g. intersection with traffic lights and crosswalks) and preparing combined-scenario modules would make scenario-selector too complex to be maintained. Instead, we made a scenario to be a more broad concept to keep the number of scenarios to be lower to keep scenario selector comprehensible. Currently, we only have LaneDriving and Parking, and we anticipate to have HighWay and InEmergency scenarios in the future. -More investigation is required to establish the definition of a “Scenario”, but the convention is that a new scenario must only be added whenever a different "paradigm" is needed for planning. 
+More investigation is required to establish the definition of a “Scenario”, but the convention is that a new scenario must only be added whenever a different "paradigm" is needed for planning. -For example, LaneDriving is used to drive along public roads, whereas Parking is used for driving free space, and it would be difficult to develop an algorithm to support requirements for both scenarios. LaneDriving wouldn't require complex path planner as the shape of lanes are already given from HD map, but it must be done at a higher frequency to drive at higher speed, whereas parking requires more complex maneuvers with cut-backs but with lower constraint about computation time. Therefore, it would make more sense to split them into different scenarios, and design a planner to support contradicting requirements. +For example, LaneDriving is used to drive along public roads, whereas Parking is used for driving free space, and it would be difficult to develop an algorithm to support requirements for both scenarios. LaneDriving wouldn't require complex path planner as the shape of lanes are already given from HD map, but it must be done at a higher frequency to drive at higher speed, whereas parking requires more complex maneuvers with cut-backs but with lower constraint about computation time. Therefore, it would make more sense to split them into different scenarios, and design a planner to support contradicting requirements. -Note that we didn't split LaneDriving into smaller scenarios, unlike Apollo. We have considered all traffic rule related scenes on public roads, including yielding, traffic lights, crosswalks, and bare intersections, to be essentially velocity planning along lanes and can be handled within a single scheme. +Note that we didn't split LaneDriving into smaller scenarios, unlike Apollo. We have considered all traffic rule related scenes on public roads, including yielding, traffic lights, crosswalks, and bare intersections, to be essentially velocity planning along lanes and can be handled within a single scheme. +## Reference -# Reference -* Boss: https://www.ri.cmu.edu/pub_files/pub4/urmson_christopher_2008_1/urmson_christopher_2008_1.pdf -* CMU Doctor These: https://www.researchgate.net/publication/315067229_Improved_Trajectory_Planning_for_On-Road_Self-Driving_Vehicles_Via_Combined_Graph_Search_Optimization_Topology_Analysis -* Victor Tango: https://archive.darpa.mil/grandchallenge/TechPapers/Victor_Tango.pdf -* Apollo Auto: https://github.com/ApolloAuto/apollo/tree/r5.0.0/modules/planning +- Boss: +- CMU Doctor These: +- Victor Tango: +- Apollo Auto: diff --git a/docs/design/software_architecture/Planning/LaneDriving/Behavior/BehaviorVelocityPlanner.md b/docs/design/software_architecture/Planning/LaneDriving/Behavior/BehaviorVelocityPlanner.md index 6c2d7307f31f8..dc02a96f40a95 100644 --- a/docs/design/software_architecture/Planning/LaneDriving/Behavior/BehaviorVelocityPlanner.md +++ b/docs/design/software_architecture/Planning/LaneDriving/Behavior/BehaviorVelocityPlanner.md @@ -1,67 +1,75 @@ - # Overview + Behavior velocity planner is responsible for modifying velocity so that ego vehicle drives according to traffic rules. ## Inputs -topic name: "path_with_lane_id" + +topic name: "path_with_lane_id" type: autoware_planning_msgs::PathWithLaneId frequency: 10Hz ## Outputs + Behavior velocity planner should output path with velocity profile using following message type. 
-topic name: "path" +topic name: "path" type: autoware_planning_msgs::Path frequency: 10Hz ## Use Cases + Behavior planner is responsible for handling different traffic rules present in the environment. Note that traffic rules might be different depending on country. Therefore, behavior velocity planner node must be designed to allow developers to easily modify the traffic rules. Currently the following traffic rules are considered in the reference implementation: + 1. crosswalk 2. intersection 3. stop line 4. traffic light ## Requirements + Again, traffic rules might be different depending on country, and the requirement described here is for Japanese traffic rules. 1. **Crosswalk** - * Ego vehicle must stop if a pedestrian is within stop area shown in the image. - * Ego vehicle must stop if a pedestrian is expected to enter the stop area. (e.g. within 3 seconds) - * Ego vehicle must slow down (e.g. to 5km/h) when there is a pedestrian in the deceleration area shown in the image. - **rationale:** We don't want to make vehicle keep waiting for pedestrian to pass crosswalk completely. We want vehicle to start driving when pedestrian walks past the car. + - Ego vehicle must stop if a pedestrian is within stop area shown in the image. + - Ego vehicle must stop if a pedestrian is expected to enter the stop area. (e.g. within 3 seconds) + - Ego vehicle must slow down (e.g. to 5km/h) when there is a pedestrian in the deceleration area shown in the image. + **rationale:** We don't want to make vehicle keep waiting for pedestrian to pass crosswalk completely. We want vehicle to start driving when pedestrian walks past the car. ![Crosswalk](image/Crosswalk.png) 2. **Intersection** - * Unless ego vehicle has right of way, Vehicle must stop before entering intersection whenever other vehicles are entering intersection. + - Unless ego vehicle has right of way, Vehicle must stop before entering intersection whenever other vehicles are entering intersection. ![Intersection](image/Intersection.png) 3. **Stop Line (Stop Sign)** - * Vehicle must stop at stopline when it passes through the line. - * Stop line should be specified in the Semantic Map. + - Vehicle must stop at stopline when it passes through the line. + - Stop line should be specified in the Semantic Map. ![Stopline](image/Stopline.png) 4. **Follow traffic light** - * Planning stack should refer to Perception output of the traffic light associated to driving lane. - * Speed profile of a trajectory at the associated stopline must be zero when relevant traffic light is red and it has enough distance to stop before the stopline with given deceleration configuration + - Planning stack should refer to Perception output of the traffic light associated to driving lane. + - Speed profile of a trajectory at the associated stopline must be zero when relevant traffic light is red and it has enough distance to stop before the stopline with given deceleration configuration ![TrafficLight](image/TrafficLight.png) ## Design + The example node diagram of the behavior velocity planner is shown as the diagram below. ![BehaviorVelocityPlanner](image/BehaviorVelocityPlanner.svg) The node consist of two steps. -1. **Module Instance Update** From the given path, the planner will look up the HD Map to see if there are upcoming traffic rule features in the map. For each detected traffic rule features, respective scene module instance will be generated. Also, scene module instances from past features will be deleted. + +1. 
**Module Instance Update** From the given path, the planner will look up the HD Map to see if there are upcoming traffic rule features in the map. For each detected traffic rule features, respective scene module instance will be generated. Also, scene module instances from past features will be deleted. 2. **Velocity Update** Actual modification of the velocity profile of the path happens in this step. Path message will be passed to each traffic rule scene modules, and each module will update velocity according to its traffic rule. Minimum value of all scene modules will become the final velocity profile. ![BehaviorVelocityInstance](image/BehaviorVelocityInstance.png) -##### Adding/Modifying Traffic Rules +### Adding/Modifying Traffic Rules + The actual rule for modifying velocity of the path is defined in scene modules. Whenever a developer want to add new rules to the planner, the developer must define a new traffic rule scene module class which inherits SceneModuleInterface class. For the details, please refer to the [source code](/src/planning/scenario_planning/lane_driving/behavior_planning/behavior_velocity_planner). diff --git a/docs/design/software_architecture/Planning/LaneDriving/Behavior/LaneChangePlanner.md b/docs/design/software_architecture/Planning/LaneDriving/Behavior/LaneChangePlanner.md index ee39b0de69181..27e36ed6fc317 100644 --- a/docs/design/software_architecture/Planning/LaneDriving/Behavior/LaneChangePlanner.md +++ b/docs/design/software_architecture/Planning/LaneDriving/Behavior/LaneChangePlanner.md @@ -1,41 +1,49 @@ - # Overview -Lane Change Planner should create path from Route message, and navigates to given goal by either following lane or changing lanes. + +Lane Change Planner should create path from Route message, and navigates to given goal by either following lane or changing lanes. ## Inputs + Input : topic "route" with type autoware_planning_msgs::Route ## Outputs -topic name: "path_with_lane_id" + +topic name: "path_with_lane_id" type: autoware_planning_msgs::PathWithLaneId frequency: 10Hz ## Assumptions -* Vehicle travels at constant speed + +- Vehicle travels at constant speed ## Requirements + ### About goal pose -* If the given goal pose in route message is within route, then goal pose is used directly -* If the given goal pose in route message is not within route (e.g. in parking space), then the goal shall be replaced by closest point along the centerline of goal lane + +- If the given goal pose in route message is within route, then goal pose is used directly +- If the given goal pose in route message is not within route (e.g. 
in parking space), then the goal shall be replaced by closest point along the centerline of goal lane ### About Drivable Area -* drivable area should be the shape of lanes that vehicle is driving when BPP is following lane -* drivable area should be the shape of current lane and lane change target lanes when BPP is operating lane change + +- drivable area should be the shape of lanes that vehicle is driving when BPP is following lane +- drivable area should be the shape of current lane and lane change target lanes when BPP is operating lane change ### About Path Points -* LaneChanePlanner should publish reference path that leads to goal pose -* Path should start from n [m] behind vehicle position -* Path should have length of at least 100[m] unless path surpasses goal -* All points in Path should be placed within drivable area -* Path within the n [m] away from vehicle should not change over time to avoid sudden change in steering. + +- LaneChangePlanner should publish reference path that leads to goal pose +- Path should start from n [m] behind vehicle position +- Path should have length of at least 100[m] unless path surpasses goal +- All points in Path should be placed within drivable area +- Path within the n [m] away from vehicle should not change over time to avoid sudden change in steering. ### About Lane Change -* Vehicle follows lane if vehicle is on preferred lanes -* Vehicle should stay in lane at least for 3 second before operating lane change for other participants to recognize ego vehicle's turn signal. -* The planner attempts lane change towards preferred lane if vehicle is not within preferred lanes, and candidate lane change path is valid. The path is valid if it satisfies all the following conditions: - * there is 2seconds margin between any other vehicles assuming that ego vehicle follows the candidate path at constant speed - * lane change finishes x [m] before any intersections - * lane change finishes x [m] before any crosswalks -* LaneChangePlanner shall abort lane change and go back to original lane when all of the following conditions are satisfied: - * Vehicle(base_link) is still in the original lane - * there is no n seconds margin between all other vehicles during lane change, assuming that ego vehicle follows the candidate path at constant speed. (due to newly detected vehicles) \ No newline at end of file + +- Vehicle follows lane if vehicle is on preferred lanes +- Vehicle should stay in lane at least for 3 second before operating lane change for other participants to recognize ego vehicle's turn signal. +- The planner attempts lane change towards preferred lane if vehicle is not within preferred lanes, and candidate lane change path is valid. The path is valid if it satisfies all the following conditions: + - there is 2seconds margin between any other vehicles assuming that ego vehicle follows the candidate path at constant speed + - lane change finishes x [m] before any intersections + - lane change finishes x [m] before any crosswalks +- LaneChangePlanner shall abort lane change and go back to original lane when all of the following conditions are satisfied: + - Vehicle(base_link) is still in the original lane + - there is no n seconds margin between all other vehicles during lane change, assuming that ego vehicle follows the candidate path at constant speed. 
(due to newly detected vehicles) diff --git a/docs/design/software_architecture/Planning/LaneDriving/LaneDrivingScenario.md b/docs/design/software_architecture/Planning/LaneDriving/LaneDrivingScenario.md index cc38eac4209ed..09327b23f2419 100644 --- a/docs/design/software_architecture/Planning/LaneDriving/LaneDrivingScenario.md +++ b/docs/design/software_architecture/Planning/LaneDriving/LaneDrivingScenario.md @@ -1,15 +1,19 @@ # Lane Driving Scenario + ## Use Cases and Requirements + Lane Driving Scenario must satisfy the following use cases: -* Driving along lane -* Operating lane change -* Following speed limit of lane -* Follow traffic light -* Turning left/right at intersections + +- Driving along lane +- Operating lane change +- Following speed limit of lane +- Follow traffic light +- Turning left/right at intersections For the details about related requirements, please refer to the [document for Planning stack](/design/Planning/Planning.md). ## Input + - Route: `autoware_planning_msgs::Route`
This includes the final goal pose and which lanes are available for trajectory planning. - Map: `autoware_lanelet_msgs::MapBin`
This provides all static information about the environment, including lane connection, lane geometry, and traffic rules. Scenario module should plan trajectory such that vehicle follows all traffic rules specified in map. - Dynamic Objects: `autoware_perception_msgs::DynamicObjectArray`
This provides all obstacle information calculated from sensors. Scenario module should calculate trajectory such that vehicle does not collide with other objects. This can be done either by planning velocity so that the vehicle stops before hitting the obstacle, or by calculating a path so that the vehicle avoids the obstacle. @@ -21,41 +25,49 @@ For the details about related requirements, please refer to the [document for Pl - Turn Signal: `autoware_vehicle_msgs::TurnSignal`
Turn signal command should also be published because Scenario module is only aware of the traffic rules and operating maneuvers in the whole Autoware stack. ## Design -Lane Driving scenario is decomposed into following modules: LaneChangePlanner, BehaviorVelocityPlanner, MotionPlanner and TurnSignalDecider. + +Lane Driving scenario is decomposed into following modules: LaneChangePlanner, BehaviorVelocityPlanner, MotionPlanner and TurnSignalDecider. ![LaneDrivingScenario](image/LaneDrivingScenario.svg) ### Behavior Planner + Behavior Planner plans the path, which includes reference trajectory(i.e. path points) for motion planner to optimize and drivable area. General idea is that behavior layer sets constraints according to traffic rules to ensure optimized trajectory follows traffic rules. It is decomposed into: -* LaneChangePlanner that decides lane change -* BehaviorVelocityPlanner that plans the velocity profile according to traffic rules -* TurnSignalDecider that decides turn signals according to planned behavior + +- LaneChangePlanner that decides lane change +- BehaviorVelocityPlanner that plans the velocity profile according to traffic rules +- TurnSignalDecider that decides turn signals according to planned behavior #### Input + - Route: `autoware_planning_msgs::Route`
This includes the final goal pose and which lanes are available for trajectory planning. - Map: `autoware_lanelet_msgs::MapBin`
This provides all static information about the environment, including lane connection, lane geometry, and traffic rules. Scenario module should plan trajectory such that vehicle follows all traffic rules specified in map. - Dynamic Objects: `autoware_perception_msgs::DynamicObjectArray`
This provides all obstacle information calculated from sensors. Scenario module should calculate trajectory such that vehicle does not collide with other objects. This can be done either by planning velocity so that the vehicle stops before hitting the obstacle, or by calculating a path so that the vehicle avoids the obstacle. - Scenario: `autoware_planning_msgs::Scenario`
This is the message from scenario selector. Scenario modules only run when the module is selected by this topic. #### Output + - Path: `autoware_planning_msgs::Path` This message contains path points, which are reference points that will be optimized by motion planner, and drivable area which indicates the space in which motion planner is allowed to change position of path points during optimization. - Turn Signal: `autoware_vehicle_msgs::TurnSignal`
Turn signal command should also be published because Scenario module is only aware of the traffic rules and operating maneuvers in the whole Autoware stack. - ### MotionPlanner + Motion planner is responsible for following functions: -* Optimize the shape of trajectory with given lateral acceleration and jerk limit - * Motion planner should not manipulate position of trajectory points from behavior planner outside given drivable area -* Optimize the velocity of trajectory with given acceleration and jerk limit - * Motion planner is only allowed to decrease the speed profile given from behavior planner since all traffic rules(such as speed limits) are considered in behavior planner. -* Interpolate trajectory points with enough resolution for Control + +- Optimize the shape of trajectory with given lateral acceleration and jerk limit + - Motion planner should not manipulate position of trajectory points from behavior planner outside given drivable area +- Optimize the velocity of trajectory with given acceleration and jerk limit + - Motion planner is only allowed to decrease the speed profile given from behavior planner since all traffic rules(such as speed limits) are considered in behavior planner. +- Interpolate trajectory points with enough resolution for Control #### Input -* Path: `autoware_planning_msgs::Path`
+ +- Path: `autoware_planning_msgs::Path`
This contains reference trajectory points, speed constraints, and geometrical constraints for optimization.
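To make the velocity rule of the MotionPlanner above concrete, here is a minimal, illustrative sketch (not Autoware code; the function and parameter names are ours, and jerk limiting is omitted for brevity) of how a motion planner can only ever lower the behavior planner's speed profile while respecting a longitudinal acceleration bound:

```python
import math


def limit_velocity_profile(arc_lengths, behavior_speed_limits, a_max):
    """Illustrative only: cap speeds by the behavior layer's limits and an
    acceleration bound using forward/backward passes.

    arc_lengths: cumulative distance of each trajectory point [m]
    behavior_speed_limits: upper speed bound per point from the behavior layer [m/s]
    a_max: allowed longitudinal acceleration/deceleration magnitude [m/s^2]
    """
    # Never exceed the behavior planner's profile (speeds may only decrease).
    v = list(behavior_speed_limits)

    # Forward pass: limit acceleration (v_next^2 <= v_prev^2 + 2 * a_max * ds).
    for i in range(1, len(v)):
        ds = arc_lengths[i] - arc_lengths[i - 1]
        v[i] = min(v[i], math.sqrt(v[i - 1] ** 2 + 2.0 * a_max * ds))

    # Backward pass: limit deceleration toward slower points ahead.
    for i in range(len(v) - 2, -1, -1):
        ds = arc_lengths[i + 1] - arc_lengths[i]
        v[i] = min(v[i], math.sqrt(v[i + 1] ** 2 + 2.0 * a_max * ds))

    return v
```

A real motion planner would pose this as an optimization problem that also bounds jerk, but the two clamping passes show the direction of the computation: the output never exceeds the input profile.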
#### Output + - Trajectory: `autoware_planning_msgs::Trajectory` This contains trajectory that Control must follow. The shape and velocity of the trajectory must satisfy given acceleration and jerk limits. diff --git a/docs/design/software_architecture/Planning/Parking/ParkingScenario.md b/docs/design/software_architecture/Planning/Parking/ParkingScenario.md index 89548e8e8e7fe..5bbdf4dbdf681 100644 --- a/docs/design/software_architecture/Planning/Parking/ParkingScenario.md +++ b/docs/design/software_architecture/Planning/Parking/ParkingScenario.md @@ -1,42 +1,54 @@ # Parking Scenario + This scenario is meant to be used to plan maneuvers to park the vehicle in a parking space. Compared to LaneDrivingScenario, this scenario has relatively fewer constraints about the shape of trajectory. -## Requirements: +## Requirements + Parking Scenario must satisfy the following use cases: -* Park vehicle in parking space -For the details about related requirements, please refer to the [document for Planning stack](/design/Planning/Planning.md). +- Park vehicle in parking space +For the details about related requirements, please refer to the [document for Planning stack](/design/Planning/Planning.md). ### Input + - Route: `autoware_planning_msgs::Route`
This includes the final goal pose and which lanes are available for trajectory planning, but only goal pose is used for planning. - Map: `autoware_lanelet_msgs::MapBin`
This provides all static information about the environment. This is meant to be used to generate drivable area. - Dynamic Objects: `autoware_perception_msgs::DynamicObjectArray`
This provides all obstacle information calculated from sensors. The Scenario module should calculate the trajectory such that the vehicle does not collide with other objects. This can be done either by planning the velocity so that the vehicle stops before hitting an obstacle, or by calculating the path so that the vehicle avoids the obstacle. - Scenario: `autoware_planning_msgs::Scenario`
This is the message from scenario selector. All modules only run when Parking scenario is selected by this topic. ### Outputs + - Trajectory: `autoware_planning_msgs::Trajectory`
This contains trajectory that Control must follow. The shape and velocity of the trajectory must satisfy all the requirements. ## Design + ![ParkingScenario.png](image/ParkingScenario.png) ### Costmap Generator + This gives spatial constraints to the freespace planner. + #### Input + - Map: `autoware_lanelet_msgs::MapBin`
This provides all static information about the environment. This is meant to be used to generate drivable area. - Dynamic Objects: `autoware_perception_msgs::DynamicObjectArray`
This provides all obstacle information calculated from sensors. The Scenario module should calculate the trajectory such that the vehicle does not collide with other objects. This can be done either by planning the velocity so that the vehicle stops before hitting an obstacle, or by calculating the path so that the vehicle avoids the obstacle. - Scenario: `autoware_planning_msgs::Scenario`
This is the message from scenario selector. All modules only run when Parking scenario is selected by this topic. #### Output -* Costmap: `nav_msgs::OccupancyGrid.msg`
This contains spaces that can be used for trajectory planning. The grid is considered not passable(occupied) if it is outside of parking lot polygon specified map, and perceived objects lie on the grid. + +- Costmap: `nav_msgs::OccupancyGrid.msg`
This contains spaces that can be used for trajectory planning. The grid is considered not passable (occupied) if it lies outside of the parking lot polygon specified in the map, or if perceived objects lie on the grid.
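As an illustration of the occupancy rule above, the following hedged sketch (our own, not the actual costmap generator; `shapely` is used here only for the polygon tests, and the grid parameters are hypothetical) fills a row-major array in the way `nav_msgs::OccupancyGrid.data` is ordered:

```python
from shapely.geometry import Point, Polygon  # polygon containment test only


def build_parking_costmap(lot_polygon, object_polygons, origin_xy, resolution, width, height):
    """Illustrative only: mark a cell occupied (100) when its center is outside
    the parking-lot polygon from the map or covered by a perceived object;
    otherwise mark it free (0)."""
    lot = Polygon(lot_polygon)
    objects = [Polygon(p) for p in object_polygons]

    data = []
    for iy in range(height):          # rows
        for ix in range(width):       # columns, row-major like OccupancyGrid.data
            cx = origin_xy[0] + (ix + 0.5) * resolution
            cy = origin_xy[1] + (iy + 0.5) * resolution
            center = Point(cx, cy)
            occupied = (not lot.contains(center)) or any(o.contains(center) for o in objects)
            data.append(100 if occupied else 0)
    return data
```

In practice the object footprints would also be inflated by the vehicle footprint, but that refinement is omitted here.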
### Freespace Planner + Freespace planner calculates trajectory that navigates vehicle to goal pose given by route. It must consider vehicle's kinematic model. #### Input + - Route: `autoware_planning_msgs::Route` This includes the final goal pose and which lanes are available for trajectory planning, but only goal pose is used for planning. - Vehicle Pose: `tf_msgs::tf` - Vehicle Velocity: `geometry_msgs::Twist` #### Output + - Trajectory: `autoware_planning_msgs::Trajectory` This contains trajectory that Control must follow. The shape and velocity of the trajectory must satisfy all the requirements.
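As a minimal illustration of what "consider vehicle's kinematic model" means here (a sketch under our own naming, not Autoware code), freespace planners typically expand candidate motions with a kinematic bicycle model referenced at the rear axle, which is consistent with the base_link definition used elsewhere in this proposal:

```python
import math


def bicycle_model_step(x, y, yaw, velocity, steering_angle, wheel_base, dt):
    """Illustrative only: one kinematic bicycle-model update.

    Expanding candidate motions with a model like this keeps the resulting
    trajectory drivable for a non-holonomic vehicle (no sideways motion,
    turning radius bounded by the steering limit).
    """
    x += velocity * math.cos(yaw) * dt
    y += velocity * math.sin(yaw) * dt
    yaw += velocity / wheel_base * math.tan(steering_angle) * dt
    return x, y, yaw
```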
diff --git a/docs/design/software_architecture/Planning/Planning.md b/docs/design/software_architecture/Planning/Planning.md index eac5f51c37d93..66453eb48994e 100644 --- a/docs/design/software_architecture/Planning/Planning.md +++ b/docs/design/software_architecture/Planning/Planning.md @@ -1,20 +1,21 @@ -Planning -============= +# Planning -# Overview +## Overview -Planning stack acts as the “brain” of autonomous driving. It uses all the results from Localization, Perception, and Map stacks to decide its maneuver and gives final trajectory to Control stack. +Planning stack acts as the “brain” of autonomous driving. It uses all the results from Localization, Perception, and Map stacks to decide its maneuver and gives final trajectory to Control stack. ## Role + These are high-level roles of Planning stack: - Calculates route that navigates to desired goal - Plans trajectory to follow the route - Make sure that vehicle does not collide with obstacles (including pedestrians and other vehicles) - - Make sure that the vehicle follows traffic rules during the navigation. This includes following traffic light, stopping at stoplines, stopping at crosswalks, etc. + - Make sure that the vehicle follows traffic rules during the navigation. This includes following traffic light, stopping at stoplines, stopping at crosswalks, etc. - Plan sequences of trajectories that are feasible for the vehicle. (e.g. no sharp turns that are kinematically impossible) ## Use Cases + Planning stack must satisfy following use cases: 1. Navigate vehicle from start to goal @@ -28,47 +29,54 @@ Planning stack must satisfy following use cases: ## Requirements -1. **Planing route from start to goal** (Use Case 1) - * Planning stack should be able to get starting lane and goal lane from given start pose and goal pose either in earth frame or map frame - * Planning stack should be able to calculate sequences of lanes that navigates vehicle from start lane to goal lane that minimizes cost function(either time based cost or distance based cost) +1. **Planning route from start to goal** (Use Case 1) + + - Planning stack should be able to get starting lane and goal lane from given start pose and goal pose either in earth frame or map frame + - Planning stack should be able to calculate sequences of lanes that navigates vehicle from start lane to goal lane that minimizes cost function(either time based cost or distance based cost) 2. **Driving along lane** (Use Case 2) - * Vehicle must drive between left boundary and right boundary of driving lane - * The vehicle must have at least 2 seconds margin between other vehicles so that it has enough distance to stop without collision. [reference](https://www.cedr.eu/download/Publications/2010/e_Distance_between_vehicles.pdf) + + - Vehicle must drive between left boundary and right boundary of driving lane + - The vehicle must have at least 2 seconds margin between other vehicles so that it has enough distance to stop without collision. [reference](https://www.cedr.eu/download/Publications/2010/e_Distance_between_vehicles.pdf) 3. **Operating lane change** (Use Case 3) - * Vehicle must change lane when - * lane change is necessary to follow planned route - * If current driving lane is blocked (e.g.
by parked vehicle) - * Vehicle must turn on appropriate turn signal 3 seconds before lane change and it must be turned on until lane change is finished - * Vehicle should stay in lane at least for 3 second before operating lane change for other participants to recognize ego vehicle's turn signal. - * there must be 2 seconds margin between any other vehicles during lane change - * lane change finishes 30m before any intersections - * vehicle should abort lane change when all of the following conditions are satisfied: - * Vehicle(base_link) is still in the original lane - * there is no longer 2 seconds margin between other n vehicles during lane change e.g. due to newly detected vehicles + + - Vehicle must change lane when + - lane change is necessary to follow planned route + - If current driving lane is blocked (e.g. by parked vehicle) + - Vehicle must turn on appropriate turn signal 3 seconds before lane change and it must be turned on until lane change is finished + - Vehicle should stay in lane at least for 3 second before operating lane change for other participants to recognize ego vehicle's turn signal. + - there must be 2 seconds margin between any other vehicles during lane change + - lane change finishes 30m before any intersections + - vehicle should abort lane change when all of the following conditions are satisfied: + - Vehicle(base_link) is still in the original lane + - there is no longer 2 seconds margin between other n vehicles during lane change e.g. due to newly detected vehicles 4. **Follow speed limit of lane** (Use Case 4) - * Speed profile of trajectory points in a lane must be below speed limit of the lane. + + - Speed profile of trajectory points in a lane must be below speed limit of the lane. 5. **Follow traffic light** (Use Case 5) - * Planning stack should refer to Perception output of the traffic light associated to driving lane. - * Speed profile of a trajectory at the associated stopline must be zero when relevant traffic light is red and it has enough distance to stop before the stopline with given deceleration configuration + + - Planning stack should refer to Perception output of the traffic light associated to driving lane. + - Speed profile of a trajectory at the associated stopline must be zero when relevant traffic light is red and it has enough distance to stop before the stopline with given deceleration configuration 6. **Turning left/right at intersections** (Use Case 6) - * Vehicle must stop before entering intersection whenever other vehicles are entering intersection unless ego vehicle has right of way + + - Vehicle must stop before entering intersection whenever other vehicles are entering intersection unless ego vehicle has right of way 7. **Parking** (Use Case 7) - * Vehicle must not hit other vehicle, curbs, or other obstacle during parking - * i.e. All points in planned trajectory has enough distance from other objects with ego vehicle's footprint taken into account + + - Vehicle must not hit other vehicle, curbs, or other obstacle during parking + - i.e. All points in planned trajectory has enough distance from other objects with ego vehicle's footprint taken into account 8. **General requirements to trajectory** - * Planned trajectory must satisfy requirements from Control stack: - * Planned trajectory must have speed profile that satisfies given acceleration and jerk limits unless vehicle is under emergency e.g. when pedestrian suddenly jumps into driving lane or front vehicle suddenly stops. 
- * Planned trajectory must be feasible by the given vehicle kinematic model - * Planned trajectory must satisfy given lateral acceleration and jerk limit - * Planned trajectory points within *n* [m] from ego vehicle should not change over time unless sudden steering or sudden acceleration is required to avoid collision with other vehicles. - * *n*[m] = *velocity_of_ego_vehicle* * *configured_time_horizon* + - Planned trajectory must satisfy requirements from Control stack: + - Planned trajectory must have speed profile that satisfies given acceleration and jerk limits unless vehicle is under emergency e.g. when pedestrian suddenly jumps into driving lane or front vehicle suddenly stops. + - Planned trajectory must be feasible by the given vehicle kinematic model + - Planned trajectory must satisfy given lateral acceleration and jerk limit + - Planned trajectory points within _n_ [m] from ego vehicle should not change over time unless sudden steering or sudden acceleration is required to avoid collision with other vehicles. + - _n_[m] = _velocity_of_ego_vehicle_\*_configured_time_horizon_ ## Input @@ -92,17 +100,17 @@ The table below summarizes the final output from Planning stack: | Trajectory | `/planning/trajectory`
(`autoware_planning_msgs::Trajectory`) | This is the sequence of pose that Control stack must follow. This must be smooth, and kinematically possible to follow by the Control stack. | | Turn Signal | `/vehicle/turn_signal_cmd`
(`autoware_vehicle_msgs::TurnSignal`) | This is the output to control turn signals of the vehicle. Planning stack will make sure that turn signal will be turned on according to planned maneuver. | -# Design +## Design -In order to achieve the requirements stated above, Planning stack is decomposed into the diagram below. +In order to achieve the requirements stated above, Planning stack is decomposed into the diagram below. Each requirement is met in following modules: -* Requirement 1: Mission calculates the overall route to reach goal from starting position -* Requirement 2-7: LaneDriving scenario plans trajectory along lanes in planned route -* Requirement 8: Parking scenario plans trajectory in free space to park into parking space -* Requirement 9: Both LaneDriving and Parking should output trajectory that satisfies the requirement +- Requirement 1: Mission calculates the overall route to reach goal from starting position +- Requirement 2-7: LaneDriving scenario plans trajectory along lanes in planned route +- Requirement 8: Parking scenario plans trajectory in free space to park into parking space +- Requirement 9: Both LaneDriving and Parking should output trajectory that satisfies the requirement -We have looked into different autonomous driving stacks and concluded that it is technically difficult to use unified planner to handle every possible situation. (See [here](DesignRationale.md) for more details). Therefore, we have decided to set different planners in parallel dedicated for each use case, and let scenario selector to decide depending on situations. Currently, we have reference implementation with two scenarios, on-road planner and parking planner, but any scenarios (e.g. highway, in-emergency, etc.) can be added as needed. +We have looked into different autonomous driving stacks and concluded that it is technically difficult to use unified planner to handle every possible situation. (See [here](DesignRationale.md) for more details). Therefore, we have decided to set different planners in parallel dedicated for each use case, and let scenario selector to decide depending on situations. Currently, we have reference implementation with two scenarios, on-road planner and parking planner, but any scenarios (e.g. highway, in-emergency, etc.) can be added as needed. It may be controversial whether new scenario is needed or existing scenario should be enhanced when adding new feature, and we still need more investigation to set the definition of “Scenario” module. @@ -111,7 +119,8 @@ It may be controversial whether new scenario is needed or existing scenario shou ## Mission planner ### Role -The role of mission planner is to calculate route that navigates from current vehicle pose to goal pose. The route is made of sequence of lanes that vehicle must follow to reach goal pose. + +The role of mission planner is to calculate route that navigates from current vehicle pose to goal pose. The route is made of sequence of lanes that vehicle must follow to reach goal pose. This module is responsible for calculating full route to goal, and therefore only use static map information. Any dynamic obstacle information (e.g. pedestrians and vehicles) is not considered during route planning. Therefore, output route topic is only published when goal pose is given and will be latched until next goal is provided. @@ -132,10 +141,11 @@ route: `autoware_planning_msgs::Route`
Message type is described below. Rou ![Planning_component](image/PlanningRouteImg.svg) ## Scenario selector + ### Role The role of scenario selector is to select appropriate scenario planner depending on situation. For example, if current pose is within road, then scenario selector should choose on-road planner, and if vehicle is within parking lot, then scenario selector should choose parking scenario. -Note that all trajectory calculated by each scenario module passes is collected by scenario selector, and scenario selector chooses which trajectory to be passed down to Control module. This ensures that trajectory from unselected scenario is not passed down to Control when scenario is changed even if there is a delay when scenario planner receives notification that it is unselected by the scenario selector. +Note that all trajectories calculated by the scenario modules are collected by the scenario selector, which chooses the trajectory that is passed down to the Control module. This ensures that a trajectory from an unselected scenario is not passed down to Control after a scenario change, even if there is a delay before the scenario planner is notified that it has been unselected by the scenario selector.
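A minimal sketch of this selection rule (our own illustration; the actual Scenario message fields and map query are not shown, and `shapely` is used only for the polygon test) might look like the following: choose the Parking planner while the ego pose lies inside a parking-lot polygon from the map, otherwise LaneDriving, and relay only the selected scenario's trajectory:

```python
from shapely.geometry import Point, Polygon  # polygon containment test only


def select_scenario(ego_xy, parking_lot_polygons):
    """Illustrative only: pick which scenario planner's output to relay."""
    ego = Point(ego_xy)
    in_parking_lot = any(Polygon(p).contains(ego) for p in parking_lot_polygons)
    selected = 'Parking' if in_parking_lot else 'LaneDriving'
    # The selector forwards only the trajectory received from `selected`,
    # so a late message from an unselected scenario never reaches Control.
    return selected
```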
### Input @@ -147,9 +157,9 @@ Note that all trajectory calculated by each scenario module passes is collected ### Output - scenario: `autoware_planning_msgs::Scenario` This contains current available scenario and selected scenario. Each Scenario module reads this topic and chooses whether to plan trajectory -- Trajectory: `autoware_planning_msgs::Trajectory`
This is the final trajectory of Planning stack, which is the trajectory from selected Scenario module. +- Trajectory: `autoware_planning_msgs::Trajectory`
This is the final trajectory of Planning stack, which is the trajectory from selected Scenario module. -## Scenarios +## Scenarios ### Role diff --git a/docs/design/software_architecture/Planning/ScenarioSelector/ScenarioSelector.md b/docs/design/software_architecture/Planning/ScenarioSelector/ScenarioSelector.md index d05338f6a8559..3c2271a9cc64d 100644 --- a/docs/design/software_architecture/Planning/ScenarioSelector/ScenarioSelector.md +++ b/docs/design/software_architecture/Planning/ScenarioSelector/ScenarioSelector.md @@ -1,5 +1,4 @@ -Scenario Selector -============= +# Scenario Selector ## Role @@ -15,4 +14,4 @@ The role of scenario selector is to select appropriate scenario planner dependin ### Output - scenario: `autoware_planning_msgs::Scenario`
This contains current available scenario and selected scenario. Each Scenario modules read this topic and chooses to plan trajectory -- Trajectory: `autoware_planning_msgs::Trajectory`
This is the final trajectory of Planning stack, which is the trajectory from selected Scenario module. +- Trajectory: `autoware_planning_msgs::Trajectory`
This is the final trajectory of Planning stack, which is the trajectory from selected Scenario module. diff --git a/docs/design/software_architecture/Sensing/Sensing.md b/docs/design/software_architecture/Sensing/Sensing.md index 99341fb8ea733..415248e85fbc1 100644 --- a/docs/design/software_architecture/Sensing/Sensing.md +++ b/docs/design/software_architecture/Sensing/Sensing.md @@ -1,7 +1,6 @@ -Sensing -========= +# Sensing -## Overview +## Overview For autonomous driving, a vehicle needs to be aware of its state and surrounding environment. Sensing stack collects the environment information through various sensors and manipulates data appropriately to be used by other stacks. @@ -10,20 +9,25 @@ For autonomous driving, a vehicle needs to be aware of its state and surrounding ## Role There are two main roles of Sensing stack: + - **Abstraction of sensor data to ROS message** Sensing stack unifies the output format of same type of sensors so that following stacks (e.g. Perception) do not have to be aware of the hardware. - **Preprocessing of sensor data** Raw data from sensors usually contains errors/noise due to hardware limitations. Sensing stack is responsible for removing such inaccuracies as much as possible before distributing sensor outputs to following stacks. Sensing stack may also do extra restructuring/formatting of data for so that there will be no duplicate data preprocessing done in different stacks. ## Use Cases + The use cases of Sensing stack are the followings: + 1. Estimating pose of the vehicle (Localization) 2. Recognizing surrounding objects (Perception) 3. Traffic light recognition (Perception) 4. Recording log data ## Requirements + For the use cases 1-3mentioned above, the actual computation is done in either [Localization](/Sensing/Sensing.md) stack and [Perception](/Perception/Perception.md) stack, but the data used for computation must be provided by Sensing stack. Therefore, requirement of the Sensing stack is: + 1. Sensing stack should provide enough information for selected Localization method. e.g. LiDAR pointcloud, GNSS, and IMU (Use Case 1) 2. Sensing stack should provide enough information for object recognition. e.g. LiDAR pointcloud and camera image (Use Case 2) 3. Sensing stack should provide enough information for traffic light recognition. e.g. camera image (Use Case 3) @@ -33,6 +37,7 @@ For the use cases 1-3mentioned above, the actual computation is done in either [ Since the architecture of [Localization](/Sensing/Sensing.md) stack and [Perception](/Perception/Perception.md) stack leaves choices of using different combinations of algorithms, Autoware does not set any requirements about input sensor configurations. It is the user's responsibility to come up with appropriate sensor configuration to achieve the user's use cases. As a reference, the recommended sensors from past experience with [Autoware.AI](http://autoware.ai/) is listed below: + - LiDAR - It should cover 360 FOV with minimal blind spots - Camera @@ -42,13 +47,14 @@ As a reference, the recommended sensors from past experience with [Autoware.AI]( - GNSS - IMU - Note that recommended sensor configuration may change depending on future enhancement of Localization/Perception algorithms. More details about Autoware's reference platform will be discussed in [here](https://gitlab.com/autowarefoundation/autoware-foundation/-/wikis/autonomy-hardware-working-group). +Note that recommended sensor configuration may change depending on future enhancement of Localization/Perception algorithms. 
More details about Autoware's reference platform will be discussed in [here](https://gitlab.com/autowarefoundation/autoware-foundation/-/wikis/autonomy-hardware-working-group). ## Input As mentioned above, the combination of sensor inputs can vary depending on user's sensor configurations. Therefore, incoming sensor data can also come in as different formats with various interface(USB, CAN bus, ethernet, etc.). Sensor drivers are usually made specific to the hardware, and Autoware only provides drivers for recommended sensor configuration mentioned above. Autoware does not limit the use of other sensors, but it is user's responsibility to prepare ROS sensor driver for the sensor in such case. ## Output + Since we don't set requirements about the sensor configuration of the vehicle, outputs of Sensing stack also varies. However, output message type must be defined for each type of sensors in order to abstract sensor outputs for other modules, which is one of the roles of Sensing stack. The table below summarizes the output message for recommended sensors. More sensors will be added into above table after appropriate investigation about how the data will be used in the following stack. In general, the final output of Sensing stack should be in [sensor_msgs](http://wiki.ros.org/sensor_msgs) type which is de facto standard in ROS systems. This allows developers to utilize default ROS tools such as RVIZ to visualize outputs. A reason should be provided if any other data type is used. @@ -59,38 +65,40 @@ The table below summarizes the output message for recommended sensors. More sens | Camera | `/sensing/{camera_name}/image`
(`sensor_msgs::Image`)
`/sensing/{camera_name}/camera_info`
(`sensor_msgs::CameraInfo`) | Camera should provide both Image and CameraInfo topics. Image message contains 2D light intensity information (usually RGB). It is commonly used in Perception (Traffic Light Recognition, Object Recognition) and in Localization(Visual Odometry). By convention, image topic must be published in optical frame of the camera.
CameraInfo message contains camera intrinsic parameters which is usually used to fuse pointcloud and image information in Perception stack. | | GNSS | `/sensing/gnss/gnss_pose`
(`geometry_msgs::PoseWithCovariance`) | This contains absolute 3D pose on earth. The output should be converted into map frame to be used in Localization stack. | | IMU | `/sensing/imu/imu_data`
(`sensor_msgs::Imu`) | This contains angular velocity and acceleration. The main use case is Twist estimation for Localization. The output data may also include estimated orientation as an option. | -**rationale**: GNSS data is published as `geometry_msgs::PoseWithCovariance` instead of `sensor_msgs/NavSatFix`. `gometry_msgs` are also one of de facto message type, and PoseWithCovariance message essentially contains the same information and is more convenient for Localization stack(the most likely user of the data) since localization is done in Cartesian coordinate. -# Design +**rationale**: GNSS data is published as `geometry_msgs::PoseWithCovariance` instead of `sensor_msgs/NavSatFix`. `geometry_msgs` are also one of de facto message type, and PoseWithCovariance message essentially contains the same information and is more convenient for Localization stack(the most likely user of the data) since localization is done in Cartesian coordinate. + +## Design + In order to support the requirements, Sensing stack is decomposed as below. Depending on the use case and hardware configuration of the vehicle, users may choose to use a subset of the components stated in the diagram. General convention is that for each sensor, there will be a driver and optionally a preprocessor component. Drivers are responsible for converting sensor data into ROS message and modification of the data during conversion should be minimal. It is preprocessors' responsibility to manipulate sensor data for ease of use. ![Sensing_component](image/SensingComponent.svg) - ## Drivers Driver components act as interface between the hardware and ROS software, and they are responsible for converting sensor data into ROS messages. In order to support Requirement 4, drivers should focus on converting raw data to ROS message with minimal modification as much as possible. Ideally, the output message type of driver should be the same as the final output of Sensing stack, but exceptions are allowed in order to avoid loss of information during conversion or to achieve faster computation time in preprocessor. - -* **LiDAR driver** - * Input: Raw data from LiDAR. Usually, it is list of range information with time stamp. - * Output: `sensor_msgs::PointCloud2` that includes XYZ coordinates in sensor frame. If a single scan of LiDAR contains points with different timestamp, then accurate timestamp should be specified as an additional field for each point. - -* **Camera driver** - * Input: - * Raw data from camera - * Calibration file of the camera that contains intrinsic camera parameter information - * Output: - * Image data in `sensor_msgs::Image`. - * Camera parameter information in`sensor_msgs::CameraInfo`. Although `sensor_msgs::CameraInfo` is not direct output from a camera, these information are published should be published with image since it contains essential information for image processing. -* **GNSS driver** - * Input: Raw data from GNSS. Usually contains latitude and longitude information. - * Output: Output should be in `sensor_msgs::NavSatFix` which contains calculated latitude and longitude information with addition of satellite fix information. -* **IMU driver** - * Input: Raw data from IMU. - * Output: - * measured linear acceleration and angular velocity values in `sensor_msgs::Imu`. - * (Optional) Orientation field in orientation in `sensor_msgs::Imu`. This field should be filled only when orientation is direct output from the hardware (e.g. by using magnetometer). 
It is very common to estimate orientation from reported linear acceleration and angular velocity, but they must be done in preprocessor module rather than in a driver component. - **rationale**: Estimating orientation in driver makes recorded data ambiguous whether the orientation comes from hardware or from software. + +- **LiDAR driver** + + - Input: Raw data from LiDAR. Usually, it is list of range information with time stamp. + - Output: `sensor_msgs::PointCloud2` that includes XYZ coordinates in sensor frame. If a single scan of LiDAR contains points with different timestamp, then accurate timestamp should be specified as an additional field for each point. + +- **Camera driver** + - Input: + - Raw data from camera + - Calibration file of the camera that contains intrinsic camera parameter information + - Output: + - Image data in `sensor_msgs::Image`. + - Camera parameter information in`sensor_msgs::CameraInfo`. Although `sensor_msgs::CameraInfo` is not direct output from a camera, these information are published should be published with image since it contains essential information for image processing. +- **GNSS driver** + - Input: Raw data from GNSS. Usually contains latitude and longitude information. + - Output: Output should be in `sensor_msgs::NavSatFix` which contains calculated latitude and longitude information with addition of satellite fix information. +- **IMU driver** + - Input: Raw data from IMU. + - Output: + - measured linear acceleration and angular velocity values in `sensor_msgs::Imu`. + - (Optional) Orientation field in orientation in `sensor_msgs::Imu`. This field should be filled only when orientation is direct output from the hardware (e.g. by using magnetometer). It is very common to estimate orientation from reported linear acceleration and angular velocity, but they must be done in preprocessor module rather than in a driver component. + **rationale**: Estimating orientation in driver makes recorded data ambiguous whether the orientation comes from hardware or from software. ## Preprocessors @@ -104,45 +112,48 @@ Preprocessors are responsible for manipulating ROS message data to be more "usef Since the output of preprocessors will the final output of Sensing stack, it must follow the output ROS message type stated above. -* **Pointcloud Preprocessor** - * Possible preprocessing functions: - * Self cropping: Removal of detected points from ego vehicle. - * Distortion correction: Compensation of ego vehicle's movement during 1 scan
**rationale**: This may cause inaccuracy in reported shape/position relative to the sensor origin. - * Outlier filter: Most LiDAR data contains random noise due to hardware constraints. Detected points from flying insects or rain drops are usually considered as noise. - * Concatenation: Combine points from multiple LiDAR outputs - * Ground filter: Removing grounds from pointcloud for easier object detection - * Multiplexer: Selecting pointclouds from LiDAR that is specific for certain use case - * Input: ROS message from the LiDAR driver. There may be multiple inputs if the vehicle has multiple LiDARs. - * Output: PointCloud preprocessor may output multiple topics in sensor_msgs::PointCloud2 depending on the use case. Some examples may be: - * Concatenated pointcloud: Pointcloud from all available LiDARs may have less blind spots - * Pointcloud without ground points: ground is usually out of interest when detecting obstacles, which helps perception. - -* **Camera Preprocessor** - * Possible preprocessing functions: - * Rectification - * Resizing - * Input: - * `sensor_msgs::Image` image data from driver - * `sensor_msgs::CameraInfo` from driver - * Output: The preprocessor may have multiple outputs depending on the selected hardware and selected algorithms in perception/localization. Some examples might be: - * rectified image: It is possible to rectify image using `sensor_msgs::CameraInfo` so that cameras can be treated as a [pinhole camera model](https://www.sciencedirect.com/topics/engineering/pinhole-camera-model), which is useful for projecting 3D information into 2D image( or vice versa). This enables fusion of sensor data in Perception stack to improve perception result. - * resized image: Smaller images might be useful to fasten computation time. - * `sensor_msgs::CameraInfo`: Camera preprocessor should relay camera information driver node without modifying any values since all parameters should be constant. - -* **GNSS Preprocessor** - * Possible preprocessing functions: - * conversion of (latitude, longitude, altitude) to (x,y,z) in map coordinate - * (Optional) Deriving orientation using multiple GNSS inputs - * (Optional) Filter out unreliable data - * Input: `sensor_msgs::NavSatFix` message from driver. - * Output: Pose in `geometry_msgs::PoseWithCovariance`. Unreliable data can also be filtered out based on satellite fix information. Each fields in the message should be calculated as following: - * Pose: This must be projected into map frame from latitude and longitude information - * Orientation: This should be derived from calculating changes in position over time or by using multiple GNSS sensors on vehicle. - * Covariance: Covariance should reflect reliability of GNSS output. It may be relaying covariance from the input or reflect satellite fix status. - -* **IMU Preprocessor** - * Possible preprocessing functions: - * Bias removal - * orientation estimation - * Input: `sensor_msgs::Imu` topic from IMU drivers. - * Output: preprocessed `sensor_msgs::Imu` either relayed or modified from the input with functions stated above. Modification depends on hardware specification of IMU, and requirements from Localization algorithm. +- **Pointcloud Preprocessor** + + - Possible preprocessing functions: + - Self cropping: Removal of detected points from ego vehicle. + - Distortion correction: Compensation of ego vehicle's movement during 1 scan
**rationale**: This may cause inaccuracy in reported shape/position relative to the sensor origin. + - Outlier filter: Most LiDAR data contains random noise due to hardware constraints. Detected points from flying insects or rain drops are usually considered as noise. + - Concatenation: Combine points from multiple LiDAR outputs + - Ground filter: Removing grounds from pointcloud for easier object detection + - Multiplexer: Selecting pointclouds from LiDAR that is specific for certain use case + - Input: ROS message from the LiDAR driver. There may be multiple inputs if the vehicle has multiple LiDARs. + - Output: PointCloud preprocessor may output multiple topics in sensor_msgs::PointCloud2 depending on the use case. Some examples may be: + - Concatenated pointcloud: Pointcloud from all available LiDARs may have less blind spots + - Pointcloud without ground points: ground is usually out of interest when detecting obstacles, which helps perception. + +- **Camera Preprocessor** + + - Possible preprocessing functions: + - Rectification + - Resizing + - Input: + - `sensor_msgs::Image` image data from driver + - `sensor_msgs::CameraInfo` from driver + - Output: The preprocessor may have multiple outputs depending on the selected hardware and selected algorithms in perception/localization. Some examples might be: + - rectified image: It is possible to rectify image using `sensor_msgs::CameraInfo` so that cameras can be treated as a [pinhole camera model](https://www.sciencedirect.com/topics/engineering/pinhole-camera-model), which is useful for projecting 3D information into 2D image( or vice versa). This enables fusion of sensor data in Perception stack to improve perception result. + - resized image: Smaller images might be useful to fasten computation time. + - `sensor_msgs::CameraInfo`: Camera preprocessor should relay camera information driver node without modifying any values since all parameters should be constant. + +- **GNSS Preprocessor** + + - Possible preprocessing functions: + - conversion of (latitude, longitude, altitude) to (x,y,z) in map coordinate + - (Optional) Deriving orientation using multiple GNSS inputs + - (Optional) Filter out unreliable data + - Input: `sensor_msgs::NavSatFix` message from driver. + - Output: Pose in `geometry_msgs::PoseWithCovariance`. Unreliable data can also be filtered out based on satellite fix information. Each fields in the message should be calculated as following: + - Pose: This must be projected into map frame from latitude and longitude information + - Orientation: This should be derived from calculating changes in position over time or by using multiple GNSS sensors on vehicle. + - Covariance: Covariance should reflect reliability of GNSS output. It may be relaying covariance from the input or reflect satellite fix status. + +- **IMU Preprocessor** + - Possible preprocessing functions: + - Bias removal + - orientation estimation + - Input: `sensor_msgs::Imu` topic from IMU drivers. + - Output: preprocessed `sensor_msgs::Imu` either relayed or modified from the input with functions stated above. Modification depends on hardware specification of IMU, and requirements from Localization algorithm. diff --git a/docs/design/software_architecture/TF.md b/docs/design/software_architecture/TF.md index ac68e0ad32cb8..536ef1f4c200c 100644 --- a/docs/design/software_architecture/TF.md +++ b/docs/design/software_architecture/TF.md @@ -1,15 +1,18 @@ # TF tree in Autoware + Autoware uses the ROS TF library for transforming coordinates. 
The Autoware TF tree can be accessed from any module within Autoware, and is illustrated below. ![TF](image/TF.svg) ## Frames -* earth: the origin of the [ECEF coordinate system](https://en.wikipedia.org/wiki/ECEF) (i.e. the center of the Earth). Although this frame is not currently used by any modules (and so is not shown in the diagram above), it was added to support the use of larger maps and multiple vehicles in the future. -* map: Local [ENU](http://www.dirsig.org/docs/new/coordinates.html) coordinate. This keeps the xy-plane relatively parallel to the ground surface (thus the z-axis points upwards). All map information should be provided in this frame, or provided in a frame that can be statically transformed into this frame, since most planning calculations are done in this frame. -* base_link: A frame that is rigidly attached to the vehicle. Currently, this is defined as the midpoint of the rear wheels projected to the ground. -* sensor_frame(s): One or more frames that represent the position of individual sensors. The actual name of each frame should be a combination of the name of the sensor and its position relative to the vehicle, such as `camera_front`, `gnss_back` and `lidar_top`. Note that a camera should provide both camera frame and camera optical frame as suggested in [REP-103](https://www.ros.org/reps/rep-0103.html) (eg: 'camera_front' and 'camera_front_optical'). + +- earth: the origin of the [ECEF coordinate system](https://en.wikipedia.org/wiki/ECEF) (i.e. the center of the Earth). Although this frame is not currently used by any modules (and so is not shown in the diagram above), it was added to support the use of larger maps and multiple vehicles in the future. +- map: Local [ENU](http://www.dirsig.org/docs/new/coordinates.html) coordinate. This keeps the xy-plane relatively parallel to the ground surface (thus the z-axis points upwards). All map information should be provided in this frame, or provided in a frame that can be statically transformed into this frame, since most planning calculations are done in this frame. +- base_link: A frame that is rigidly attached to the vehicle. Currently, this is defined as the midpoint of the rear wheels projected to the ground. +- sensor_frame(s): One or more frames that represent the position of individual sensors. The actual name of each frame should be a combination of the name of the sensor and its position relative to the vehicle, such as `camera_front`, `gnss_back` and `lidar_top`. Note that a camera should provide both camera frame and camera optical frame as suggested in [REP-103](https://www.ros.org/reps/rep-0103.html) (eg: 'camera_front' and 'camera_front_optical'). ## Transforms + | TF | Type | Providing Module | Description | | ----------------- | ------- | ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | earth->map | static | Map | Map modules will provide this TF according to an origin information parameter file. | @@ -17,56 +20,66 @@ Autoware uses the ROS TF library for transforming coordinates. The Autoware TF t | base_link->sensor | static | Vehicle | The Vehicle module provide sensor positions relative to the base_link using URDF. There may be multiple static transforms between the base_link and a sensor frame. 
For example, if a camera is calibrated against a LiDAR, then the camera's TF can be expressed by base_link->lidar->camera. | Remarks: -* A static frame does not mean it does not change. It may be remapped at times, but no interpolation will be done between the change (i.e. only the newest information is used). -* base_link->sensor is assumed to be static. In reality, this is not true due to vehicle suspension, but we assume that the displacement is small enough that it doesn't affect control of the vehicle. There is a [discussion](https://gitlab.com/autowarefoundation/autoware.auto/AutowareAuto/-/issues/292) about a new nav_base frame that resolves this issue, and this new frame may be integrated at some later point. -* The specification above is not meant to restrict the addition of other frames. Developers may add any additional frames as required, but the meaning of existing frames as described above must not be changed. + +- A static frame does not mean it does not change. It may be remapped at times, but no interpolation will be done between the change (i.e. only the newest information is used). +- base_link->sensor is assumed to be static. In reality, this is not true due to vehicle suspension, but we assume that the displacement is small enough that it doesn't affect control of the vehicle. There is a [discussion](https://gitlab.com/autowarefoundation/autoware.auto/AutowareAuto/-/issues/292) about a new nav_base frame that resolves this issue, and this new frame may be integrated at some later point. +- The specification above is not meant to restrict the addition of other frames. Developers may add any additional frames as required, but the meaning of existing frames as described above must not be changed. ## Regarding REP105 -For TF, ROS follows the naming conventions and semantic meanings in [REP-105](https://www.ros.org/reps/rep-0105.html -). The explanation given above also follows REP-105, but with the significant change of removing the odom frame. + +For TF, ROS follows the naming conventions and semantic meanings in [REP-105](https://www.ros.org/reps/rep-0105.html). The explanation given above also follows REP-105, but with the significant change of removing the odom frame. ### What is the odom frame? + The odom frame is defined as follows in REP-105: -``` + +```txt The coordinate frame called odom is a world-fixed frame. The pose of a mobile platform in the odom frame can drift over time, without any bounds. This drift makes the odom frame useless as a long-term global reference. However, the pose of a robot in the odom frame is guaranteed to be continuous, meaning that the pose of a mobile platform in the odom frame always evolves in a smooth way, without discrete jumps. In a typical setup the odom frame is computed based on an odometry source, such as wheel odometry, visual odometry or an inertial measurement unit. The odom frame is useful as an accurate, short-term local reference, but drift makes it a poor frame for long-term reference. 
``` + There have been [some](https://discourse.ros.org/t/localization-architecture/8602/28) [discussions](https://gitlab.com/autowarefoundation/autoware.auto/AutowareAuto/issues/292) about the purpose of the odom frame, and the main reasons for using it are as follows: -* odom->base_link is high-frequency and therefore suitable for control -* odom->base_link is continuous and keeps control from "jerks" -* odom->base_link is independent of localization, and so it is still safe to use in the event of localization failure so long as control is done in the odom frame. + +- odom->base_link is high-frequency and therefore suitable for control +- odom->base_link is continuous and keeps control from "jerks" +- odom->base_link is independent of localization, and so it is still safe to use in the event of localization failure so long as control is done in the odom frame. ### Why we removed the odom frame -Tier IV's view is that control *is* affected by localization, even if trajectory following is done only in the odom frame. For example, if a trajectory is calculated from the shape of the lane specified in an HD map, the localization result will be indirectly used when projecting the trajectory into the odom frame, and thus the trajectory calculation will be thrown off if localization fails. Also, any map-based calculations that are done before trajectory calculation, such as shape optimization using the map's drivable areas or velocity optimization using the predicted trajectory of other vehicles (derived from lane shape information) will also be affected by localization failure. + +Tier IV's view is that control _is_ affected by localization, even if trajectory following is done only in the odom frame. For example, if a trajectory is calculated from the shape of the lane specified in an HD map, the localization result will be indirectly used when projecting the trajectory into the odom frame, and thus the trajectory calculation will be thrown off if localization fails. Also, any map-based calculations that are done before trajectory calculation, such as shape optimization using the map's drivable areas or velocity optimization using the predicted trajectory of other vehicles (derived from lane shape information) will also be affected by localization failure. In order to ensure that control is unaffected by localization failure, we require that all preceding calculations do not use the map->odom transform. However, since trajectory following comes after planning in Autoware, it is almost impossible to prevent map->odom from being involved in trajectory calculations. Although this might be possible if Autoware planned like a human (who only uses route graph information from the map and can obtain geometry information from perception), it is very unlikely that an autonomous driving stack is capable of ensuring safety without using geometry information. Therefore, regardless of the frame in which it is done, control will still be affected by localization. 
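To make the frame discussion concrete, here is a hedged sketch (assuming a ROS 2 / rclpy setup, which this proposal does not prescribe; the node, timer rate, and callback names are ours) of a module that consumes map->base_link directly and treats a failed lookup as "do not update the world-fixed pose":

```python
import rclpy
from rclpy.node import Node
from rclpy.time import Time
from tf2_ros import Buffer, TransformListener, LookupException, ConnectivityException, ExtrapolationException


class MapBaseLinkReader(Node):
    """Illustrative only: consume map->base_link with failure handling."""

    def __init__(self):
        super().__init__('map_base_link_reader')
        self.tf_buffer = Buffer()
        self.tf_listener = TransformListener(self.tf_buffer, self)
        self.create_timer(0.02, self.on_timer)  # ~50 Hz consumer

    def on_timer(self):
        try:
            # One combined transform instead of chaining map->odom->base_link.
            tf = self.tf_buffer.lookup_transform('map', 'base_link', Time())
        except (LookupException, ConnectivityException, ExtrapolationException) as error:
            # Treat this as a localization failure: keep the last valid pose
            # rather than consuming a transform that may be missing or stale.
            self.get_logger().warn(f'map->base_link unavailable: {error}')
            return
        _ = tf.transform.translation  # ... use the pose in the world-fixed frame ...


def main():
    rclpy.init()
    rclpy.spin(MapBaseLinkReader())
```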
To control a vehicle without jerking or sudden steering in the event of localization failure, we set the following requirements for the Localization module: -* Localization results must be continuous -* Localization failures must be detected and the vehicle's pose should not be updated with any failed localization results + +- Localization results must be continuous +- Localization failures must be detected and the vehicle's pose should not be updated with any failed localization results This new Localization architecture assumes that twist and pose will be integrated with a sequential Bayesian Filter such as [EKF](https://en.wikipedia.org/wiki/Extended_Kalman_filter) or a [particle filter](https://en.wikipedia.org/wiki/Particle_filter). Additionally, the new architecture is able to integrate odometry information directly from sensor data (currently IMU and vehicle speed sensors only, but GNSS and doppler sensor data may be added in future) and is able to update TF smoothly and continuously at high frequency. As a result, all of the merits of odom->base_link stated in REP-105 can be satisfied by map->base_link in this new architecture and so there is no need to set the odom frame. To conclude, we removed the odom frame from this architecture proposal for the following reasons: + 1. It is almost impossible to avoid using map information in the Planning module, and thus the trajectory will have a dependency on map->odom (or map->base_link) 2. Therefore, to maintain safe control even after a localization failure, the following conditions must be satisfied by the Localization module: 1. It must be able to detect localization failures - 2. When a failure is detected, map->odom should not be updated + 2. When a failure is detected, map->odom should not be updated 3. If the Localization module can satisfy the above conditions, then there is no benefit in using odom->base_link, and all modules should use map->base_link instead whenever they need a world-fixed frame. ### Possible Concerns -* The argument above focuses on replacing map->odom->base_link with map->base_link, but doesn't prove that map->base_link is better. If we set odom->base_link, wouldn't we have more frame options available? - * Once we split map->base_link into map->odom and odom->base_link, we lose velocity information and uncertainty (covariance) information between them. We can expect more robustness if we integrate all information (odometry and output of multiple localization) at once. - * We have to wait for both transforms in order to obtain the map->base_link transform. However, it is easier to estimate delay time if map->base_link is combined into one TF. - * Creating the odom frame within this new architecture should be possible, but first one has to consider if there are any components that actually need to use odom->base_link. For example, we anticipate that odom->base_link might be needed when designing a safety architecture (out-of-scope for this proposal). However, it should only be added after a comprehensive safety analysis has been completed since [we already know that using the odom frame in the Control module does not ensure safety](#why-we-removed-the-odom-frame). -* Most ROS localization nodes assume odom->base_link to calculate map->odom. Wouldn't it be impossible to utilize such nodes without the odom frame? - * It is very unlikely that a single algorithm supports all use cases in all environments.
We need a module that integrates output from different localization algorithms, but most third-party localization nodes are not made with consideration for integration modules. Therefore, we still need to make changes when using third-party software anyway. Technically, REP-105 is integrating the odometry and localization results by sequentially connecting TFs, but this is not realistic when we add more algorithms. We should be thinking of a way to integrate localization methods in parallel, and this new architecture was made to support such a use case. ## Reference -* REP105: https://www.ros.org/reps/rep-0105.html -* REP103: https://www.ros.org/reps/rep-0103.html -* TF discussion on ROS Discourse: https://discourse.ros.org/t/localization-architecture/8602/28 -* TF discussion in Autoware.Auto (GitLab): https://gitlab.com/autowarefoundation/autoware.auto/AutowareAuto/issues/292 + +- REP105: <https://www.ros.org/reps/rep-0105.html> +- REP103: <https://www.ros.org/reps/rep-0103.html> +- TF discussion on ROS Discourse: <https://discourse.ros.org/t/localization-architecture/8602/28> +- TF discussion in Autoware.Auto (GitLab): <https://gitlab.com/autowarefoundation/autoware.auto/AutowareAuto/issues/292> diff --git a/docs/design/software_architecture/Vehicle/Vehicle.md b/docs/design/software_architecture/Vehicle/Vehicle.md index fdbdc47bd05c6..76c6eb18f787f 100644 --- a/docs/design/software_architecture/Vehicle/Vehicle.md +++ b/docs/design/software_architecture/Vehicle/Vehicle.md @@ -1,12 +1,10 @@ # Vehicle -# Overview +## Overview -Vehicle stack is an interface between Autoware and vehicle. This layer converts signals from Autoware to vehicle-specific, and vice versa. +Vehicle stack is an interface between Autoware and vehicle.
This layer converts signals from Autoware to vehicle-specific, and vice versa. This module needs to be designed according to the vehicle to be used. How to implement a new interface is described [below.](#how-to-design-a-new-vehicle-interface) - - ## Role There are two main roles of Vehicle stack: @@ -24,48 +22,49 @@ It is assumed that the vehicle has one of the following control interfaces. The use case and requirements change according to this type. - ## Use Cases Vehicle stack supports the following use cases. - - Speed control with desired velocity or acceleration (for type A only) - - Speed control with desired throttle and brake pedals (for type B only) - - Steering control with desired steering angle and/or steering angle velocity (for both) - - Shift control (for both) - - Turn signal control (for both) - +- Speed control with desired velocity or acceleration (for type A only) +- Speed control with desired throttle and brake pedals (for type B only) +- Steering control with desired steering angle and/or steering angle velocity (for both) +- Shift control (for both) +- Turn signal control (for both) ## Requirement To achieve the above use case, the vehicle stack requires the following conditions. -**Speed control with desired velocity or acceleration (for type A)** - - The vehicle can be controlled by the target velocity or acceleration. - - The input vehicle command includes target velocity or acceleration. - - The output to the vehicle includes desired velocity or acceleration in a vehicle-specific format. +### Speed control with desired velocity or acceleration (for type A) + +- The vehicle can be controlled by the target velocity or acceleration. +- The input vehicle command includes target velocity or acceleration. +- The output to the vehicle includes desired velocity or acceleration in a vehicle-specific format. + +### Speed control with the desired throttle and brake pedals (for type B) -**Speed control with the desired throttle and brake pedals (for type B)** - - The vehicle can be controlled by the target throttle and brake pedals. - - The input vehicle command includes target throttle and brake pedals for the desired speed. - - The output to the vehicle includes desired throttle and brake pedals in a vehicle-specific format. +- The vehicle can be controlled by the target throttle and brake pedals. +- The input vehicle command includes target throttle and brake pedals for the desired speed. +- The output to the vehicle includes desired throttle and brake pedals in a vehicle-specific format. -**Steering control with the desired steering angle and/or steering angle velocity** - - The vehicle can be controlled by the target steer angle and/or steering angle velocity. - - The input vehicle command includes the target steering angle and/or target steering angle velocity. - - The output to the vehicle includes the desired steering angle and/or steering angle velocity in a vehicle-specific format. +### Steering control with the desired steering angle and/or steering angle velocity +- The vehicle can be controlled by the target steer angle and/or steering angle velocity. +- The input vehicle command includes the target steering angle and/or target steering angle velocity. +- The output to the vehicle includes the desired steering angle and/or steering angle velocity in a vehicle-specific format. -**Shift control** - - The vehicle can be controlled by the target shift mode. - - The input vehicle command includes the desired shift. 
- - The output to the vehicle includes the desired shift in a vehicle-specific format. +### Shift control +- The vehicle can be controlled by the target shift mode. +- The input vehicle command includes the desired shift. +- The output to the vehicle includes the desired shift in a vehicle-specific format. -**Turn signal control** - - The vehicle can be controlled by the target turn signal mode. - - The input vehicle command includes the desired turn signal. - - The output to the vehicle includes the desired turn signal in a vehicle-specific format. +### Turn signal control + +- The vehicle can be controlled by the target turn signal mode. +- The input vehicle command includes the desired turn signal. +- The output to the vehicle includes the desired turn signal in a vehicle-specific format. ## Input @@ -86,8 +85,6 @@ The detailed contents in Vehicle Command are as follows. | Gear shifting command | std_msgs/Int32 | Target Gear shift | | Emergency command | std_msgs/Int32 | Emergency status of Autoware | - - ### Output There are two types of outputs from Vehicle stack: vehicle status to Autoware and a control command to the vehicle. @@ -101,29 +98,23 @@ The table below summarizes the output from Vehicle stack: | Shift status (optional) | `/vehicle/status/Shift`
(`autoware_vehicle_msgs/ShiftStamped`) | vehicle shift to Autoware [-] | | Turn signal status (optional) | `/vehicle/status/turn_signal`
(`autoware_vehicle_msgs/TurnSignal`) | vehicle turn signal status to Autoware [m/s] | - The output to the vehicle depends on each vehicle interface. | Output (to vehicle) | Topic(Data Type) | Explanation | | ------------------------ | ----------------------- | ------------------------------------ | | vehicle control messages | Depends on each vehicle | Control signals to drive the vehicle | - - - -# Design +## Design For vehicles of the type controlled by the target velocity or acceleration (type A) ![Vehicle_design_typeA](image/VehicleInterfaceDesign1.png) - For vehicles of the type controlled by the target throttle and brake pedals (type B) ![Vehicle_design_typeB](image/VehicleInterfaceDesign2.png) - -## Vehicle Interface +## Vehicle Interface ### Role @@ -150,7 +141,6 @@ NOTE: Lane driving is possible without the optional part. Design vehicle interfa ### Role - To convert the target acceleration to the target throttle and brake pedals with the given acceleration map. This node is used only for the case of vehicle type B. ### Input @@ -163,43 +153,40 @@ To convert the target acceleration to the target throttle and brake pedals with - Raw Vehicle Command (`autoware_vehicle_msgs/RawVehicleCommand`) - includes target throttle pedal, brake pedal, steering angle, steering angle velocity, gear shift, and emergency. -# How to design a new vehicle interface +### How to design a new vehicle interface -**For type A** +#### For type A Create a module that satisfies the following two requirements - - Receives `autoware_vehicle_msg/VehicleCommand` and sends control commands to the vehicle. - - Converts the information from the vehicle, publishes vehicle speed to Autoware with `geometry_msgs/TwistStamed`. - -For example, if the vehicle has an interface to be controlled with a target velocity, the velocity in `autoware_vehicle_msg/VehicleCommand` is sent to the vehicle as the target velocity. If the vehicle control interface is steering wheel angle, it is necessary to convert steering angle to steering wheel angle in this vehicle_interface. +- Receives `autoware_vehicle_msg/VehicleCommand` and sends control commands to the vehicle. +- Converts the information from the vehicle, publishes vehicle speed to Autoware with `geometry_msgs/TwistStamped`. +For example, if the vehicle has an interface to be controlled with a target velocity, the velocity in `autoware_vehicle_msg/VehicleCommand` is sent to the vehicle as the target velocity. If the vehicle control interface is steering wheel angle, it is necessary to convert steering angle to steering wheel angle in this vehicle_interface. -**For type B** +#### For type B Since `autoware_vehicle_msg/VehicleCommand` contains only the target velocity and acceleration, you need to convert these values for the throttle and brake pedal interface vehicles. In this case, use the `RawVehicleCmdConverter`. The `RawVehicleCmdConverter` converts the target acceleration to the target throttle/brake pedal based on the given acceleration map. You need to create this acceleration map in advance from vehicle data sheets and experiments. - With the use of `RawVehicleCmdConverter`, you need to create a module that satisfies the following two requirements - - Receives `autoware_vehicle_msg/RawVehicleCommand` and sends control commands to the vehicle. - - Converts the information from the vehicle, publishes vehicle speed to Autoware with `geometry_msgs/TwistStamed`. +- Receives `autoware_vehicle_msg/RawVehicleCommand` and sends control commands to the vehicle. 
+- Converts the information from the vehicle, publishes vehicle speed to Autoware with `geometry_msgs/TwistStamped`. -**How to make an acceleration map (for type B)** +#### How to make an acceleration map (for type B) When using the `RawVehicleCmdConverter` described above, it is necessary to create an acceleration map for each vehicle. The acceleration map is data in CSV format that describes how much acceleration is produced when the pedal pressed in each vehicle speed range. You can find the default acceleration map data in `src/vehicle/raw_vehicle_cmd_converter/data` as a reference. In the CSV data, the horizontal axis is the current velocity [m/s], the vertical axis is the vehicle-specific pedal value [-], and the element is the acceleration [m/ss] as described below. ![Vehicle_accel_map_description](image/VehicleAccelMapDescription.png) - This is the reference data created by TierIV with the following steps. - - - Press the pedal to a constant value on a flat road to accelerate/decelerate the vehicle. - - Save IMU acceleration and vehicle velocity data during acceleration/deceleration. - - Create a CSV file with the relationship between pedal values and acceleration at each vehicle speed. -After your acceleration map is created, load it when `RawVehicleCmdConverter` is launched (the file path is defined at the launch file). +- Press the pedal to a constant value on a flat road to accelerate/decelerate the vehicle. +- Save IMU acceleration and vehicle velocity data during acceleration/deceleration. +- Create a CSV file with the relationship between pedal values and acceleration at each vehicle speed. + +After your acceleration map is created, load it when `RawVehicleCmdConverter` is launched (the file path is defined at the launch file). -**Control of additional elements, such as turn signals** +#### Control of additional elements, such as turn signals If you need to control parts that are not related to the vehicle drive (turn signals, doors, window opening and closing, headlights, etc.), the vehicle interface will handle them separately. The current Autoware supports and implements only turn signals. 
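As a supplement to the acceleration map description above, here is a minimal, self-contained sketch of how such a table could be interpolated at runtime. This is not the actual `RawVehicleCmdConverter` code: the function names (`findInterval`, `lookupAcceleration`), the bin layout, and the toy values in `main()` are illustrative assumptions only, and CSV parsing is omitted for brevity.

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Finds the index i with bins[i] <= x <= bins[i + 1] and the interpolation ratio inside
// that interval. Assumes the bins are strictly increasing; x is clamped to the table range.
static void findInterval(const std::vector<double> & bins, double x, std::size_t & i, double & ratio)
{
  x = std::clamp(x, bins.front(), bins.back());
  i = 0;
  while (i + 2 < bins.size() && bins[i + 1] < x) { ++i; }
  ratio = (x - bins[i]) / (bins[i + 1] - bins[i]);
}

// Bilinear interpolation over an acceleration map: rows correspond to pedal values,
// columns to vehicle velocities, cells to accelerations, as in the CSV described above.
double lookupAcceleration(
  const std::vector<double> & pedal_bins, const std::vector<double> & velocity_bins,
  const std::vector<std::vector<double>> & accel_table, double pedal, double velocity)
{
  std::size_t i = 0, j = 0;
  double rp = 0.0, rv = 0.0;
  findInterval(pedal_bins, pedal, i, rp);
  findInterval(velocity_bins, velocity, j, rv);
  const double a0 = accel_table[i][j] * (1.0 - rv) + accel_table[i][j + 1] * rv;
  const double a1 = accel_table[i + 1][j] * (1.0 - rv) + accel_table[i + 1][j + 1] * rv;
  return a0 * (1.0 - rp) + a1 * rp;
}

int main()
{
  // Toy 3x3 map: pedal values 0.0/0.2/0.4 [-], velocities 0/5/10 [m/s], cells in [m/s^2].
  const std::vector<double> pedal_bins{0.0, 0.2, 0.4};
  const std::vector<double> velocity_bins{0.0, 5.0, 10.0};
  const std::vector<std::vector<double>> accel_table{
    {0.0, -0.1, -0.2}, {0.8, 0.6, 0.5}, {1.6, 1.3, 1.1}};
  std::cout << lookupAcceleration(pedal_bins, velocity_bins, accel_table, 0.3, 7.5) << std::endl;
  return 0;
}
```

The sketch reads off the acceleration produced by a given pedal value at a given speed, which is exactly the relationship the CSV encodes; the converter for a type B vehicle performs the inverse lookup (commanded acceleration and current velocity to pedal value) every control cycle.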
diff --git a/docs/developper_guide/CodingGuideline.md b/docs/developper_guide/CodingGuideline.md index 74c54432e35f2..9b189c25928f2 100644 --- a/docs/developper_guide/CodingGuideline.md +++ b/docs/developper_guide/CodingGuideline.md @@ -1,2 +1,3 @@ # TierIV Coding Guideline + See [confluence page](https://tier4.atlassian.net/wiki/spaces/AIP/pages/1194394777/T4) diff --git a/docs/developper_guide/PullRequestGuideline.md b/docs/developper_guide/PullRequestGuideline.md index 19d66cb9676db..b6928c17d3c2c 100644 --- a/docs/developper_guide/PullRequestGuideline.md +++ b/docs/developper_guide/PullRequestGuideline.md @@ -1,2 +1,3 @@ # TierIV Pull Request Guideline + See [confluence page](https://tier4.atlassian.net/wiki/spaces/AIP/pages/1762395113/T4+Pull+Request) diff --git a/docs/developper_guide/UnitTestGuideline.md b/docs/developper_guide/UnitTestGuideline.md index 9dc7250260813..a22918aa28879 100644 --- a/docs/developper_guide/UnitTestGuideline.md +++ b/docs/developper_guide/UnitTestGuideline.md @@ -1,2 +1,3 @@ # TierIV Unit Test Guideline + See [confluence page](https://tier4.atlassian.net/wiki/spaces/AIP/pages/1400045921/T4) diff --git a/docs/developper_guide/knowhow/PortingToROS2.md b/docs/developper_guide/knowhow/PortingToROS2.md index 1129fd7756340..19a1911ad39e1 100644 --- a/docs/developper_guide/knowhow/PortingToROS2.md +++ b/docs/developper_guide/knowhow/PortingToROS2.md @@ -1,82 +1,99 @@ -Porting ROS1 code to ROS2 -======================= +# Porting ROS1 code to ROS2 ## Setting up the environment + Do the following to setup and start ADE environment 1. Setup ade home -``` -$ cd ~ -$ mkdir ade-home -$ cd ade-home -$ touch .adehome + +```sh +cd ~ +mkdir ade-home +cd ade-home +touch .adehome ``` 2. Setup AutowareArchitectureProposal -``` -$ git clone https://github.com/tier4/AutowareArchitectureProposal -$ cd AutowareArchitectureProposal -$ git checkout ros2 + +```sh +git clone https://github.com/tier4/AutowareArchitectureProposal +cd AutowareArchitectureProposal +git checkout ros2 ``` 3. enter ADE -``` -$ cd ~/ade-home/AutowareArchitectureProposal -$ ade start --update --enter -$ cd AutowareArchitectureProposal + +```sh +cd ~/ade-home/AutowareArchitectureProposal +ade start --update --enter +cd AutowareArchitectureProposal ``` All commands that follow are to be entered in ADE. Next step is to fetch the sub-repos: - cd ~/AutowareArchitectureProposal - mkdir src - vcs import src < autoware.proj.repos - rosdep update - rosdep install -y --from-paths src --ignore-src --rosdistro foxy - colcon build --event-handlers console_cohesion+ +```sh +cd ~/AutowareArchitectureProposal +mkdir src +vcs import src < autoware.proj.repos +rosdep update +rosdep install -y --from-paths src --ignore-src --rosdistro foxy +colcon build --event-handlers console_cohesion+ +``` For instance, the `shift_decider` package is in the repository `github.com:tier4/pilot.auto.git`, which is now in the `autoware/pilot.auto` subdirectory. Now branch off `ros2` inside that subdirectory and delete the `COLCON_IGNORE` file in the package you want to port. - ## Important changes + The best source on migrating is the [migration guide](https://index.ros.org/doc/ros2/Contributing/Migration-Guide/). It doesn't mention everything though, so this section lists some areas with important changes. A good general strategy is to try to implement those changes, then iteratively run `colcon build --packages-up-to ` and fix the first compiler error. - ### Rewriting `package.xml` + The migration guide covers this well. 
See also [here](https://www.ros.org/reps/rep-0149.html) for a reference of the most recent version of this format. #### When to use which dependency tag + Any build tool needed only to set up the build needs `buildtool_depend`; e.g., - ament_cmake - rosidl_default_generators +```xml +ament_cmake +rosidl_default_generators +``` Any external package included with `#include` in the files (source or headers) needs to have a corresponding ``; e.g., - logging +```xml +logging +``` Any external package included with `#include` in the header files also needs to have a corresponding ``; e.g., - eigen +```xml +eigen +``` Any shared library that needs to be linked when the code is executed needs to have a corresponding ``: this describes the runtime dependencies; e.g., - std_msgs - rosidl_default_runtime +```xml +std_msgs +rosidl_default_runtime +``` If a package falls under all three categories (``, ``, and ``), it is possible to just use `` - shift_decider - +```xml +shift_decider +``` ### Rewriting `CMakeLists.txt` + This is not always straightforward. A starting point is to look at the [pub-sub tutorial](https://index.ros.org/doc/ros2/Tutorials/Writing-A-Simple-Cpp-Publisher-And-Subscriber/#cpppubsub). This uses `ament_cmake`, which has a [relatively good guide](https://index.ros.org/doc/ros2/Tutorials/Ament-CMake-Documentation/) that still doesn't cover everything (like what to do when installing an executable). #### `ament_cmake` and `ament_cmake_auto` + One drawback of `ament_cmake` is that it requires typing out the dependencies at least twice, once in `package.xml` and once or more in `CMakeLists.txt`. Another possibility is to use `ament_auto` to get terse `CMakeLists.txt`. See [this commit](https://github.com/tier4/Pilot.Auto/pull/7/commits/ef382a9b430fd69cb0a0f7ca57016d66ed7ef29d) for an example. Unfortunately, there is no documentation for this tool, so you can only learn it from examples and reading the source code. It is also limited in what it does – it cannot currently generate message definitions, for instance, and always links all dependencies to all targets. @@ -92,6 +109,7 @@ ament_auto_find_build_dependencies(REQUIRED ``` #### C++ standard + Add the following to ensure that a specific standard is required and extensions are not allowed ```cmake @@ -103,6 +121,7 @@ endif() ``` #### Compiler flags + Make sure that flags are added only for specific compilers. Not everyone uses `gcc` or `clang`. ```cmake @@ -112,6 +131,7 @@ endif() ``` #### Linters + Add only `ament_cmake_cppcheck` to the list of linters in `package.xml` ```xml @@ -147,40 +167,46 @@ colcon test --packages-select && colcon test-result --verbose ``` ### Replacing `std_msgs` -In ROS2, you should define semantically meaningful wrappers around primitive (number) types. They are deprecated in Foxy. +In ROS2, you should define semantically meaningful wrappers around primitive (number) types. They are deprecated in Foxy. ### Changing the namespaces and header files for generated message types + If you follow the migration guide and change the included headers to have an extra `/msg` in the path and convert to `snake_case`, you might get a cryptic error. Turns out _two_ files are being generated: One for C types (`.h` headers) and one for CPP types (`.hpp` headers). So don't forget to change `.h` to `.hpp` too. Also, don't forget to insert an additional `::msg` between the package namespace and the class name. A tip: Sublime Text has a handy "Case Conversion" package for converting to snake case. 
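As a concrete before/after illustration of these include-path and namespace changes (using `std_msgs/Bool` purely as an example type):

```cpp
// ROS1: CamelCase .h header, no msg sub-namespace
#include <std_msgs/Bool.h>
void engageCallbackRos1(const std_msgs::Bool::ConstPtr & msg);

// ROS2: extra /msg directory, snake_case .hpp header, extra ::msg namespace
#include <std_msgs/msg/bool.hpp>
void engageCallbackRos2(const std_msgs::msg::Bool::ConstSharedPtr msg);
```

The two halves are of course not meant to coexist in one translation unit; they only show the mechanical transformation applied to every message include and type name.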
- ### Adapting message definitions -If your message included something like `Header header`, it needs to be changed to `std_msgs/Header header`. Otherwise you'll see messages like `fatal error: autoware_api_msgs/msg/detail/header__struct.hpp: No such file or directory`. +If your message included something like `Header header`, it needs to be changed to `std_msgs/Header header`. Otherwise you'll see messages like `fatal error: autoware_api_msgs/msg/detail/header__struct.hpp: No such file or directory`. ### Inheriting from Node instead of NodeHandle members -That's where differences start to show – I decided to make the `VehicleCmdGate` a `Node` even though the filename would suggest that `vehicle_cmd_gate_node.cpp` would be it. That's because it has publishers, subscribers, and logging. It previously had _two_ NodeHandles, a public one and a private one (`"~"`). The public one was unused and could be removed. Private nodes are not supported in ROS2, so I simply made it public, but that is an area that needs to be brought up in review. +That's where differences start to show – I decided to make the `VehicleCmdGate` a `Node` even though the filename would suggest that `vehicle_cmd_gate_node.cpp` would be it. That's because it has publishers, subscribers, and logging. It previously had _two_ NodeHandles, a public one and a private one (`"~"`). The public one was unused and could be removed. Private nodes are not supported in ROS2, so I simply made it public, but that is an area that needs to be brought up in review. ### Latched topics + For each latched publisher, you can use `transient_local` durability QoS on the publisher, e.g. when the history depth is 1: - rclcpp::QoS durable_qos{1}; - durable_qos.transient_local(); +```cpp +rclcpp::QoS durable_qos{1}; +durable_qos.transient_local(); +``` or - rclcpp::QoS durable_qos = rclcpp::QoS(1).transient_local(); +```cpp +rclcpp::QoS durable_qos = rclcpp::QoS(1).transient_local(); +``` However, all subscribers to that topic will also need `transient_local` durability. If this is omitted, the connection between the two will be negotiated to be volatile, i.e. old messages will not be delivered when the subscriber comes online. - ### Timing issues + First, if the timer can be replaced with a data-driven pattern, it is the preferred alternative for the long term: #### The Problem with Timer-Driven Patterns + It is well understood that a polling or timer-driven pattern increases jitter (i.e. variance of latency). (Consider, for example: if every data processing node in a chain operates on a timer what is the best and worst case latency?) As a consequence for more timing-sensitive applications, it is generally not preferred to use a timer-driven pattern. On top of this, it is also reasonably well known that [use of the clock is nondeterministic](https://martinfowler.com/articles/nonDeterminism.html) and internally this has been a large source of frustration with bad, or timing sensitive tests. Such tests typically require specific timing and/or implicitly require a certain execution order (loosely enforced by timing assumptions rather than explicitly via the code). @@ -188,6 +214,7 @@ On top of this, it is also reasonably well known that [use of the clock is nonde As a whole, introducing the clock explicitly (or implicitly via timers) is problematic because it introduces additional state, and thus assumptions on the requirements for the operation of the component. 
Consider also leap seconds and how that might ruin the operation and/or assumptions needed for the proper operation of the component. #### Preferred Patterns + In general, a data-driven pattern should be preferred to a timer-driven pattern. One reasonable exception to this guideline is the state estimator/filter at the end of localization. A timer-driven pattern in this context is useful to provide smooth behavior and promote looser coupling between the planning stack and the remainder of the stack. The core idea behind a data-driven pattern is that as soon as data arrives, it should be appropriately processed. Furthermore, the system clock (or any other source of time) should not be used to manipulate data or the timestamps. This pattern is valuable since it implicitly cuts down on hidden state (being the clock), and thus simplifies assumptions needed for the node to work. @@ -195,49 +222,61 @@ The core idea behind a data-driven pattern is that as soon as data arrives, it s For examples of this kind of pattern, see the lidar object detection stack in Autoware.Auto. By not using any mention of the clock save for in the drivers, the stack can run equivalently on bag data, simulation data, or live data. A similar pattern with multiple inputs can be seen in the MPC implementation both internally and externally. #### Replicating `ros::Timer` + Assuming you still want to replicate the existing `ros::Timer` functionality: There is `rclcpp::WallTimer`, which has a similar interface, but it's not equivalent. The wall timer uses a wall clock (`RCL_STEADY_TIME` clock), i.e. it doesn't listen to the `/clock` topic populated by simulation time. That the timer doesn't stop when simulation time stops, and doesn't go faster/slower when simulation time goes faster or slower. By contrast, the `GenericTimer` provides an interface to supply a clock, but there is no convenient function for setting up such a timer, comparable to `Node::create_wall_timer`. For now, this works: - auto timer_callback = std::bind(&VehicleCmdGate::onTimer, this); - auto period = std::chrono::duration_cast( - std::chrono::duration(update_period_)); - timer_ = std::make_shared>( - this->get_clock(), period, std::move(timer_callback), - this->get_node_base_interface()->get_context()); - this->get_node_timers_interface()->add_timer(timer_, nullptr); +```cpp +auto timer_callback = std::bind(&VehicleCmdGate::onTimer, this); +auto period = std::chrono::duration_cast( + std::chrono::duration(update_period_)); +timer_ = std::make_shared>( + this->get_clock(), period, std::move(timer_callback), + this->get_node_base_interface()->get_context()); +this->get_node_timers_interface()->add_timer(timer_, nullptr); +``` Also, this doesn't work, even with a subsequent `add_timer()` call: - timer_ = rclcpp::create_timer(this, this->get_clock(), period, timer_callback); +```cpp +timer_ = rclcpp::create_timer(this, this->get_clock(), period, timer_callback); +``` #### Rosbag recording -Unfortunately, one additional problem remains. `ros2 bag` does not record `/clock` (aka sim time) whereas `rosbag` does. This implies that in order to get the same behavior in ROS 2, either: - * `rosbag` along with the `ros1_brdge` must be used - * Some explicit time source must be used and explicitly recorded by `ros2 bag` +Unfortunately, one additional problem remains. `ros2 bag` does not record `/clock` (aka sim time) whereas `rosbag` does. 
This implies that in order to get the same behavior in ROS 2, either: +- `rosbag` along with the `ros1_bridge` must be used +- Some explicit time source must be used and explicitly recorded by `ros2 bag` ### Parameters + It's not strictly necessary, but you probably want to make sure the filename is `xyz.param.yaml`. Then come two steps: #### Adjust code - double vel_lim; - pnh_.param("vel_lim", vel_lim, 25.0); + +```cpp +double vel_lim; +pnh_.param("vel_lim", vel_lim, 25.0); +``` becomes - const double vel_lim = declare_parameter("vel_lim", 25.0); +```cpp +const double vel_lim = declare_parameter("vel_lim", 25.0); +``` which is equivalent to - const double vel_lim = declare_parameter("vel_lim", 25.0); +```cpp +const double vel_lim = declare_parameter("vel_lim", 25.0); +``` This allows to set the initial value e.g. via a parameter file. **NOTE** Calling `ros2 param set vel_lim 1.234` after starting the node works but will not -alter the member `vel_lim`! See the section below on *dynamic reconfigure* to achieve that. - +alter the member `vel_lim`! See the section below on _dynamic reconfigure_ to achieve that. ### `dynamic_reconfigure` @@ -260,7 +299,7 @@ take the [MPC follower](https://github.com/tier4/Pilot.Auto/pull/52) you need to set a parameter handler and callback function: -```c++ +```cpp OnSetParametersCallbackHandle::SharedPtr set_param_res_; rcl_interfaces::msg::SetParametersResult paramCallback(const std::vector & parameters); ``` @@ -268,7 +307,7 @@ rcl_interfaces::msg::SetParametersResult paramCallback(const std::vector::SharedPtr sub_param_event_; @@ -371,7 +413,7 @@ void paramCallback(const rcl_interfaces::msg::ParameterEvent::SharedPtr event); In .cpp, -```c++ +```cpp // client setting param_client_ = std::make_shared(this, "param_client"); sub_param_event_ = @@ -396,107 +438,126 @@ However, this method calls the callback for all parameter changes of all nodes. reference: -https://discourse.ros.org/t/composition-and-parameters-best-practice-suggestions/1001 - -https://github.com/ros2/rclcpp/issues/243 - Connect to preview +- +- #### Adjust param file + Two levels of hierarchy need to be added around the parameters themselves and each level has to be indented relative to its parent (by two spaces in this example): - : - ros__parameters: - +```xml +: + ros__parameters: + +``` ##### Types + Also, ROS1 didn't have a problem when you specify an integer, e.g. `28` for a `double` parameter, but ROS2 does: - [vehicle_cmd_gate-1] terminate called after throwing an instance of 'rclcpp::exceptions::InvalidParameterTypeException' - [vehicle_cmd_gate-1] what(): parameter 'vel_lim' has invalid type: expected [double] got [integer] +```sh +[vehicle_cmd_gate-1] terminate called after throwing an instance of 'rclcpp::exceptions::InvalidParameterTypeException' +[vehicle_cmd_gate-1] what(): parameter 'vel_lim' has invalid type: expected [double] got [integer] +``` Best to just change `28` to `28.0` in the param file. See also [this issue](https://github.com/ros2/rclcpp/issues/979). - ### Launch file -There is a [migration guide](https://index.ros.org/doc/ros2/Tutorials/Launch-files-migration-guide/). One thing it doesn't mention is that the `.launch` file also needs to be renamed to `.launch.xml`. +There is a [migration guide](https://index.ros.org/doc/ros2/Tutorials/Launch-files-migration-guide/). One thing it doesn't mention is that the `.launch` file also needs to be renamed to `.launch.xml`. 
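Returning to the parameter callback declared above in the `dynamic_reconfigure` section: a minimal sketch of registering and implementing such a handler might look like the following. The node name and the `vel_lim_` member are illustrative only, not taken from the MPC follower.

```cpp
#include <rclcpp/rclcpp.hpp>
#include <rcl_interfaces/msg/set_parameters_result.hpp>

#include <functional>
#include <memory>
#include <vector>

class ExampleNode : public rclcpp::Node
{
public:
  ExampleNode() : rclcpp::Node("example_node")
  {
    vel_lim_ = declare_parameter("vel_lim", 25.0);
    // Without this registration, "ros2 param set" changes the stored parameter value
    // but never updates the member variable (see the NOTE above).
    set_param_res_ = add_on_set_parameters_callback(
      std::bind(&ExampleNode::paramCallback, this, std::placeholders::_1));
  }

private:
  rcl_interfaces::msg::SetParametersResult paramCallback(
    const std::vector<rclcpp::Parameter> & parameters)
  {
    rcl_interfaces::msg::SetParametersResult result;
    result.successful = true;
    for (const auto & parameter : parameters) {
      if (parameter.get_name() == "vel_lim") {
        vel_lim_ = parameter.as_double();
      }
    }
    return result;
  }

  double vel_lim_{};
  OnSetParametersCallbackHandle::SharedPtr set_param_res_;
};

int main(int argc, char ** argv)
{
  rclcpp::init(argc, argv);
  rclcpp::spin(std::make_shared<ExampleNode>());
  rclcpp::shutdown();
  return 0;
}
```

With this in place, `ros2 param set /example_node vel_lim 10.0` updates the member at runtime; an invalid value can be rejected by setting `result.successful = false` and filling in `result.reason`.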
### Replacing `tf2_ros::Buffer` + A `tf2_ros::Buffer` member that is filled by a `tf2_ros::TransformListener` can become a `tf2::BufferCore` in most cases. This reduces porting effort, since the a `tf2::BufferCore` can be constructed like a ROS1 `tf2_ros::Buffer`. For an example, see [this PR](https://github.com/tier4/Pilot.Auto/pull/11). However, in some cases the extra functionality of `tf2_ros::Buffer` is needed. For instance, waiting for a transform to arrive, usually in the form of `lookupTransform()` with a timeout argument. #### Avoiding a lookup with timeout + Often, code doesn't really need a transform lookup with timeout. For instance, [this package](https://github.com/tier4/Pilot.Auto/pull/80/files#diff-1fd60e4ec61c376d6b6b088a7878676408a5ac6a665977590610be92d5079e55L270-L277) has a "main" subscription callback, `onTrigger()`, that waits for the most recent transform (`tf2::TimePointZero`), then checks if auxiliary data from other subscriptions is there and returns early from the callback if it isn't. In that case, I think the callback can simply treat transforms the same way as this auxiliary data, i.e. just do a simple `lookupTransform()` with no timeout and return early from the callback if it fails. The node won't do any work anyway until it's ready (i.e. has all the auxiliary data). Note that this pattern works only when the node is waiting for the most recent transform – if your callback wants to use the transform at a specific time, e.g. the timestamp of the message that triggered the callback, this pattern doesn't make sense. In that case, avoiding `waitForTransform()` requires refactoring the architecture of your system, but that topic is currently out of scope. It's worth keeping in mind that waiting for transforms in general can be troublesome – it is only a probabilistic solution that fails in a bad way for latency spikes, makes it hard to reason about the behavior of the whole system and probably incurs more latency than necessary. #### When you can't avoid a lookup with timeout + You should be able to use `tf2_ros::Buffer::lookupTransform()` with a timeout out of the box. There is one caveat, namely there has been at least one report of such an error: - Do not call canTransform or lookupTransform with a timeout unless you are using another thread for populating data. - Without a dedicated thread it will always timeout. - If you have a seperate thread servicing tf messages, call setUsingDedicatedThread(true) on your Buffer instance. +```txt +Do not call canTransform or lookupTransform with a timeout unless you are using another thread for populating data. +Without a dedicated thread it will always timeout. +If you have a separate thread servicing tf messages, call setUsingDedicatedThread(true) on your Buffer instance. +``` There is also the drawback of spamming the console with warnings like - Warning: Invalid frame ID "a" passed to canTransform argument target_frame - frame does not exist - at line 133 in /tmp/binarydeb/ros-foxy-tf2-0.13.6/src/buffer_core.cpp +```sh +Warning: Invalid frame ID "a" passed to canTransform argument target_frame - frame does not exist + at line 133 in /tmp/binarydeb/ros-foxy-tf2-0.13.6/src/buffer_core.cpp +``` if the frame has never been sent before. #### What about `waitForTransform()`? + There is also a [`waitForTransform` API](http://docs.ros2.org/foxy/api/tf2_ros/classtf2__ros_1_1Buffer.html#a832b188dd65cbec52cabc1ec07856e49) involving futures, but it has bugs and limitations. 
In particular: -* Its callback does not get called when the request can be answered immediately -* You can not call `get()` on the future and expect it to return the transform or throw an exception when the timeout ends. It continues waiting after the timeout expires if the future is not ready yet (i.e. the transform hasn't arrived). -This limits you to the following style of calling the API, which doesn't use the callback and limits the waiting time before calling `get()` on the future: +- Its callback does not get called when the request can be answered immediately +- You can not call `get()` on the future and expect it to return the transform or throw an exception when the timeout ends. It continues waiting after the timeout expires if the future is not ready yet (i.e. the transform hasn't arrived). +This limits you to the following style of calling the API, which doesn't use the callback and limits the waiting time before calling `get()` on the future: - // In the node definition - tf2_ros::Buffer tf_buffer_; - tf2_ros::TransformListener tf_listener_; - ... - // In the constructor - auto cti = std::make_shared(this->get_node_base_interface(), this->get_node_timers_interface()); - tf_buffer_.setCreateTimerInterface(cti); - ... - // In the function processing data - auto tf_future = tf_buffer_.waitForTransform(a, b, msg_time, std::chrono::milliseconds(0), [this](auto){}); - auto status = tf_future.wait_for(timeout_); - if (status == std::future_status::deferred) { - // This never happened in experiments - } else if (status == std::future_status::timeout) { - // The transform did not arrive within the timeout duration - } else { - // The transform is here, and can now be accessed without triggering the waiting-infinitely bug - auto transform = tf_future.get(); - } - +```cpp +// In the node definition +tf2_ros::Buffer tf_buffer_; +tf2_ros::TransformListener tf_listener_; +... +// In the constructor +auto cti = std::make_shared(this->get_node_base_interface(), this->get_node_timers_interface()); +tf_buffer_.setCreateTimerInterface(cti); +... +// In the function processing data +auto tf_future = tf_buffer_.waitForTransform(a, b, msg_time, std::chrono::milliseconds(0), [this](auto){}); +auto status = tf_future.wait_for(timeout_); +if (status == std::future_status::deferred) { + // This never happened in experiments +} else if (status == std::future_status::timeout) { + // The transform did not arrive within the timeout duration +} else { + // The transform is here, and can now be accessed without triggering the waiting-infinitely bug + auto transform = tf_future.get(); +} +``` The `waitForTransform()` function will return immediately. Note that the timeout passed to `waitForTransform()` does not matter, only the timeout passed to `wait_for()`. There is a bug with this when `tf2::TimeStampZero` is requested instead of a nonzero time: the status will be `ready`, but accessing the result with `get()` throws an exception. There is another bug where this bug is not triggered under some conditions, for extra fun. So do not use `waitForTransform()` with `tf2::TimeStampZero` (or any other way of saying "time 0"). The silver lining is that this can be often avoided anyway since it is the scenario described in the section before. -For more details, see https://github.com/nnmm/tf2_example and the [resulting table](https://docs.google.com/spreadsheets/d/1BvFIMwp0kSkQecw2dkkyj4kR9edFLRDDBBTGynHBAN8/edit?usp=sharing). 
- +For more details, see and the [resulting table](https://docs.google.com/spreadsheets/d/1BvFIMwp0kSkQecw2dkkyj4kR9edFLRDDBBTGynHBAN8/edit?usp=sharing). ### Shared pointers + Be careful in creating a `std::shared_ptr` to avoid a double-free situation when the constructor argument after porting is a dereferenced shared pointer. For example, if `msg` previously was a raw pointer and now is a shared pointer, the following would lead to both `msg` and `a` deleting the same resource. - auto a = std::make_shared(*msg); +```cpp +auto a = std::make_shared(*msg); +``` To avoid this, just copy the shared pointer - std::shared_ptr a = msg; - +```cpp +std::shared_ptr a = msg; +``` ### Service clients + There is no synchronous API for service calls, and the futures API can not be used from inside a node, only the callback API. The futures API is what is used in tutorials such as [Writing a simple service and client](https://index.ros.org/doc/ros2/Tutorials/Writing-A-Simple-Cpp-Service-And-Client/#write-the-client-node), but note that the call to `rclcpp::spin_until_future_complete()` does not happen from inside any subscriber callback or similar. If you do call it from inside a node, you will get +```sh terminate called after throwing an instance of 'std::runtime_error' what(): Node has already been added to an executor. +``` The node itself is already added to the executor in `rclcpp::spin()` function inside the main function, and `rclcpp::spin_until_future_complete()` tries to add the node to another executor. @@ -506,17 +567,21 @@ So you're left with using a [callback](http://docs.ros2.org/foxy/api/rclcpp/clas Another idea for a workaround is to do something similar to what is done in the `rclcpp::spin_until_future_complete()` function by ourselves. Another possible avenue is using multithreaded executors, see [this post](https://answers.ros.org/question/343279/ros2-how-to-implement-a-sync-service-client-in-a-node/) for some more detail. - ### Logging + The node name is now automatically prepended to the log message, so that part can be removed. In methods, get the logger from the node with `get_logger()`. In a free function `foo()`, use `rclcpp::get_logger("foo")`. To provide a further level of hierarchy, use `get_logger("foo").get_child("bar")`. For example, - ROS_INFO_COND(show_debug_info_, "[MPC] some message with a float value %g", some_member_); +```cpp +ROS_INFO_COND(show_debug_info_, "[MPC] some message with a float value %g", some_member_); +``` should become - RCLCPP_INFO_EXPRESSION(get_logger(), show_debug_info_, "some message with a float value %g", some_member_); +```cpp +RCLCPP_INFO_EXPRESSION(get_logger(), show_debug_info_, "some message with a float value %g", some_member_); +``` The mapping of logger macros is basically just @@ -537,43 +602,52 @@ with the exception of where the `duration` is an integer interpreted as milliseconds as opposed to seconds in ROS1. A readable way to document that is - RCLCPP_WARN_SKIPFIRST_THROTTLE(get_logger(), *get_clock(), 5000 /* ms */, ...) - +```cpp +RCLCPP_WARN_SKIPFIRST_THROTTLE(get_logger(), *get_clock(), 5000 /* ms */, ...) +``` ### Shutting down a subscriber -The `shutdown()` method doesn't exist anymore, but you can just throw away the subscriber with `this->subscription_ = nullptr;` or similar, for instance inside the subscription callback. Curiously, this works even though the `subscription_` member variable is not the sole owner – the `use_count` is 3 in the `minimal_subscriber` example. 
+The `shutdown()` method doesn't exist anymore, but you can just throw away the subscriber with `this->subscription_ = nullptr;` or similar, for instance inside the subscription callback. Curiously, this works even though the `subscription_` member variable is not the sole owner – the `use_count` is 3 in the `minimal_subscriber` example. ### Durations -Beware of just replacing `ros::Duration` with `rclcpp::Duration` – it compiles, but now expects nanoseconds instead of seconds. Use `rclcpp::Duration::from_seconds` instead. +Beware of just replacing `ros::Duration` with `rclcpp::Duration` – it compiles, but now expects nanoseconds instead of seconds. Use `rclcpp::Duration::from_seconds` instead. ## Alternative: Semi-automated porting with ros2-migration-tools (not working) + **The following instructions to use `ros2-migration-tools` are given for completeness, we gave up and decided to port packages manually.** -From https://github.com/awslabs/ros2-migration-tools: +From : - pip3 install parse_cmake - git clone https://github.com/awslabs/ros2-migration-tools.git - wget https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz - tar xaf clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz - cp -r clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/lib/libclang.so ros2-migration-tools/clang/ - cp -r clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/lib/libclang.so.10 ros2-migration-tools/clang/ - cp -r clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/include/ ros2-migration-tools/clang/clang +```sh +pip3 install parse_cmake +git clone https://github.com/awslabs/ros2-migration-tools.git +wget https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz +tar xaf clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz +cp -r clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/lib/libclang.so ros2-migration-tools/clang/ +cp -r clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/lib/libclang.so.10 ros2-migration-tools/clang/ +cp -r clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/include/ ros2-migration-tools/clang/clang +``` -The package needs to be **built with ROS1**. I followed http://wiki.ros.org/noetic/Installation/Ubuntu outside of ADE +The package needs to be **built with ROS1**. I followed outside of ADE - sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' - sudo apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654 - sudo apt update - sudo apt install ros-melodic-ros-base +```sh +sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' +sudo apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654 +sudo apt update +sudo apt install ros-melodic-ros-base +``` -In https://github.com/awslabs/ros2-migration-tools#setup-the-ros1-packages, it instructs me to compile a ros1 package with colcon to get started. +In , it instructs me to compile a ros1 package with colcon to get started. - colcon build --cmake-args -DCMAKE_EXPORT_COMPILE_COMMANDS=ON +```sh +colcon build --cmake-args -DCMAKE_EXPORT_COMPILE_COMMANDS=ON +``` And that fails. I thought `colcon` is only for ROS2 so it should only work after porting, not before. 
I retried with `catkin_make` but also ran into issues there -``` + +```sh frederik.beaujean@frederik-beaujean-01:~/ade-home/AutowareArchitectureProposal$ catkin_make --source src/autoware/autoware.iv/control/shift_decider/ -DCMAKE_EXPORT_COMPILE_COMMANDS=ON Base path: /home/frederik.beaujean/ade-home/AutowareArchitectureProposal Source space: /home/frederik.beaujean/ade-home/AutowareArchitectureProposal/src/autoware/autoware.iv/control/shift_decider @@ -586,63 +660,74 @@ Install space: /home/frederik.beaujean/ade-home/AutowareArchitectureProposal/ins CMake Error: The source "/home/frederik.beaujean/ade-home/AutowareArchitectureProposal/src/autoware/autoware.iv/control/shift_decider/CMakeLists.txt" does not match the source "/home/frederik.beaujean/ade-home/AutowareArchitectureProposal/src/CMakeLists.txt" used to generate cache. Re-run cmake with a different source directory. Invoking "cmake" failed ``` + According to an expert: -> You should be able to compile it with colcon, cause it works for both ROS 1 and ROS 2 code. You are getting the same error with catkin so it's probably something related to ROS 1 and the build instructions. +> You should be able to compile it with colcon, cause it works for both ROS 1 and ROS 2 code. You are getting the same error with catkin so it's probably something related to ROS 1 and the build instructions. ## Strange errors and their causes -Some error messages are so unhelpful that it might help to collect them and their causes. +Some error messages are so unhelpful that it might help to collect them and their causes. ### package.xml errors -If you forget `ament_cmake`, or you use package format 2 in combination with a package format 3 tag like ``, you'll get the unhelpful error - CMake Error at /usr/share/cmake-3.16/Modules/FindPackageHandleStandardArgs.cmake:146 (message): - Could NOT find FastRTPS (missing: FastRTPS_INCLUDE_DIR FastRTPS_LIBRARIES) +If you forget `ament_cmake`, or you use package format 2 in combination with a package format 3 tag like ``, you'll get the unhelpful error +```sh +CMake Error at /usr/share/cmake-3.16/Modules/FindPackageHandleStandardArgs.cmake:146 (message): + Could NOT find FastRTPS (missing: FastRTPS_INCLUDE_DIR FastRTPS_LIBRARIES) +``` ### YAML param file #### Tabs instead of spaces + Used tabs instead of spaces in your param.yaml file? _Clearly_, the most user-friendly error message is - $ ros2 launch mypackage mypackage.launch.xml - [INFO] [launch]: All log files can be found below /home/user/.ros/log/2020-10-19-19-09-13-676799-t4-30425 - [INFO] [launch]: Default logging verbosity is set to INFO - [INFO] [mypackage-1]: process started with pid [30427] - [mypackage-1] [ERROR] [1603127353.755503075] [rcl]: Failed to parse global arguments - [mypackage-1] - [mypackage-1] >>> [rcutils|error_handling.c:108] rcutils_set_error_state() - [mypackage-1] This error state is being overwritten: - [mypackage-1] - [mypackage-1] 'Couldn't parse params file: '--params-file /home/user/workspace/install/mypackage/share/mypackage/param/myparameters.yaml'. Error: Error parsing a event near line 1, at /tmp/binarydeb/ros-foxy-rcl-yaml-param-parser-1.1.8/src/parse.c:599, at /tmp/binarydeb/ros-foxy-rcl-1.1.8/src/rcl/arguments.c:391' - [mypackage-1] - [mypackage-1] with this new error message: - [mypackage-1] - [mypackage-1] 'context is zero-initialized, at /tmp/binarydeb/ros-foxy-rcl-1.1.8/src/rcl/context.c:51' - [mypackage-1] - [mypackage-1] rcutils_reset_error() should be called after error handling to avoid this. 
- [mypackage-1] <<< - [mypackage-1] [ERROR] [1603127353.755523149] [rclcpp]: failed to finalize context: context is zero-initialized, at /tmp/binarydeb/ros-foxy-rcl-1.1.8/src/rcl/context.c:51 - [mypackage-1] terminate called after throwing an instance of 'rclcpp::exceptions::RCLInvalidROSArgsError' - [mypackage-1] what(): failed to initialize rcl: error not set - [ERROR] [mypackage-1]: process has died [pid 30427, exit code -6, cmd '/home/user/workspace/install/mypackage/lib/mypackage/mypackage --ros-args -r __node:=mypackage --params-file /home/user/workspace/install/mypackage/share/mypackage/param/myparameters.yaml']. +```sh +$ ros2 launch mypackage mypackage.launch.xml +[INFO] [launch]: All log files can be found below /home/user/.ros/log/2020-10-19-19-09-13-676799-t4-30425 +[INFO] [launch]: Default logging verbosity is set to INFO +[INFO] [mypackage-1]: process started with pid [30427] +[mypackage-1] [ERROR] [1603127353.755503075] [rcl]: Failed to parse global arguments +[mypackage-1] +[mypackage-1] >>> [rcutils|error_handling.c:108] rcutils_set_error_state() +[mypackage-1] This error state is being overwritten: +[mypackage-1] +[mypackage-1] 'Couldn't parse params file: '--params-file /home/user/workspace/install/mypackage/share/mypackage/param/myparameters.yaml'. Error: Error parsing a event near line 1, at /tmp/binarydeb/ros-foxy-rcl-yaml-param-parser-1.1.8/src/parse.c:599, at /tmp/binarydeb/ros-foxy-rcl-1.1.8/src/rcl/arguments.c:391' +[mypackage-1] +[mypackage-1] with this new error message: +[mypackage-1] +[mypackage-1] 'context is zero-initialized, at /tmp/binarydeb/ros-foxy-rcl-1.1.8/src/rcl/context.c:51' +[mypackage-1] +[mypackage-1] rcutils_reset_error() should be called after error handling to avoid this. +[mypackage-1] <<< +[mypackage-1] [ERROR] [1603127353.755523149] [rclcpp]: failed to finalize context: context is zero-initialized, at /tmp/binarydeb/ros-foxy-rcl-1.1.8/src/rcl/context.c:51 +[mypackage-1] terminate called after throwing an instance of 'rclcpp::exceptions::RCLInvalidROSArgsError' +[mypackage-1] what(): failed to initialize rcl: error not set +[ERROR] [mypackage-1]: process has died [pid 30427, exit code -6, cmd '/home/user/workspace/install/mypackage/lib/mypackage/mypackage --ros-args -r __node:=mypackage --params-file /home/user/workspace/install/mypackage/share/mypackage/param/myparameters.yaml']. +``` and that is indeed what ROS2 will tell you. - #### No indentation Without proper indentation of levels, there is a segfault when the YAML is parsed during `rclcpp::init(argc, argv)`. The error is similar to the above but begins with - [mpc_follower-1] free(): double free detected in tcache 2 +```sh +[mpc_follower-1] free(): double free detected in tcache 2 +``` Note that this message may be hidden when just launching with `ros2 launch`. It is shown running the node under `valgrind` which requires a `launch-prefix`. For example, modify `mpc_follower.launch.xml` - +```xml + +``` Another helpful option for diagnosing segfaults is to run under `gdb` to get a backtrace. Change the prefix - +```xml + +``` and after the segfault occurred, you can enter `bt` in the `xterm` window. diff --git a/docs/tutorial/HowToInstall.md b/docs/tutorial/HowToInstall.md index 820dc9921b6c9..b2a72b88e6619 100644 --- a/docs/tutorial/HowToInstall.md +++ b/docs/tutorial/HowToInstall.md @@ -3,6 +3,7 @@ > Note: If the CUDA or TensorRT frameworks have already been installed, we strongly recommend uninstalling them first. 1. 
Set up the Autoware repository + ```sh mkdir -p ~/workspace cd ~/workspace @@ -11,13 +12,16 @@ cd autoware.proj ``` 2. Run the setup script to install CUDA, cuDNN 8, OSQP, ROS 2 and TensorRT 7, entering 'y' when prompted (this step will take around 45 minutes) + ```sh ./setup_ubuntu20.04.sh ``` 3. Build the source code (this will take around 15 minutes) + ```sh source ~/.bashrc colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release --catkin-skip-building-tests ``` + > Several modules will report stderr output, but these are just warnings and can be safely ignored. diff --git a/docs/tutorial/QuickStart.md b/docs/tutorial/QuickStart.md index cc27323834ec1..8972dda2b4226 100644 --- a/docs/tutorial/QuickStart.md +++ b/docs/tutorial/QuickStart.md @@ -4,22 +4,25 @@ 1. [Download the sample pointcloud and vector maps](https://drive.google.com/open?id=1ovrJcFS5CZ2H51D8xVWNtEvj_oiXW-zk), unpack the zip archive and copy the two map files to the same folder. 2. Download the sample rosbag files and put them into the same folder, e.g., `~/rosbag2/sample/`. - - [db3](https://drive.google.com/file/d/1wLWyOlfH_-k4VYBgae1KAFlKdwJnH_si/view?usp=sharing) - - [yaml](https://drive.google.com/file/d/1Arb-QVnNHM-BFdB_icm7J7fWkyuZt7mZ/view?usp=sharing) - + + - [db3](https://drive.google.com/file/d/1wLWyOlfH_-k4VYBgae1KAFlKdwJnH_si/view?usp=sharing) + - [yaml](https://drive.google.com/file/d/1Arb-QVnNHM-BFdB_icm7J7fWkyuZt7mZ/view?usp=sharing) + 3. Open a terminal and launch Autoware -```sh -cd ~/workspace/autoware.proj -source install/setup.bash -ros2 launch autoware_launch logging_simulator.launch.xml map_path:=/path/to/map_folder vehicle_model:=lexus sensor_model:=aip_xx1 rosbag:=true perception:=false -``` + + ```sh + cd ~/workspace/autoware.proj + source install/setup.bash + ros2 launch autoware_launch logging_simulator.launch.xml map_path:=/path/to/map_folder vehicle_model:=lexus sensor_model:=aip_xx1 rosbag:=true perception:=false + ``` 4. Open a second terminal and play the sample rosbag file -```sh -cd ~/workspace/autoware.proj -source install/setup.bash -ros2 bag play /path/to/sample.625-2.bag2_0.db3 -r 0.2 -``` + + ```sh + cd ~/workspace/autoware.proj + source install/setup.bash + ros2 bag play /path/to/sample.625-2.bag2_0.db3 -r 0.2 + ``` 5. Focus the view on the ego vehicle by changing the `Target Frame` in the RViz Views panel from `viewer` to `base_link`. @@ -32,6 +35,7 @@ ros2 bag play /path/to/sample.625-2.bag2_0.db3 -r 0.2 1. [Download the sample pointcloud and vector maps](https://drive.google.com/open?id=197kgRfSomZzaSbRrjWTx614le2qN-oxx), unpack the zip archive and copy the two map files to the same folder. 2. Open a terminal and launch Autoware + ```sh cd ~/workspace/autoware.proj source install/setup.bash @@ -39,14 +43,19 @@ ros2 launch autoware_launch planning_simulator.launch.xml map_path:=/path/to/map ``` 3. Set an initial pose for the ego vehicle - - a) Click the `2D Pose estimate` button in the toolbar, or hit the `P` key - - b) In the 3D View pane, click and hold the left-mouse button, and then drag to set the direction for the initial pose. + + - a) Click the `2D Pose estimate` button in the toolbar, or hit the `P` key + - b) In the 3D View pane, click and hold the left-mouse button, and then drag to set the direction for the initial pose. + 4. Set a goal pose for the ego vehicle - - a) Click the `2D Nav Goal` button in the toolbar, or hit the `G` key - - b) In the 3D View pane, click and hold the left-mouse button, and then drag to set the direction for the goal pose. 
+ + - a) Click the `2D Nav Goal` button in the toolbar, or hit the `G` key + - b) In the 3D View pane, click and hold the left-mouse button, and then drag to set the direction for the goal pose. + 5. Engage the ego vehicle. - - a) Open the [autoware_web_controller](http://localhost:8085/autoware_web_controller/) in a browser. - - b) Click the `Engage` button. + + - a) Open the [autoware_web_controller](http://localhost:8085/autoware_web_controller/) in a browser. + - b) Click the `Engage` button. ### Note diff --git a/docs/tutorial/Requirements.md b/docs/tutorial/Requirements.md index 5d14c5f7b7f3f..94aad72f44ad4 100644 --- a/docs/tutorial/Requirements.md +++ b/docs/tutorial/Requirements.md @@ -5,10 +5,12 @@ - x86 CPU (8 cores) - 16GB RAM - [Optional] NVIDIA GPU (4GB RAM) + > Performance will be improved with more cores, RAM and a higher-spec graphics card. + - Although not required to run basic functionality, a GPU is mandatory in order to run the following components: - - Perception/ObjectRecofnition - - Perception/TrafficLightRecognitionRecofnition + - Perception/ObjectRecognition + - Perception/TrafficLightRecognitionRecognition ## Software diff --git a/docs/tutorial/SimulationTutorial.md b/docs/tutorial/SimulationTutorial.md index 531f87a9642b7..82b9af0c49874 100644 --- a/docs/tutorial/SimulationTutorial.md +++ b/docs/tutorial/SimulationTutorial.md @@ -1,8 +1,10 @@ # Simulation in Autoware Autoware provides two types of simulation: + - rosbag-based simulation that can be used for testing/validation of the `Sensing`, `Localization` and `Perception` stacks. - The Planning Simulator tool which is mainly used for testing/validation of `Planning` stack by simulating traffic rules, interactions with dynamic objects and control commands to the ego vehicle. + ![sim](https://user-images.githubusercontent.com/8327598/79709776-0bd47b00-82fe-11ea-872e-d94ef25bc3bf.png) ## How to use a pre-recorded rosbag file for simulation @@ -11,42 +13,46 @@ Autoware provides two types of simulation: 1. Download the sample pointcloud and vector maps from [here](https://drive.google.com/open?id=197kgRfSomZzaSbRrjWTx614le2qN-oxx), unpack the zip archive and copy the two map files to the same folder. 2. Download the sample rosbag files and put them into the same folder, e.g., `~/rosbag2/sample/`. - - db3 https://drive.google.com/file/d/1wLWyOlfH_-k4VYBgae1KAFlKdwJnH_si/view?usp=sharing - - yaml https://drive.google.com/file/d/1Arb-QVnNHM-BFdB_icm7J7fWkyuZt7mZ/view?usp=sharing - -| Sensor | Topic name | -| --------------------- | ---------------------------------------- | -| Velodyne 128 (Top) | /sensing/velodyne/top/velodyne_packets | -| Velodyne 16 (Right) | /sensing/velodyne/right/velodyne_packets | -| Velodyne 16 (Left) | /sensing/velodyne/left/velodyne_packets | -| IMU (Tamagawa TAG300) | /sensing/imu/tamagawa/imu_raw | -| GNSS (Ublox F9P) | /sensing/gnss/ublox/fix_velocity | -| | /sensing/gnss/ublox/nav_sat_fix | -| | /sensing/gnss/ublox/navpvt | -| CAN data | /vehicle/status/control_mode | -| | /vehicle/status/shift | -| | /vehicle/status/steering | -| | /vehicle/status/twist | -| ~~Camera x 7~~ | ~~/sensing/camera/camera[]/image_raw~~ | - -> Note: Due to privacy concerns, image data has been removed from the rosbag file. 
+ + - db3 + - yaml + + | Sensor | Topic name | + | --------------------- | ---------------------------------------- | + | Velodyne 128 (Top) | /sensing/velodyne/top/velodyne_packets | + | Velodyne 16 (Right) | /sensing/velodyne/right/velodyne_packets | + | Velodyne 16 (Left) | /sensing/velodyne/left/velodyne_packets | + | IMU (Tamagawa TAG300) | /sensing/imu/tamagawa/imu_raw | + | GNSS (Ublox F9P) | /sensing/gnss/ublox/fix_velocity | + | | /sensing/gnss/ublox/nav_sat_fix | + | | /sensing/gnss/ublox/navpvt | + | CAN data | /vehicle/status/control_mode | + | | /vehicle/status/shift | + | | /vehicle/status/steering | + | | /vehicle/status/twist | + | ~~Camera x 7~~ | ~~/sensing/camera/camera[]/image_raw~~ | + + > Note: Due to privacy concerns, image data has been removed from the rosbag file. 3. Open a terminal and launch Autoware in "rosbag mode". -```sh -cd ~/workspace/autoware.proj -source install/setup.bash -ros2 launch autoware_launch logging_simulator.launch.xml map_path:=/path/to/map_folder vehicle_model:=lexus sensor_model:=aip_xx1 rosbag:=true perception:=false -``` + + ```sh + cd ~/workspace/autoware.proj + source install/setup.bash + ros2 launch autoware_launch logging_simulator.launch.xml map_path:=/path/to/map_folder vehicle_model:=lexus sensor_model:=aip_xx1 rosbag:=true perception:=false + ``` 4. Open a second terminal and play the sample rosbag file -```sh -cd ~/workspace/autoware.proj -source install/setup.bash -ros2 bag play /path/to/sample.625-2.bag2_0.db3 -r 0.2 -``` + + ```sh + cd ~/workspace/autoware.proj + source install/setup.bash + ros2 bag play /path/to/sample.625-2.bag2_0.db3 -r 0.2 + ``` + ![rosbag_sim](https://user-images.githubusercontent.com/10920881/79726334-9381b000-8325-11ea-9ac6-ebbb29b11f14.png) -##### Note +### Note - Sample map and rosbag: © 2020 Tier IV, Inc. @@ -57,35 +63,45 @@ ros2 bag play /path/to/sample.625-2.bag2_0.db3 -r 0.2 1. Download the sample pointcloud and vector maps from [here](https://drive.google.com/open?id=197kgRfSomZzaSbRrjWTx614le2qN-oxx), unpack the zip archive and copy the two map files to the same folder. 2. Launch Autoware with Planning Simulator -```sh -cd ~/workspace/autoware.proj -source install/setup.bash -ros2 launch autoware_launch planning_simulator.launch.xml map_path:=/path/to/map_folder vehicle_model:=lexus sensor_model:=aip_xx1 -``` -![initial](https://user-images.githubusercontent.com/10920881/79816587-8b298380-83be-11ea-967c-8c45772e30f4.png) + + ```sh + cd ~/workspace/autoware.proj + source install/setup.bash + ros2 launch autoware_launch planning_simulator.launch.xml map_path:=/path/to/map_folder vehicle_model:=lexus sensor_model:=aip_xx1 + ``` + + ![initial](https://user-images.githubusercontent.com/10920881/79816587-8b298380-83be-11ea-967c-8c45772e30f4.png) 3. Set an initial pose for the ego vehicle - - a) Click the `2D Pose estimate` button in the toolbar, or hit the `P` key - - b) In the 3D View pane, click and hold the left-mouse button, and then drag to set the direction for the initial pose. -![start](https://user-images.githubusercontent.com/10920881/79816595-8e247400-83be-11ea-857a-32cf096ac3dc.png) + + - a) Click the `2D Pose estimate` button in the toolbar, or hit the `P` key + - b) In the 3D View pane, click and hold the left-mouse button, and then drag to set the direction for the initial pose. + + ![start](https://user-images.githubusercontent.com/10920881/79816595-8e247400-83be-11ea-857a-32cf096ac3dc.png) 4. 
Set a goal pose for the ego vehicle - - a) Click the "2D Nav Goal" button in the toolbar, or hit the `G` key - - b) In the 3D View pane, click and hold the left-mouse button, and then drag to set the direction for the goal pose. -![goal](https://user-images.githubusercontent.com/10920881/79816596-8fee3780-83be-11ea-9ee4-caabbef3a385.png) + + - a) Click the "2D Nav Goal" button in the toolbar, or hit the `G` key + - b) In the 3D View pane, click and hold the left-mouse button, and then drag to set the direction for the goal pose. + + ![goal](https://user-images.githubusercontent.com/10920881/79816596-8fee3780-83be-11ea-9ee4-caabbef3a385.png) 5. Engage the ego vehicle. - - a. Open the [autoware_web_controller](http://localhost:8085/autoware_web_controller/) in a browser. - - b. Click the `Engage` button. -![engage](https://user-images.githubusercontent.com/10920881/79714298-4db7ee00-830b-11ea-9ac4-11e126d7a7c4.png) + + - a) Open the [autoware_web_controller](http://localhost:8085/autoware_web_controller/) in a browser. + - b) Click the `Engage` button. + + ![engage](https://user-images.githubusercontent.com/10920881/79714298-4db7ee00-830b-11ea-9ac4-11e126d7a7c4.png) ### Simulate dummy obstacles - Set the position of dummy obstacle by clicking the `2D Dummy Pedestrian` or `2D Dummy Car` buttons in Rviz. - - These two buttons correspond to the shortcut keys `L` and `K` respectively. - - The properties of an object (including velocity, position/orientation error etc) can be adjusted via the `Tool Properties` panel in Rviz. - - Objects placed in the 3D View can be deleted by clicking the `Delete All Objects` button in Rviz and then clicking inside the 3D View pane. - ![dummy](https://user-images.githubusercontent.com/10920881/79742437-c9cb2980-833d-11ea-8ad7-7c3ed1a96540.png) + + - These two buttons correspond to the shortcut keys `L` and `K` respectively. + - The properties of an object (including velocity, position/orientation error etc) can be adjusted via the `Tool Properties` panel in Rviz. + - Objects placed in the 3D View can be deleted by clicking the `Delete All Objects` button in Rviz and then clicking inside the 3D View pane. + +![dummy](https://user-images.githubusercontent.com/10920881/79742437-c9cb2980-833d-11ea-8ad7-7c3ed1a96540.png) ### Simulate parking maneuver @@ -93,6 +109,6 @@ ros2 launch autoware_launch planning_simulator.launch.xml map_path:=/path/to/map ![parking](https://user-images.githubusercontent.com/10920881/79817389-56b6c700-83c0-11ea-873b-6ec73c8a5c38.png) -##### Note +### Note - sample map : © 2020 TierIV inc. 
diff --git a/mkdocs.yml b/mkdocs.yml index 7cc9189ae1e27..ec2321fd80504 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -46,59 +46,59 @@ extra_javascript: nav: - Home: docs/Readme.md - Tutorials: - - Requirements: docs/tutorial/Requirements.md - - How to install: docs/tutorial/HowToInstall.md - - How to run: - - Quick start: docs/tutorial/QuickStart.md - - Simulation: docs/tutorial/SimulationTutorial.md + - Requirements: docs/tutorial/Requirements.md + - How to install: docs/tutorial/HowToInstall.md + - How to run: + - Quick start: docs/tutorial/QuickStart.md + - Simulation: docs/tutorial/SimulationTutorial.md - Design: - - Architecture: - - Overview: docs/design/software_architecture/Overview.md - - Sensing: docs/design/software_architecture/Sensing/Sensing.md - - Localization: - - Overall: docs/design/software_architecture/Localization/Localization.md - - Pose estimator: docs/design/software_architecture/Localization/PoseEstimator/PoseEstimator.md - - Twist estimator: docs/design/software_architecture/Localization/TwistEstimator/TwistEstimator.md - - Pose twist fusion filter: docs/design/software_architecture/Localization/PoseTwistFusionFilter/PoseTwistFusionFilter.md - - Perception: - - Overall: docs/design/software_architecture/Perception/Perception.md - - Object recognition: - - Detection: docs/design/software_architecture/Perception/ObjectRecognition/Detection/Detection.md - - Tracking: docs/design/software_architecture/Perception/ObjectRecognition/Tracking/Tracking.md - - Prediction: docs/design/software_architecture/Perception/ObjectRecognition/Prediction/Prediction.md - - Traffic light recognition: - - Detection: docs/design/software_architecture/Perception/TrafficLightRecognition/Detection/Detection.md - - Classification: docs/design/software_architecture/Perception/TrafficLightRecognition/Classification/Classification.md - - Planning: - - Overall: docs/design/software_architecture/Planning/Planning.md - - Scenario selector: docs/design/software_architecture/Planning/ScenarioSelector/ScenarioSelector.md - - Lane driving Scenario: - - Overall: docs/design/software_architecture/Planning/LaneDriving/LaneDrivingScenario.md - - Behavior path planning: docs/design/software_architecture/Planning/LaneDriving/Behavior/LaneChangePlanner.md - - Behavior velocity planning: docs/design/software_architecture/Planning/LaneDriving/Behavior/BehaviorVelocityPlanner.md - - Parking: docs/design/software_architecture/Planning/Parking/ParkingScenario.md - - Design rationale: docs/design/software_architecture/Planning/DesignRationale.md - - Control: - - Overall: docs/design/software_architecture/Control/Control.md - - Trajectory follower: - - Lateral controller: docs/design/software_architecture/Control/TrajectoryFollower/LateralController.md - - Longitudinal controller: docs/design/software_architecture/Control/TrajectoryFollower/LongitudinalController.md - - Lateral longitudinal coupler: docs/design/software_architecture/Control/TrajectoryFollower/LatLonCoupler.md - - Vehicle cmd gate: docs/design/software_architecture/Control/VehicleCmdGate/VehicleCmdGate.md - - Vehicle: docs/design/software_architecture/Vehicle/Vehicle.md - - Map: docs/design/software_architecture/Map/Map.md - - TF: docs/design/software_architecture/TF.md - - Naming convention: docs/design/software_architecture/NamingConvention.md - - For developers: docs/design/software_architecture/ForDevelopers.md - - Node diagram: docs/design/software_architecture/NodeDiagram.md - - Repositry: docs/design/repository/Repository.md - - Release: 
docs/design/release/Release.md - - DevelopperGuide: - - Coding guideline: docs/developper_guide/CodingGuideline.md - - Pull request guideline: docs/developper_guide/PullRequestGuideline.md - - Unit test Guideline: docs/developper_guide/UnitTestGuideline.md - - Knowhow: - - docs/developper_guide/knowhow/PortingToROS2.md + - Architecture: + - Overview: docs/design/software_architecture/Overview.md + - Sensing: docs/design/software_architecture/Sensing/Sensing.md + - Localization: + - Overall: docs/design/software_architecture/Localization/Localization.md + - Pose estimator: docs/design/software_architecture/Localization/PoseEstimator/PoseEstimator.md + - Twist estimator: docs/design/software_architecture/Localization/TwistEstimator/TwistEstimator.md + - Pose twist fusion filter: docs/design/software_architecture/Localization/PoseTwistFusionFilter/PoseTwistFusionFilter.md + - Perception: + - Overall: docs/design/software_architecture/Perception/Perception.md + - Object recognition: + - Detection: docs/design/software_architecture/Perception/ObjectRecognition/Detection/Detection.md + - Tracking: docs/design/software_architecture/Perception/ObjectRecognition/Tracking/Tracking.md + - Prediction: docs/design/software_architecture/Perception/ObjectRecognition/Prediction/Prediction.md + - Traffic light recognition: + - Detection: docs/design/software_architecture/Perception/TrafficLightRecognition/Detection/Detection.md + - Classification: docs/design/software_architecture/Perception/TrafficLightRecognition/Classification/Classification.md + - Planning: + - Overall: docs/design/software_architecture/Planning/Planning.md + - Scenario selector: docs/design/software_architecture/Planning/ScenarioSelector/ScenarioSelector.md + - Lane driving Scenario: + - Overall: docs/design/software_architecture/Planning/LaneDriving/LaneDrivingScenario.md + - Behavior path planning: docs/design/software_architecture/Planning/LaneDriving/Behavior/LaneChangePlanner.md + - Behavior velocity planning: docs/design/software_architecture/Planning/LaneDriving/Behavior/BehaviorVelocityPlanner.md + - Parking: docs/design/software_architecture/Planning/Parking/ParkingScenario.md + - Design rationale: docs/design/software_architecture/Planning/DesignRationale.md + - Control: + - Overall: docs/design/software_architecture/Control/Control.md + - Trajectory follower: + - Lateral controller: docs/design/software_architecture/Control/TrajectoryFollower/LateralController.md + - Longitudinal controller: docs/design/software_architecture/Control/TrajectoryFollower/LongitudinalController.md + - Lateral longitudinal coupler: docs/design/software_architecture/Control/TrajectoryFollower/LatLonCoupler.md + - Vehicle cmd gate: docs/design/software_architecture/Control/VehicleCmdGate/VehicleCmdGate.md + - Vehicle: docs/design/software_architecture/Vehicle/Vehicle.md + - Map: docs/design/software_architecture/Map/Map.md + - TF: docs/design/software_architecture/TF.md + - Naming convention: docs/design/software_architecture/NamingConvention.md + - For developers: docs/design/software_architecture/ForDevelopers.md + - Node diagram: docs/design/software_architecture/NodeDiagram.md + - Repository: docs/design/repository/Repository.md + - Release: docs/design/release/Release.md + - DeveloperGuide: + - Coding guideline: docs/developer_guide/CodingGuideline.md + - Pull request guideline: docs/developer_guide/PullRequestGuideline.md + - Unit test Guideline: docs/developer_guide/UnitTestGuideline.md + - Knowhow: + - 
docs/developer_guide/knowhow/PortingToROS2.md - License/Credit: - - Credits: docs/Credits.md - - License: docs/LICENSE + - Credits: docs/Credits.md + - License: docs/LICENSE diff --git a/scripts/repos2workspace.py b/scripts/repos2workspace.py index 77635db02affe..e2782f75509ad 100755 --- a/scripts/repos2workspace.py +++ b/scripts/repos2workspace.py @@ -8,33 +8,33 @@ def repos2workspace(repos_file, output_file): - with open(repos_file, "r") as f: + with open(repos_file, 'r') as f: repos = yaml.load(f, Loader=yaml.SafeLoader) - paths = [f"src/{path}" for path in repos["repositories"]] - folders = [{"path": "."}] + paths = [f'src/{path}' for path in repos['repositories']] + folders = [{'path': '.'}] workspace = { - "folders": folders, - "settings": {"git.ignoredRepositories": ["."], "git.scanRepositories": paths}, + 'folders': folders, + 'settings': {'git.ignoredRepositories': ['.'], 'git.scanRepositories': paths}, } - with open(output_file, "w") as f: + with open(output_file, 'w') as f: json.dump(workspace, f, indent=2, sort_keys=False) def main(args): parser = argparse.ArgumentParser() - parser.add_argument("repos_file", type=Path) - parser.add_argument("-o", "--output", dest="output_file", + parser.add_argument('repos_file', type=Path) + parser.add_argument('-o', '--output', dest='output_file', type=Path, default=None) ns = parser.parse_args(args) if not ns.output_file: - ns.output_file = f"{ns.repos_file.absolute().parent.name}.code-workspace" + ns.output_file = f'{ns.repos_file.absolute().parent.name}.code-workspace' repos2workspace(ns.repos_file, ns.output_file) -if __name__ == "__main__": +if __name__ == '__main__': main(sys.argv[1:])
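For reference, `repos2workspace.py` converts a `.repos` file into a VS Code `.code-workspace` file whose `git.scanRepositories` setting lists a `src/<path>` entry for every repository in the `repositories` section. A minimal usage sketch, assuming PyYAML is installed, is shown below; the `.repos` filename is a placeholder, so substitute the one that actually exists in this repository.

```sh
# Hypothetical filename: replace autoware.proj.repos with the real .repos file.
python3 scripts/repos2workspace.py autoware.proj.repos -o autoware.proj.code-workspace

# Without -o, the output name is derived from the parent directory of the .repos file,
# i.e. <parent_dir>.code-workspace, written to the current working directory.
python3 scripts/repos2workspace.py autoware.proj.repos
```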